v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
code-stubs-arm.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if defined(V8_TARGET_ARCH_ARM)
31 
32 #include "bootstrapper.h"
33 #include "code-stubs.h"
34 #include "regexp-macro-assembler.h"
35 
36 namespace v8 {
37 namespace internal {
38 
39 
40 #define __ ACCESS_MASM(masm)
41 
42 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
43  Label* slow,
44  Condition cond,
45  bool never_nan_nan);
46 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
47  Register lhs,
48  Register rhs,
49  Label* lhs_not_nan,
50  Label* slow,
51  bool strict);
52 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond);
53 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
54  Register lhs,
55  Register rhs);
56 
57 
58 // Check if the operand is a heap number.
59 static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
60  Register scratch1, Register scratch2,
61  Label* not_a_heap_number) {
62  __ ldr(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
63  __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
64  __ cmp(scratch1, scratch2);
65  __ b(ne, not_a_heap_number);
66 }
67 
68 
69 void ToNumberStub::Generate(MacroAssembler* masm) {
70  // The ToNumber stub takes one argument in r0.
71  Label check_heap_number, call_builtin;
72  __ JumpIfNotSmi(r0, &check_heap_number);
73  __ Ret();
74 
75  __ bind(&check_heap_number);
76  EmitCheckForHeapNumber(masm, r0, r1, ip, &call_builtin);
77  __ Ret();
78 
79  __ bind(&call_builtin);
80  __ push(r0);
81  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
82 }
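
The two checks above (the Smi test in JumpIfNotSmi and the map compare against kHeapNumberMapRootIndex) rest on V8's 32-bit pointer tagging. A minimal host-side sketch of that tagging scheme (illustrative helpers, not part of this file):

#include <cstdint>

// A Smi stores its value shifted left by kSmiTagSize (1) with a 0 tag bit;
// heap object pointers carry kHeapObjectTag (1) in the low bit, which is why
// FieldMemOperand subtracts one from every raw field offset.
static inline bool IsSmi(uint32_t tagged) { return (tagged & 1u) == 0; }

static inline int32_t SmiValue(uint32_t tagged) {
  return static_cast<int32_t>(tagged) >> 1;  // arithmetic shift drops the tag
}

static inline uint32_t SmiTag(int32_t value) {
  return static_cast<uint32_t>(value) << 1;  // assumes the value fits in 31 bits
}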
83 
84 
85 void FastNewClosureStub::Generate(MacroAssembler* masm) {
86  // Create a new closure from the given function info in new
87  // space. Set the context to the current context in cp.
88  Counters* counters = masm->isolate()->counters();
89 
90  Label gc;
91 
92  // Pop the function info from the stack.
93  __ pop(r3);
94 
95  // Attempt to allocate new JSFunction in new space.
96  __ AllocateInNewSpace(JSFunction::kSize,
97  r0,
98  r1,
99  r2,
100  &gc,
101  TAG_OBJECT);
102 
103  __ IncrementCounter(counters->fast_new_closure_total(), 1, r6, r7);
104 
105  int map_index = (language_mode_ == CLASSIC_MODE)
108 
109  // Compute the function map in the current native context and set that
110  // as the map of the allocated object.
113  __ ldr(r5, MemOperand(r2, Context::SlotOffset(map_index)));
115 
116  // Initialize the rest of the function. We don't have to update the
117  // write barrier because the allocated object is in new space.
118  __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
119  __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
126 
127  // Initialize the code pointer in the function to be the one
128  // found in the shared function info object.
129  // But first check if there is an optimized version for our context.
130  Label check_optimized;
131  Label install_unoptimized;
132  if (FLAG_cache_optimized_code) {
133  __ ldr(r1,
135  __ tst(r1, r1);
136  __ b(ne, &check_optimized);
137  }
138  __ bind(&install_unoptimized);
139  __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
142  __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
144 
145  // Return result. The argument function info has been popped already.
146  __ Ret();
147 
148  __ bind(&check_optimized);
149 
150  __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, r6, r7);
151 
152  // r2 holds native context, r1 points to fixed array of 3-element entries
153  // (native context, optimized code, literals).
154  // The optimized code map must never be empty, so check the first elements.
155  Label install_optimized;
156  // Speculatively move code object into r4.
159  __ cmp(r2, r5);
160  __ b(eq, &install_optimized);
161 
162  // Iterate through the rest of the map backwards. r4 holds an index as a Smi.
163  Label loop;
165  __ bind(&loop);
166  // Do not double check first entry.
167 
169  __ b(eq, &install_unoptimized);
170  __ sub(r4, r4, Operand(
171  Smi::FromInt(SharedFunctionInfo::kEntryLength))); // Skip an entry.
172  __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
173  __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
174  __ ldr(r5, MemOperand(r5));
175  __ cmp(r2, r5);
176  __ b(ne, &loop);
177  // Hit: fetch the optimized code.
178  __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
179  __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
180  __ add(r5, r5, Operand(kPointerSize));
181  __ ldr(r4, MemOperand(r5));
182 
183  __ bind(&install_optimized);
184  __ IncrementCounter(counters->fast_new_closure_install_optimized(),
185  1, r6, r7);
186 
187  // TODO(fschneider): Idea: store proper code pointers in the map and either
188  // unmangle them on marking or do nothing as the whole map is discarded on
189  // major GC anyway.
190  __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
192 
193  // Now link a function into a list of optimized functions.
195 
197  // No need for write barrier as JSFunction (r0) is in the new space.
198 
200  // Store JSFunction (r0) into r4 before issuing write barrier as
201  // it clobbers all the registers passed.
202  __ mov(r4, r0);
203  __ RecordWriteContextSlot(
204  r2,
206  r4,
207  r1,
210 
211  // Return result. The argument function info has been popped already.
212  __ Ret();
213 
214  // Create a new closure through the slower runtime call.
215  __ bind(&gc);
216  __ LoadRoot(r4, Heap::kFalseValueRootIndex);
217  __ Push(cp, r3, r4);
218  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
219 }
220 
221 
222 void FastNewContextStub::Generate(MacroAssembler* masm) {
223  // Try to allocate the context in new space.
224  Label gc;
225  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
226 
227  // Attempt to allocate the context in new space.
228  __ AllocateInNewSpace(FixedArray::SizeFor(length),
229  r0,
230  r1,
231  r2,
232  &gc,
233  TAG_OBJECT);
234 
235  // Load the function from the stack.
236  __ ldr(r3, MemOperand(sp, 0));
237 
238  // Set up the object header.
239  __ LoadRoot(r1, Heap::kFunctionContextMapRootIndex);
240  __ mov(r2, Operand(Smi::FromInt(length)));
243 
244  // Set up the fixed slots, copy the global object from the previous context.
246  __ mov(r1, Operand(Smi::FromInt(0)));
251 
252  // Initialize the rest of the slots to undefined.
253  __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
254  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
256  }
257 
258  // Remove the on-stack argument and return.
259  __ mov(cp, r0);
260  __ pop();
261  __ Ret();
262 
263  // Need to collect. Call into runtime system.
264  __ bind(&gc);
265  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
266 }
267 
268 
269 void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
270  // Stack layout on entry:
271  //
272  // [sp]: function.
273  // [sp + kPointerSize]: serialized scope info
274 
275  // Try to allocate the context in new space.
276  Label gc;
277  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
278  __ AllocateInNewSpace(FixedArray::SizeFor(length),
279  r0, r1, r2, &gc, TAG_OBJECT);
280 
281  // Load the function from the stack.
282  __ ldr(r3, MemOperand(sp, 0));
283 
284  // Load the serialized scope info from the stack.
285  __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
286 
287  // Set up the object header.
288  __ LoadRoot(r2, Heap::kBlockContextMapRootIndex);
290  __ mov(r2, Operand(Smi::FromInt(length)));
292 
293  // If this block context is nested in the native context we get a smi
294  // sentinel instead of a function. The block context should get the
295  // canonical empty function of the native context as its closure which
296  // we still have to look up.
297  Label after_sentinel;
298  __ JumpIfNotSmi(r3, &after_sentinel);
299  if (FLAG_debug_code) {
300  const char* message = "Expected 0 as a Smi sentinel";
301  __ cmp(r3, Operand::Zero());
302  __ Assert(eq, message);
303  }
304  __ ldr(r3, GlobalObjectOperand());
307  __ bind(&after_sentinel);
308 
309  // Set up the fixed slots, copy the global object from the previous context.
315 
316  // Initialize the rest of the slots to the hole value.
317  __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
318  for (int i = 0; i < slots_; i++) {
319  __ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS));
320  }
321 
322  // Remove the on-stack argument and return.
323  __ mov(cp, r0);
324  __ add(sp, sp, Operand(2 * kPointerSize));
325  __ Ret();
326 
327  // Need to collect. Call into runtime system.
328  __ bind(&gc);
329  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
330 }
331 
332 
333 static void GenerateFastCloneShallowArrayCommon(
334  MacroAssembler* masm,
335  int length,
337  Label* fail) {
338  // Registers on entry:
339  //
340  // r3: boilerplate literal array.
342 
343  // All sizes here are multiples of kPointerSize.
344  int elements_size = 0;
345  if (length > 0) {
347  ? FixedDoubleArray::SizeFor(length)
348  : FixedArray::SizeFor(length);
349  }
350  int size = JSArray::kSize + elements_size;
351 
352  // Allocate both the JS array and the elements array in one big
353  // allocation. This avoids multiple limit checks.
354  __ AllocateInNewSpace(size,
355  r0,
356  r1,
357  r2,
358  fail,
359  TAG_OBJECT);
360 
361  // Copy the JS array part.
362  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
363  if ((i != JSArray::kElementsOffset) || (length == 0)) {
364  __ ldr(r1, FieldMemOperand(r3, i));
365  __ str(r1, FieldMemOperand(r0, i));
366  }
367  }
368 
369  if (length > 0) {
370  // Get hold of the elements array of the boilerplate and set up the
371  // elements pointer in the resulting object.
373  __ add(r2, r0, Operand(JSArray::kSize));
375 
376  // Copy the elements array.
377  ASSERT((elements_size % kPointerSize) == 0);
378  __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
379  }
380 }
381 
382 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
383  // Stack layout on entry:
384  //
385  // [sp]: constant elements.
386  // [sp + kPointerSize]: literal index.
387  // [sp + (2 * kPointerSize)]: literals array.
388 
389  // Load boilerplate object into r3 and check if we need to create a
390  // boilerplate.
391  Label slow_case;
392  __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
393  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
394  __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
396  __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
397  __ b(eq, &slow_case);
398 
399  FastCloneShallowArrayStub::Mode mode = mode_;
400  if (mode == CLONE_ANY_ELEMENTS) {
401  Label double_elements, check_fast_elements;
404  __ CompareRoot(r0, Heap::kFixedCOWArrayMapRootIndex);
405  __ b(ne, &check_fast_elements);
406  GenerateFastCloneShallowArrayCommon(masm, 0,
407  COPY_ON_WRITE_ELEMENTS, &slow_case);
408  // Return and remove the on-stack parameters.
409  __ add(sp, sp, Operand(3 * kPointerSize));
410  __ Ret();
411 
412  __ bind(&check_fast_elements);
413  __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
414  __ b(ne, &double_elements);
415  GenerateFastCloneShallowArrayCommon(masm, length_,
416  CLONE_ELEMENTS, &slow_case);
417  // Return and remove the on-stack parameters.
418  __ add(sp, sp, Operand(3 * kPointerSize));
419  __ Ret();
420 
421  __ bind(&double_elements);
422  mode = CLONE_DOUBLE_ELEMENTS;
423  // Fall through to generate the code to handle double elements.
424  }
425 
426  if (FLAG_debug_code) {
427  const char* message;
428  Heap::RootListIndex expected_map_index;
429  if (mode == CLONE_ELEMENTS) {
430  message = "Expected (writable) fixed array";
431  expected_map_index = Heap::kFixedArrayMapRootIndex;
432  } else if (mode == CLONE_DOUBLE_ELEMENTS) {
433  message = "Expected (writable) fixed double array";
434  expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
435  } else {
437  message = "Expected copy-on-write fixed array";
438  expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
439  }
440  __ push(r3);
443  __ CompareRoot(r3, expected_map_index);
444  __ Assert(eq, message);
445  __ pop(r3);
446  }
447 
448  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
449 
450  // Return and remove the on-stack parameters.
451  __ add(sp, sp, Operand(3 * kPointerSize));
452  __ Ret();
453 
454  __ bind(&slow_case);
455  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
456 }
457 
458 
459 void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
460  // Stack layout on entry:
461  //
462  // [sp]: object literal flags.
463  // [sp + kPointerSize]: constant properties.
464  // [sp + (2 * kPointerSize)]: literal index.
465  // [sp + (3 * kPointerSize)]: literals array.
466 
467  // Load boilerplate object into r3 and check if we need to create a
468  // boilerplate.
469  Label slow_case;
470  __ ldr(r3, MemOperand(sp, 3 * kPointerSize));
471  __ ldr(r0, MemOperand(sp, 2 * kPointerSize));
472  __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
474  __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
475  __ b(eq, &slow_case);
476 
477  // Check that the boilerplate contains only fast properties and we can
478  // statically determine the instance size.
479  int size = JSObject::kHeaderSize + length_ * kPointerSize;
482  __ cmp(r0, Operand(size >> kPointerSizeLog2));
483  __ b(ne, &slow_case);
484 
485  // Allocate the JS object and copy header together with all in-object
486  // properties from the boilerplate.
487  __ AllocateInNewSpace(size, r0, r1, r2, &slow_case, TAG_OBJECT);
488  for (int i = 0; i < size; i += kPointerSize) {
489  __ ldr(r1, FieldMemOperand(r3, i));
490  __ str(r1, FieldMemOperand(r0, i));
491  }
492 
493  // Return and remove the on-stack parameters.
494  __ add(sp, sp, Operand(4 * kPointerSize));
495  __ Ret();
496 
497  __ bind(&slow_case);
498  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
499 }
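
As a worked example of the size check above, assuming the usual 32-bit layout (kPointerSize is 4, kPointerSizeLog2 is 2, and JSObject::kHeaderSize spans the map, properties and elements fields, i.e. 12 bytes): a boilerplate with two in-object properties gives size = 12 + 2 * 4 = 20 bytes, so the cmp compares the instance size recorded in the boilerplate's map against 20 >> 2 = 5 words.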
500 
501 
502 // Takes a Smi and converts to an IEEE 64 bit floating point value in two
503 // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
504 // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
505 // scratch register. Destroys the source register. No GC occurs during this
506 // stub so you don't have to set up the frame.
507 class ConvertToDoubleStub : public CodeStub {
508  public:
509  ConvertToDoubleStub(Register result_reg_1,
510  Register result_reg_2,
511  Register source_reg,
512  Register scratch_reg)
513  : result1_(result_reg_1),
514  result2_(result_reg_2),
515  source_(source_reg),
516  zeros_(scratch_reg) { }
517 
518  private:
519  Register result1_;
520  Register result2_;
521  Register source_;
522  Register zeros_;
523 
524  // Minor key encoding in 16 bits.
525  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
526  class OpBits: public BitField<Token::Value, 2, 14> {};
527 
528  Major MajorKey() { return ConvertToDouble; }
529  int MinorKey() {
530  // Encode the parameters in a unique 16 bit value.
531  return result1_.code() +
532  (result2_.code() << 4) +
533  (source_.code() << 8) +
534  (zeros_.code() << 12);
535  }
536 
537  void Generate(MacroAssembler* masm);
538 };
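
A worked example of the MinorKey packing, assuming the usual ARM register codes (r0 = 0 through r15 = 15): the combination ConvertToDoubleStub(r3, r2, r7, r6), which EmitSmiNonsmiComparison instantiates further down, encodes as 3 + (2 << 4) + (7 << 8) + (6 << 12) = 0x6723, one 4-bit nibble per register in the 16-bit minor key.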
539 
540 
541 void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
542  Register exponent = result1_;
543  Register mantissa = result2_;
544 
545  Label not_special;
546  // Convert from Smi to integer.
547  __ mov(source_, Operand(source_, ASR, kSmiTagSize));
548  // Move sign bit from source to destination. This works because the sign bit
549  // in the exponent word of the double has the same position and polarity as
550  // the 2's complement sign bit in a Smi.
551  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
552  __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
553  // Subtract from 0 if source was negative.
554  __ rsb(source_, source_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
555 
556  // We have -1, 0 or 1, which we treat specially. Register source_ contains
557  // absolute value: it is either equal to 1 (special case of -1 and 1),
558  // greater than 1 (not a special case) or less than 1 (special case of 0).
559  __ cmp(source_, Operand(1));
560  __ b(gt, &not_special);
561 
562  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
563  const uint32_t exponent_word_for_1 =
565  __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
566  // 1, 0 and -1 all have 0 for the second word.
567  __ mov(mantissa, Operand(0, RelocInfo::NONE));
568  __ Ret();
569 
570  __ bind(&not_special);
571  // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5.
572  // Gets the wrong answer for 0, but we already checked for that case above.
573  __ CountLeadingZeros(zeros_, source_, mantissa);
574  // Compute exponent and or it into the exponent register.
575  // We use mantissa as a scratch register here. Use a fudge factor to
576  // divide the constant 31 + HeapNumber::kExponentBias, 0x41e, into two parts
577  // that fit in the ARM's constant field.
578  int fudge = 0x400;
579  __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
580  __ add(mantissa, mantissa, Operand(fudge));
581  __ orr(exponent,
582  exponent,
583  Operand(mantissa, LSL, HeapNumber::kExponentShift));
584  // Shift up the source chopping the top bit off.
585  __ add(zeros_, zeros_, Operand(1));
586  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
587  __ mov(source_, Operand(source_, LSL, zeros_));
588  // Compute lower part of fraction (last 12 bits).
589  __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
590  // And the top (top 20 bits).
591  __ orr(exponent,
592  exponent,
593  Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
594  __ Ret();
595 }
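
The same encoding can be mirrored on the host with ordinary integer arithmetic. The sketch below is an assumed helper, not part of V8; it follows the stub's CountLeadingZeros/shift sequence, excludes -1, 0 and 1 (which the stub special-cases), and assumes a GCC/Clang-style __builtin_clz.

#include <cstdint>

// Returns the IEEE 754 bit pattern of a signed 32-bit value other than -1, 0 or 1.
static uint64_t EncodeInt32AsDouble(int32_t value) {
  const uint32_t kExponentBias = 1023;
  uint32_t sign = value < 0 ? 0x80000000u : 0u;
  uint32_t magnitude = value < 0 ? 0u - static_cast<uint32_t>(value)
                                 : static_cast<uint32_t>(value);
  int zeros = __builtin_clz(magnitude);            // CountLeadingZeros
  uint32_t exponent = 31 - zeros + kExponentBias;  // unbiased exponent is 31 - zeros
  uint32_t shifted = magnitude << (zeros + 1);     // shift up, chopping off the implicit 1
  uint32_t high = sign | (exponent << 20) | (shifted >> 12);  // sign | exponent | top 20 fraction bits
  uint32_t low = shifted << 20;                    // remaining 32 fraction bits
  return (static_cast<uint64_t>(high) << 32) | low;
}
// For example, EncodeInt32AsDouble(5) == 0x4014000000000000, the bits of 5.0.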
596 
597 
598 void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
600  Register scratch1,
601  Register scratch2) {
603  CpuFeatures::Scope scope(VFP2);
604  __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
605  __ vmov(d7.high(), scratch1);
606  __ vcvt_f64_s32(d7, d7.high());
607  __ mov(scratch1, Operand(r1, ASR, kSmiTagSize));
608  __ vmov(d6.high(), scratch1);
609  __ vcvt_f64_s32(d6, d6.high());
610  if (destination == kCoreRegisters) {
611  __ vmov(r2, r3, d7);
612  __ vmov(r0, r1, d6);
613  }
614  } else {
615  ASSERT(destination == kCoreRegisters);
616  // Write Smi from r0 to r3 and r2 in double format.
617  __ mov(scratch1, Operand(r0));
618  ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
619  __ push(lr);
620  __ Call(stub1.GetCode());
621  // Write Smi from r1 to r1 and r0 in double format.
622  __ mov(scratch1, Operand(r1));
623  ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
624  __ Call(stub2.GetCode());
625  __ pop(lr);
626  }
627 }
628 
629 
631  MacroAssembler* masm,
633  Register heap_number_map,
634  Register scratch1,
635  Register scratch2,
636  Label* slow) {
637 
638  // Load right operand (r0) to d7 or r2/r3.
639  LoadNumber(masm, destination,
640  r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow);
641 
643  // Load left operand (r1) to d6 or r0/r1.
643  LoadNumber(masm, destination,
644  r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow);
645 }
646 
647 
648 void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
649  Destination destination,
650  Register object,
651  DwVfpRegister dst,
652  Register dst1,
653  Register dst2,
654  Register heap_number_map,
655  Register scratch1,
656  Register scratch2,
657  Label* not_number) {
658  __ AssertRootValue(heap_number_map,
659  Heap::kHeapNumberMapRootIndex,
660  "HeapNumberMap register clobbered.");
661 
662  Label is_smi, done;
663 
664  // Smi-check
665  __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
666  // Heap number check
667  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
668 
669  // Handle loading a double from a heap number.
671  destination == kVFPRegisters) {
672  CpuFeatures::Scope scope(VFP2);
673  // Load the double from tagged HeapNumber to double register.
674  __ sub(scratch1, object, Operand(kHeapObjectTag));
675  __ vldr(dst, scratch1, HeapNumber::kValueOffset);
676  } else {
677  ASSERT(destination == kCoreRegisters);
678  // Load the double from heap number to dst1 and dst2 in double format.
679  __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
680  }
681  __ jmp(&done);
682 
683  // Handle loading a double from a smi.
684  __ bind(&is_smi);
686  CpuFeatures::Scope scope(VFP2);
687  // Convert smi to double using VFP instructions.
688  __ vmov(dst.high(), scratch1);
689  __ vcvt_f64_s32(dst, dst.high());
690  if (destination == kCoreRegisters) {
691  // Load the converted smi to dst1 and dst2 in double format.
692  __ vmov(dst1, dst2, dst);
693  }
694  } else {
695  ASSERT(destination == kCoreRegisters);
696  // Write smi to dst1 and dst2 double format.
697  __ mov(scratch1, Operand(object));
698  ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
699  __ push(lr);
700  __ Call(stub.GetCode());
701  __ pop(lr);
702  }
703 
704  __ bind(&done);
705 }
706 
707 
708 void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
709  Register object,
710  Register dst,
711  Register heap_number_map,
712  Register scratch1,
713  Register scratch2,
714  Register scratch3,
715  DwVfpRegister double_scratch,
716  Label* not_number) {
717  __ AssertRootValue(heap_number_map,
718  Heap::kHeapNumberMapRootIndex,
719  "HeapNumberMap register clobbered.");
720  Label done;
721  Label not_in_int32_range;
722 
723  __ UntagAndJumpIfSmi(dst, object, &done);
724  __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
725  __ cmp(scratch1, heap_number_map);
726  __ b(ne, not_number);
727  __ ConvertToInt32(object,
728  dst,
729  scratch1,
730  scratch2,
731  double_scratch,
732  &not_in_int32_range);
733  __ jmp(&done);
734 
735  __ bind(&not_in_int32_range);
736  __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
737  __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
738 
739  __ EmitOutOfInt32RangeTruncate(dst,
740  scratch1,
741  scratch2,
742  scratch3);
743  __ bind(&done);
744 }
745 
746 
747 void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
748  Register int_scratch,
749  Destination destination,
750  DwVfpRegister double_dst,
751  Register dst1,
752  Register dst2,
753  Register scratch2,
754  SwVfpRegister single_scratch) {
755  ASSERT(!int_scratch.is(scratch2));
756  ASSERT(!int_scratch.is(dst1));
757  ASSERT(!int_scratch.is(dst2));
758 
759  Label done;
760 
762  CpuFeatures::Scope scope(VFP2);
763  __ vmov(single_scratch, int_scratch);
764  __ vcvt_f64_s32(double_dst, single_scratch);
765  if (destination == kCoreRegisters) {
766  __ vmov(dst1, dst2, double_dst);
767  }
768  } else {
769  Label fewer_than_20_useful_bits;
770  // Expected output:
771  // | dst2 | dst1 |
772  // | s | exp | mantissa |
773 
774  // Check for zero.
775  __ cmp(int_scratch, Operand::Zero());
776  __ mov(dst2, int_scratch);
777  __ mov(dst1, int_scratch);
778  __ b(eq, &done);
779 
780  // Preload the sign of the value.
781  __ and_(dst2, int_scratch, Operand(HeapNumber::kSignMask), SetCC);
782  // Get the absolute value of the object (as an unsigned integer).
783  __ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi);
784 
785  // Get mantissa[51:20].
786 
787  // Get the position of the first set bit.
788  __ CountLeadingZeros(dst1, int_scratch, scratch2);
789  __ rsb(dst1, dst1, Operand(31));
790 
791  // Set the exponent.
792  __ add(scratch2, dst1, Operand(HeapNumber::kExponentBias));
793  __ Bfi(dst2, scratch2, scratch2,
794  HeapNumber::kExponentShift, HeapNumber::kExponentBits);
795 
796  // Clear the first non null bit.
797  __ mov(scratch2, Operand(1));
798  __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst1));
799 
800  __ cmp(dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
801  // Get the number of bits to set in the lower part of the mantissa.
802  __ sub(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
803  __ b(mi, &fewer_than_20_useful_bits);
804  // Set the higher 20 bits of the mantissa.
805  __ orr(dst2, dst2, Operand(int_scratch, LSR, scratch2));
806  __ rsb(scratch2, scratch2, Operand(32));
807  __ mov(dst1, Operand(int_scratch, LSL, scratch2));
808  __ b(&done);
809 
810  __ bind(&fewer_than_20_useful_bits);
811  __ rsb(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
812  __ mov(scratch2, Operand(int_scratch, LSL, scratch2));
813  __ orr(dst2, dst2, scratch2);
814  // Set dst1 to 0.
815  __ mov(dst1, Operand::Zero());
816  }
817  __ bind(&done);
818 }
819 
820 
821 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
822  Register object,
823  Destination destination,
824  DwVfpRegister double_dst,
825  DwVfpRegister double_scratch,
826  Register dst1,
827  Register dst2,
828  Register heap_number_map,
829  Register scratch1,
830  Register scratch2,
831  SwVfpRegister single_scratch,
832  Label* not_int32) {
833  ASSERT(!scratch1.is(object) && !scratch2.is(object));
834  ASSERT(!scratch1.is(scratch2));
835  ASSERT(!heap_number_map.is(object) &&
836  !heap_number_map.is(scratch1) &&
837  !heap_number_map.is(scratch2));
838 
839  Label done, obj_is_not_smi;
840 
841  __ JumpIfNotSmi(object, &obj_is_not_smi);
842  __ SmiUntag(scratch1, object);
843  ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
844  scratch2, single_scratch);
845  __ b(&done);
846 
847  __ bind(&obj_is_not_smi);
848  __ AssertRootValue(heap_number_map,
849  Heap::kHeapNumberMapRootIndex,
850  "HeapNumberMap register clobbered.");
851  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
852 
853  // Load the number.
855  CpuFeatures::Scope scope(VFP2);
856  // Load the double value.
857  __ sub(scratch1, object, Operand(kHeapObjectTag));
858  __ vldr(double_dst, scratch1, HeapNumber::kValueOffset);
859 
860  __ EmitVFPTruncate(kRoundToZero,
861  scratch1,
862  double_dst,
863  scratch2,
864  double_scratch,
866 
867  // Jump to not_int32 if the operation did not succeed.
868  __ b(ne, not_int32);
869 
870  if (destination == kCoreRegisters) {
871  __ vmov(dst1, dst2, double_dst);
872  }
873 
874  } else {
875  ASSERT(!scratch1.is(object) && !scratch2.is(object));
876  // Load the double value in the destination registers.
877  __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
878 
879  // Check for 0 and -0.
880  __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask));
881  __ orr(scratch1, scratch1, Operand(dst2));
882  __ cmp(scratch1, Operand::Zero());
883  __ b(eq, &done);
884 
885  // Check that the value can be exactly represented by a 32-bit integer.
886  // Jump to not_int32 if that's not the case.
887  DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
888 
889  // dst1 and dst2 were trashed. Reload the double value.
890  __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
891  }
892 
893  __ bind(&done);
894 }
895 
896 
897 void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
898  Register object,
899  Register dst,
900  Register heap_number_map,
901  Register scratch1,
902  Register scratch2,
903  Register scratch3,
904  DwVfpRegister double_scratch0,
905  DwVfpRegister double_scratch1,
906  Label* not_int32) {
907  ASSERT(!dst.is(object));
908  ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
909  ASSERT(!scratch1.is(scratch2) &&
910  !scratch1.is(scratch3) &&
911  !scratch2.is(scratch3));
912 
913  Label done;
914 
915  __ UntagAndJumpIfSmi(dst, object, &done);
916 
917  __ AssertRootValue(heap_number_map,
918  Heap::kHeapNumberMapRootIndex,
919  "HeapNumberMap register clobbered.");
920  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
921 
922  // Object is a heap number.
923  // Convert the floating point value to a 32-bit integer.
925  CpuFeatures::Scope scope(VFP2);
926 
927  // Load the double value.
928  __ sub(scratch1, object, Operand(kHeapObjectTag));
929  __ vldr(double_scratch0, scratch1, HeapNumber::kValueOffset);
930 
931  __ EmitVFPTruncate(kRoundToZero,
932  dst,
933  double_scratch0,
934  scratch1,
935  double_scratch1,
937 
938  // Jump to not_int32 if the operation did not succeed.
939  __ b(ne, not_int32);
940  } else {
941  // Load the double value in the destination registers.
942  __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
943  __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
944 
945  // Check for 0 and -0.
946  __ bic(dst, scratch1, Operand(HeapNumber::kSignMask));
947  __ orr(dst, scratch2, Operand(dst));
948  __ cmp(dst, Operand::Zero());
949  __ b(eq, &done);
950 
951  DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
952 
953  // Registers state after DoubleIs32BitInteger.
954  // dst: mantissa[51:20].
955  // scratch2: 1
956 
957  // Shift back the higher bits of the mantissa.
958  __ mov(dst, Operand(dst, LSR, scratch3));
959  // Set the implicit first bit.
960  __ rsb(scratch3, scratch3, Operand(32));
961  __ orr(dst, dst, Operand(scratch2, LSL, scratch3));
962  // Set the sign.
963  __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
964  __ tst(scratch1, Operand(HeapNumber::kSignMask));
965  __ rsb(dst, dst, Operand::Zero(), LeaveCC, mi);
966  }
967 
968  __ bind(&done);
969 }
970 
971 
972 void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
973  Register src1,
974  Register src2,
975  Register dst,
976  Register scratch,
977  Label* not_int32) {
978  // Get exponent alone in scratch.
979  __ Ubfx(scratch,
980  src1,
981  HeapNumber::kExponentShift,
983 
984  // Subtract the bias from the exponent.
985  __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias), SetCC);
986 
987  // src1: higher (exponent) part of the double value.
988  // src2: lower (mantissa) part of the double value.
989  // scratch: unbiased exponent.
990 
991  // Fast cases. Check for obvious non 32-bit integer values.
992  // Negative exponent cannot yield 32-bit integers.
993  __ b(mi, not_int32);
994  // Exponent greater than 31 cannot yield 32-bit integers.
995  // Also, a positive value with an exponent equal to 31 is outside of the
996  // signed 32-bit integer range.
997  // Another way to put it is that if (exponent - signbit) > 30 then the
998  // number cannot be represented as an int32.
999  Register tmp = dst;
1000  __ sub(tmp, scratch, Operand(src1, LSR, 31));
1001  __ cmp(tmp, Operand(30));
1002  __ b(gt, not_int32);
1003  // If bits [21:0] in the mantissa are not null, the number cannot be a 32-bit integer.
1004  __ tst(src2, Operand(0x3fffff));
1005  __ b(ne, not_int32);
1006 
1007  // Otherwise the exponent needs to be big enough to shift left all the
1008  // non zero bits left. So we need the (30 - exponent) last bits of the
1009  // 31 higher bits of the mantissa to be null.
1010  // Because bits [21:0] are null, we can check instead that the
1011  // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
1012 
1013  // Get the 32 higher bits of the mantissa in dst.
1014  __ Ubfx(dst,
1015  src2,
1018  __ orr(dst,
1019  dst,
1020  Operand(src1, LSL, HeapNumber::kNonMantissaBitsInTopWord));
1021 
1022  // Create the mask and test the lower bits (of the higher bits).
1023  __ rsb(scratch, scratch, Operand(32));
1024  __ mov(src2, Operand(1));
1025  __ mov(src1, Operand(src2, LSL, scratch));
1026  __ sub(src1, src1, Operand(1));
1027  __ tst(dst, src1);
1028  __ b(ne, not_int32);
1029 }
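
A host-side reference for the property being tested, written directly from the exponent and fraction fields (an assumed helper, not part of V8). Callers deal with +0/-0 before reaching DoubleIs32BitInteger, so the sketch simply rejects values with a negative unbiased exponent, and it handles the -2^31 boundary explicitly rather than through the (exponent - signbit) trick used above.

#include <cstdint>
#include <cstring>

static bool DoubleIsInt32(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  bool negative = (bits >> 63) != 0;
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
  uint64_t fraction = bits & ((uint64_t{1} << 52) - 1);
  if (exponent < 0) return false;  // |value| < 1; +/-0 is handled by the callers
  if (exponent > 30) {
    // The only in-range value with a bigger exponent is -2^31 itself.
    return negative && exponent == 31 && fraction == 0;
  }
  // The value is an integer exactly when every fraction bit below
  // position 52 - exponent is zero.
  return (fraction & ((uint64_t{1} << (52 - exponent)) - 1)) == 0;
}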
1030 
1031 
1033  MacroAssembler* masm,
1034  Token::Value op,
1035  Register heap_number_result,
1036  Register scratch) {
1037  // Using core registers:
1038  // r0: Left value (least significant part of mantissa).
1039  // r1: Left value (sign, exponent, top of mantissa).
1040  // r2: Right value (least significant part of mantissa).
1041  // r3: Right value (sign, exponent, top of mantissa).
1042 
1043  // Assert that heap_number_result is callee-saved.
1044  // We currently always use r5 to pass it.
1045  ASSERT(heap_number_result.is(r5));
1046 
1047  // Push the current return address before the C call. Return will be
1048  // through pop(pc) below.
1049  __ push(lr);
1050  __ PrepareCallCFunction(0, 2, scratch);
1051  if (masm->use_eabi_hardfloat()) {
1052  CpuFeatures::Scope scope(VFP2);
1053  __ vmov(d0, r0, r1);
1054  __ vmov(d1, r2, r3);
1055  }
1056  {
1057  AllowExternalCallThatCantCauseGC scope(masm);
1058  __ CallCFunction(
1059  ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
1060  }
1061  // Store answer in the overwritable heap number. Double returned in
1062  // registers r0 and r1 or in d0.
1063  if (masm->use_eabi_hardfloat()) {
1064  CpuFeatures::Scope scope(VFP2);
1065  __ vstr(d0,
1066  FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
1067  } else {
1068  __ Strd(r0, r1, FieldMemOperand(heap_number_result,
1070  }
1071  // Place heap_number_result in r0 and return to the pushed return address.
1072  __ mov(r0, Operand(heap_number_result));
1073  __ pop(pc);
1074 }
1075 
1076 
1078  // These variants are compiled ahead of time. See next method.
1079  if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
1080  return true;
1081  }
1082  if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) {
1083  return true;
1084  }
1085  // Other register combinations are generated as and when they are needed,
1086  // so it is unsafe to call them from stubs (we can't generate a stub while
1087  // we are generating a stub).
1088  return false;
1089 }
1090 
1091 
1095  stub1.GetCode()->set_is_pregenerated(true);
1096  stub2.GetCode()->set_is_pregenerated(true);
1097 }
1098 
1099 
1100 // See comment for class.
1101 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
1102  Label max_negative_int;
1103  // the_int_ has the answer which is a signed int32 but not a Smi.
1104  // We test for the special value that has a different exponent. This test
1105  // has the neat side effect of setting the flags according to the sign.
1106  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
1107  __ cmp(the_int_, Operand(0x80000000u));
1108  __ b(eq, &max_negative_int);
1109  // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
1110  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
1111  uint32_t non_smi_exponent =
1112  (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
1113  __ mov(scratch_, Operand(non_smi_exponent));
1114  // Set the sign bit in scratch_ if the value was negative.
1115  __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
1116  // Subtract from 0 if the value was negative.
1117  __ rsb(the_int_, the_int_, Operand(0, RelocInfo::NONE), LeaveCC, cs);
1118  // We should be masking the implicit first digit of the mantissa away here,
1119  // but it just ends up combining harmlessly with the last digit of the
1120  // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
1121  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
1122  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
1123  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
1124  __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
1125  __ str(scratch_, FieldMemOperand(the_heap_number_,
1127  __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
1128  __ str(scratch_, FieldMemOperand(the_heap_number_,
1130  __ Ret();
1131 
1132  __ bind(&max_negative_int);
1133  // The max negative int32 is stored as a positive number in the mantissa of
1134  // a double because it uses a sign bit instead of using two's complement.
1135  // The actual mantissa bits stored are all 0 because the implicit most
1136  // significant 1 bit is not stored.
1137  non_smi_exponent += 1 << HeapNumber::kExponentShift;
1138  __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
1139  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
1140  __ mov(ip, Operand(0, RelocInfo::NONE));
1141  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
1142  __ Ret();
1143 }
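
A concrete trace of the fast path above, for the_int_ = 0x40000000 (2^30, the smallest positive non-Smi int32 on a 32-bit build): non_smi_exponent is (1023 + 30) << 20 = 0x41D00000, the value is positive so neither the sign bit nor the rsb applies, and shift_distance is 12 - 2 = 10, so the_int_ >> 10 = 0x00100000 sets only bit 20, which lands on the exponent's lowest bit (already 1) and leaves the OR result at 0x41D00000; the second word, the_int_ << 22, is 0. The stored double is 0x41D00000 00000000, exactly 2^30.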
1144 
1145 
1146 // Handle the case where the lhs and rhs are the same object.
1147 // Equality is almost reflexive (everything but NaN), so this is a test
1148 // for "identity and not NaN".
1149 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
1150  Label* slow,
1151  Condition cond,
1152  bool never_nan_nan) {
1153  Label not_identical;
1154  Label heap_number, return_equal;
1155  __ cmp(r0, r1);
1156  __ b(ne, &not_identical);
1157 
1158  // The two objects are identical. If we know that one of them isn't NaN then
1159  // we now know they test equal.
1160  if (cond != eq || !never_nan_nan) {
1161  // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
1162  // so we do the second best thing - test it ourselves.
1163  // They are both equal and they are not both Smis so both of them are not
1164  // Smis. If it's not a heap number, then return equal.
1165  if (cond == lt || cond == gt) {
1166  __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
1167  __ b(ge, slow);
1168  } else {
1169  __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
1170  __ b(eq, &heap_number);
1171  // Comparing JS objects with <=, >= is complicated.
1172  if (cond != eq) {
1173  __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
1174  __ b(ge, slow);
1175  // Normally here we fall through to return_equal, but undefined is
1176  // special: (undefined == undefined) == true, but
1177  // (undefined <= undefined) == false! See ECMAScript 11.8.5.
1178  if (cond == le || cond == ge) {
1179  __ cmp(r4, Operand(ODDBALL_TYPE));
1180  __ b(ne, &return_equal);
1181  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
1182  __ cmp(r0, r2);
1183  __ b(ne, &return_equal);
1184  if (cond == le) {
1185  // undefined <= undefined should fail.
1186  __ mov(r0, Operand(GREATER));
1187  } else {
1188  // undefined >= undefined should fail.
1189  __ mov(r0, Operand(LESS));
1190  }
1191  __ Ret();
1192  }
1193  }
1194  }
1195  }
1196 
1197  __ bind(&return_equal);
1198  if (cond == lt) {
1199  __ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
1200  } else if (cond == gt) {
1201  __ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
1202  } else {
1203  __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
1204  }
1205  __ Ret();
1206 
1207  if (cond != eq || !never_nan_nan) {
1208  // For less and greater we don't have to check for NaN since the result of
1209  // x < x is false regardless. For the others here is some code to check
1210  // for NaN.
1211  if (cond != lt && cond != gt) {
1212  __ bind(&heap_number);
1213  // It is a heap number, so return non-equal if it's NaN and equal if it's
1214  // not NaN.
1215 
1216  // The representation of NaN values has all exponent bits (52..62) set,
1217  // and not all mantissa bits (0..51) clear.
1218  // Read top bits of double representation (second word of value).
1220  // Test that exponent bits are all set.
1221  __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
1222  // NaNs have all-one exponents so they sign extend to -1.
1223  __ cmp(r3, Operand(-1));
1224  __ b(ne, &return_equal);
1225 
1226  // Shift out flag and all exponent bits, retaining only mantissa.
1228  // Or with all low-bits of mantissa.
1230  __ orr(r0, r3, Operand(r2), SetCC);
1231  // For equal we already have the right value in r0: Return zero (equal)
1232  // if all bits in mantissa are zero (it's an Infinity) and non-zero if
1233  // not (it's a NaN). For <= and >= we need to load r0 with the failing
1234  // value if it's a NaN.
1235  if (cond != eq) {
1236  // All-zero means Infinity means equal.
1237  __ Ret(eq);
1238  if (cond == le) {
1239  __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
1240  } else {
1241  __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
1242  }
1243  }
1244  __ Ret();
1245  }
1246  // No fall through here.
1247  }
1248 
1249  __ bind(&not_identical);
1250 }
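
The NaN test above relies only on the IEEE 754 layout: a NaN has all eleven exponent bits set and at least one fraction bit set (an all-ones exponent with a zero fraction is an infinity). A small host-side sketch of the same test (an assumed helper, not part of V8):

#include <cstdint>
#include <cstring>

static bool IsNaNBits(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  bool exponent_all_ones = ((bits >> 52) & 0x7FF) == 0x7FF;         // what the Sbfx/cmp -1 pair checks
  bool fraction_nonzero = (bits & ((uint64_t{1} << 52) - 1)) != 0;  // what the orr ... SetCC checks
  return exponent_all_ones && fraction_nonzero;
}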
1251 
1252 
1253 // See comment at call site.
1254 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
1255  Register lhs,
1256  Register rhs,
1257  Label* lhs_not_nan,
1258  Label* slow,
1259  bool strict) {
1260  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
1261  (lhs.is(r1) && rhs.is(r0)));
1262 
1263  Label rhs_is_smi;
1264  __ JumpIfSmi(rhs, &rhs_is_smi);
1265 
1266  // Lhs is a Smi. Check whether the rhs is a heap number.
1267  __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
1268  if (strict) {
1269  // If rhs is not a number and lhs is a Smi then strict equality cannot
1270  // succeed. Return non-equal.
1271  // If rhs is r0 then there is already a non zero value in it.
1272  if (!rhs.is(r0)) {
1273  __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
1274  }
1275  __ Ret(ne);
1276  } else {
1277  // Smi compared non-strictly with a non-Smi non-heap-number. Call
1278  // the runtime.
1279  __ b(ne, slow);
1280  }
1281 
1282  // Lhs is a smi, rhs is a number.
1284  // Convert lhs to a double in d7.
1285  CpuFeatures::Scope scope(VFP2);
1286  __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
1287  // Load the double from rhs, tagged HeapNumber r0, to d6.
1288  __ sub(r7, rhs, Operand(kHeapObjectTag));
1289  __ vldr(d6, r7, HeapNumber::kValueOffset);
1290  } else {
1291  __ push(lr);
1292  // Convert lhs to a double in r2, r3.
1293  __ mov(r7, Operand(lhs));
1294  ConvertToDoubleStub stub1(r3, r2, r7, r6);
1295  __ Call(stub1.GetCode());
1296  // Load rhs to a double in r0, r1.
1298  __ pop(lr);
1299  }
1300 
1301  // We now have both loaded as doubles but we can skip the lhs nan check
1302  // since it's a smi.
1303  __ jmp(lhs_not_nan);
1304 
1305  __ bind(&rhs_is_smi);
1306  // Rhs is a smi. Check whether the non-smi lhs is a heap number.
1307  __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
1308  if (strict) {
1309  // If lhs is not a number and rhs is a smi then strict equality cannot
1310  // succeed. Return non-equal.
1311  // If lhs is r0 then there is already a non zero value in it.
1312  if (!lhs.is(r0)) {
1313  __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
1314  }
1315  __ Ret(ne);
1316  } else {
1317  // Smi compared non-strictly with a non-smi non-heap-number. Call
1318  // the runtime.
1319  __ b(ne, slow);
1320  }
1321 
1322  // Rhs is a smi, lhs is a heap number.
1324  CpuFeatures::Scope scope(VFP2);
1325  // Load the double from lhs, tagged HeapNumber r1, to d7.
1326  __ sub(r7, lhs, Operand(kHeapObjectTag));
1327  __ vldr(d7, r7, HeapNumber::kValueOffset);
1328  // Convert rhs to a double in d6.
1329  __ SmiToDoubleVFPRegister(rhs, d6, r7, s13);
1330  } else {
1331  __ push(lr);
1332  // Load lhs to a double in r2, r3.
1334  // Convert rhs to a double in r0, r1.
1335  __ mov(r7, Operand(rhs));
1336  ConvertToDoubleStub stub2(r1, r0, r7, r6);
1337  __ Call(stub2.GetCode());
1338  __ pop(lr);
1339  }
1340  // Fall through to both_loaded_as_doubles.
1341 }
1342 
1343 
1344 void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) {
1346  Register rhs_exponent = exp_first ? r0 : r1;
1347  Register lhs_exponent = exp_first ? r2 : r3;
1348  Register rhs_mantissa = exp_first ? r1 : r0;
1349  Register lhs_mantissa = exp_first ? r3 : r2;
1350  Label one_is_nan, neither_is_nan;
1351 
1352  __ Sbfx(r4,
1353  lhs_exponent,
1354  HeapNumber::kExponentShift,
1356  // NaNs have all-one exponents so they sign extend to -1.
1357  __ cmp(r4, Operand(-1));
1358  __ b(ne, lhs_not_nan);
1359  __ mov(r4,
1360  Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
1361  SetCC);
1362  __ b(ne, &one_is_nan);
1363  __ cmp(lhs_mantissa, Operand(0, RelocInfo::NONE));
1364  __ b(ne, &one_is_nan);
1365 
1366  __ bind(lhs_not_nan);
1367  __ Sbfx(r4,
1368  rhs_exponent,
1369  HeapNumber::kExponentShift,
1371  // NaNs have all-one exponents so they sign extend to -1.
1372  __ cmp(r4, Operand(-1));
1373  __ b(ne, &neither_is_nan);
1374  __ mov(r4,
1375  Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
1376  SetCC);
1377  __ b(ne, &one_is_nan);
1378  __ cmp(rhs_mantissa, Operand(0, RelocInfo::NONE));
1379  __ b(eq, &neither_is_nan);
1380 
1381  __ bind(&one_is_nan);
1382  // NaN comparisons always fail.
1383  // Load whatever we need in r0 to make the comparison fail.
1384  if (cond == lt || cond == le) {
1385  __ mov(r0, Operand(GREATER));
1386  } else {
1387  __ mov(r0, Operand(LESS));
1388  }
1389  __ Ret();
1390 
1391  __ bind(&neither_is_nan);
1392 }
1393 
1394 
1395 // See comment at call site.
1396 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
1397  Condition cond) {
1399  Register rhs_exponent = exp_first ? r0 : r1;
1400  Register lhs_exponent = exp_first ? r2 : r3;
1401  Register rhs_mantissa = exp_first ? r1 : r0;
1402  Register lhs_mantissa = exp_first ? r3 : r2;
1403 
1404  // r0, r1, r2, r3 have the two doubles. Neither is a NaN.
1405  if (cond == eq) {
1406  // Doubles are not equal unless they have the same bit pattern.
1407  // Exception: 0 and -0.
1408  __ cmp(rhs_mantissa, Operand(lhs_mantissa));
1409  __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
1410  // Return non-zero if the numbers are unequal.
1411  __ Ret(ne);
1412 
1413  __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
1414  // If exponents are equal then return 0.
1415  __ Ret(eq);
1416 
1417  // Exponents are unequal. The only way we can return that the numbers
1418  // are equal is if one is -0 and the other is 0. We already dealt
1419  // with the case where both are -0 or both are 0.
1420  // We start by seeing if the mantissas (that are equal) or the bottom
1421  // 31 bits of the rhs exponent are non-zero. If so we return not
1422  // equal.
1423  __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
1424  __ mov(r0, Operand(r4), LeaveCC, ne);
1425  __ Ret(ne);
1426  // Now they are equal if and only if the lhs exponent is zero in its
1427  // low 31 bits.
1428  __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
1429  __ Ret();
1430  } else {
1431  // Call a native function to do a comparison between two non-NaNs.
1432  // Call C routine that may not cause GC or other trouble.
1433  __ push(lr);
1434  __ PrepareCallCFunction(0, 2, r5);
1435  if (masm->use_eabi_hardfloat()) {
1436  CpuFeatures::Scope scope(VFP2);
1437  __ vmov(d0, r0, r1);
1438  __ vmov(d1, r2, r3);
1439  }
1440 
1441  AllowExternalCallThatCantCauseGC scope(masm);
1442  __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
1443  0, 2);
1444  __ pop(pc); // Return.
1445  }
1446 }
1447 
1448 
1449 // See comment at call site.
1450 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
1451  Register lhs,
1452  Register rhs) {
1453  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
1454  (lhs.is(r1) && rhs.is(r0)));
1455 
1456  // If either operand is a JS object or an oddball value, then they are
1457  // not equal since their pointers are different.
1458  // There is no test for undetectability in strict equality.
1460  Label first_non_object;
1461  // Get the type of the first operand into r2 and compare it with
1462  // FIRST_SPEC_OBJECT_TYPE.
1463  __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE);
1464  __ b(lt, &first_non_object);
1465 
1466  // Return non-zero (r0 is not zero)
1467  Label return_not_equal;
1468  __ bind(&return_not_equal);
1469  __ Ret();
1470 
1471  __ bind(&first_non_object);
1472  // Check for oddballs: true, false, null, undefined.
1473  __ cmp(r2, Operand(ODDBALL_TYPE));
1474  __ b(eq, &return_not_equal);
1475 
1476  __ CompareObjectType(lhs, r3, r3, FIRST_SPEC_OBJECT_TYPE);
1477  __ b(ge, &return_not_equal);
1478 
1479  // Check for oddballs: true, false, null, undefined.
1480  __ cmp(r3, Operand(ODDBALL_TYPE));
1481  __ b(eq, &return_not_equal);
1482 
1483  // Now that we have the types we might as well check for symbol-symbol.
1484  // Ensure that no non-strings have the symbol bit set.
1486  STATIC_ASSERT(kSymbolTag != 0);
1487  __ and_(r2, r2, Operand(r3));
1488  __ tst(r2, Operand(kIsSymbolMask));
1489  __ b(ne, &return_not_equal);
1490 }
1491 
1492 
1493 // See comment at call site.
1494 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
1495  Register lhs,
1496  Register rhs,
1497  Label* both_loaded_as_doubles,
1498  Label* not_heap_numbers,
1499  Label* slow) {
1500  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
1501  (lhs.is(r1) && rhs.is(r0)));
1502 
1503  __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
1504  __ b(ne, not_heap_numbers);
1506  __ cmp(r2, r3);
1507  __ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
1508 
1509  // Both are heap numbers. Load them up then jump to the code we have
1510  // for that.
1512  CpuFeatures::Scope scope(VFP2);
1513  __ sub(r7, rhs, Operand(kHeapObjectTag));
1514  __ vldr(d6, r7, HeapNumber::kValueOffset);
1515  __ sub(r7, lhs, Operand(kHeapObjectTag));
1516  __ vldr(d7, r7, HeapNumber::kValueOffset);
1517  } else {
1520  }
1521  __ jmp(both_loaded_as_doubles);
1522 }
1523 
1524 
1525 // Fast negative check for symbol-to-symbol equality.
1526 static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
1527  Register lhs,
1528  Register rhs,
1529  Label* possible_strings,
1530  Label* not_both_strings) {
1531  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
1532  (lhs.is(r1) && rhs.is(r0)));
1533 
1534  // r2 is object type of rhs.
1535  // Ensure that no non-strings have the symbol bit set.
1536  Label object_test;
1537  STATIC_ASSERT(kSymbolTag != 0);
1538  __ tst(r2, Operand(kIsNotStringMask));
1539  __ b(ne, &object_test);
1540  __ tst(r2, Operand(kIsSymbolMask));
1541  __ b(eq, possible_strings);
1542  __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
1543  __ b(ge, not_both_strings);
1544  __ tst(r3, Operand(kIsSymbolMask));
1545  __ b(eq, possible_strings);
1546 
1547  // Both are symbols. We already checked they weren't the same pointer
1548  // so they are not equal.
1549  __ mov(r0, Operand(NOT_EQUAL));
1550  __ Ret();
1551 
1552  __ bind(&object_test);
1553  __ cmp(r2, Operand(FIRST_SPEC_OBJECT_TYPE));
1554  __ b(lt, not_both_strings);
1555  __ CompareObjectType(lhs, r2, r3, FIRST_SPEC_OBJECT_TYPE);
1556  __ b(lt, not_both_strings);
1557  // If both objects are undetectable, they are equal. Otherwise, they
1558  // are not equal, since they are different objects and an object is not
1559  // equal to undefined.
1563  __ and_(r0, r2, Operand(r3));
1564  __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
1565  __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
1566  __ Ret();
1567 }
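
The closing and/and/eor sequence reduces the two maps' bit fields (r2 and r3) to r0 = ((r2 & r3) & (1 << Map::kIsUndetectable)) ^ (1 << Map::kIsUndetectable), so r0 is 0, the "equal" result, exactly when both maps have the undetectable bit set, and non-zero, i.e. not equal, for every other pair of distinct objects.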
1568 
1569 
1570 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
1571  Register object,
1572  Register result,
1573  Register scratch1,
1574  Register scratch2,
1575  Register scratch3,
1576  bool object_is_smi,
1577  Label* not_found) {
1578  // Use of registers. Register result is used as a temporary.
1579  Register number_string_cache = result;
1580  Register mask = scratch3;
1581 
1582  // Load the number string cache.
1583  __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
1584 
1585  // Make the hash mask from the length of the number string cache. It
1586  // contains two elements (number and string) for each cache entry.
1587  __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
1588  // Divide length by two (length is a smi).
1589  __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
1590  __ sub(mask, mask, Operand(1)); // Make mask.
1591 
1592  // Calculate the entry in the number string cache. The hash value in the
1593  // number string cache for smis is just the smi value, and the hash for
1594  // doubles is the xor of the upper and lower words. See
1595  // Heap::GetNumberStringCache.
1596  Isolate* isolate = masm->isolate();
1597  Label is_smi;
1598  Label load_result_from_cache;
1599  if (!object_is_smi) {
1600  __ JumpIfSmi(object, &is_smi);
1602  CpuFeatures::Scope scope(VFP2);
1603  __ CheckMap(object,
1604  scratch1,
1605  Heap::kHeapNumberMapRootIndex,
1606  not_found,
1608 
1609  STATIC_ASSERT(8 == kDoubleSize);
1610  __ add(scratch1,
1611  object,
1613  __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
1614  __ eor(scratch1, scratch1, Operand(scratch2));
1615  __ and_(scratch1, scratch1, Operand(mask));
1616 
1617  // Calculate address of entry in string cache: each entry consists
1618  // of two pointer sized fields.
1619  __ add(scratch1,
1620  number_string_cache,
1621  Operand(scratch1, LSL, kPointerSizeLog2 + 1));
1622 
1623  Register probe = mask;
1624  __ ldr(probe,
1626  __ JumpIfSmi(probe, not_found);
1627  __ sub(scratch2, object, Operand(kHeapObjectTag));
1628  __ vldr(d0, scratch2, HeapNumber::kValueOffset);
1629  __ sub(probe, probe, Operand(kHeapObjectTag));
1630  __ vldr(d1, probe, HeapNumber::kValueOffset);
1631  __ VFPCompareAndSetFlags(d0, d1);
1632  __ b(ne, not_found); // The cache did not contain this value.
1633  __ b(&load_result_from_cache);
1634  } else {
1635  __ b(not_found);
1636  }
1637  }
1638 
1639  __ bind(&is_smi);
1640  Register scratch = scratch1;
1641  __ and_(scratch, mask, Operand(object, ASR, 1));
1642  // Calculate address of entry in string cache: each entry consists
1643  // of two pointer sized fields.
1644  __ add(scratch,
1645  number_string_cache,
1646  Operand(scratch, LSL, kPointerSizeLog2 + 1));
1647 
1648  // Check if the entry is the smi we are looking for.
1649  Register probe = mask;
1650  __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
1651  __ cmp(object, probe);
1652  __ b(ne, not_found);
1653 
1654  // Get the result from the cache.
1655  __ bind(&load_result_from_cache);
1656  __ ldr(result,
1657  FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
1658  __ IncrementCounter(isolate->counters()->number_to_string_native(),
1659  1,
1660  scratch1,
1661  scratch2);
1662 }
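
The cache hash computed above can be sketched on the host as follows (an assumed helper, not part of V8): Smi keys hash as their untagged value, heap numbers as the XOR of the two 32-bit halves of their IEEE 754 bits, and both are masked by half the cache length minus one, since each entry occupies a (number, string) pair of slots.

#include <cstdint>
#include <cstring>

static uint32_t NumberStringCacheHash(double number, bool is_smi,
                                      uint32_t cache_length) {
  uint32_t mask = cache_length / 2 - 1;  // two slots per entry: the number, then its string
  if (is_smi) {
    return static_cast<uint32_t>(static_cast<int32_t>(number)) & mask;
  }
  uint64_t bits;
  std::memcpy(&bits, &number, sizeof(bits));
  return (static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32)) & mask;
}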
1663 
1664 
1665 void NumberToStringStub::Generate(MacroAssembler* masm) {
1666  Label runtime;
1667 
1668  __ ldr(r1, MemOperand(sp, 0));
1669 
1670  // Generate code to lookup number in the number string cache.
1671  GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime);
1672  __ add(sp, sp, Operand(1 * kPointerSize));
1673  __ Ret();
1674 
1675  __ bind(&runtime);
1676  // Handle number to string in the runtime system if not found in the cache.
1677  __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
1678 }
1679 
1680 
1681 // On entry lhs_ and rhs_ are the values to be compared.
1682 // On exit r0 is 0, positive or negative to indicate the result of
1683 // the comparison.
1684 void CompareStub::Generate(MacroAssembler* masm) {
1685  ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
1686  (lhs_.is(r1) && rhs_.is(r0)));
1687 
1688  Label slow; // Call builtin.
1689  Label not_smis, both_loaded_as_doubles, lhs_not_nan;
1690 
1691  if (include_smi_compare_) {
1692  Label not_two_smis, smi_done;
1693  __ orr(r2, r1, r0);
1694  __ JumpIfNotSmi(r2, &not_two_smis);
1695  __ mov(r1, Operand(r1, ASR, 1));
1696  __ sub(r0, r1, Operand(r0, ASR, 1));
1697  __ Ret();
1698  __ bind(&not_two_smis);
1699  } else if (FLAG_debug_code) {
1700  __ orr(r2, r1, r0);
1701  __ tst(r2, Operand(kSmiTagMask));
1702  __ Assert(ne, "CompareStub: unexpected smi operands.");
1703  }
1704 
1705  // NOTICE! This code is only reached after a smi-fast-case check, so
1706  // it is certain that at least one operand isn't a smi.
1707 
1708  // Handle the case where the objects are identical. Either returns the answer
1709  // or goes to slow. Only falls through if the objects were not identical.
1710  EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
1711 
1712  // If either is a Smi (we know that not both are), then they can only
1713  // be strictly equal if the other is a HeapNumber.
1714  STATIC_ASSERT(kSmiTag == 0);
1715  ASSERT_EQ(0, Smi::FromInt(0));
1716  __ and_(r2, lhs_, Operand(rhs_));
1717  __ JumpIfNotSmi(r2, &not_smis);
1718  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
1719  // 1) Return the answer.
1720  // 2) Go to slow.
1721  // 3) Fall through to both_loaded_as_doubles.
1722  // 4) Jump to lhs_not_nan.
1723  // In cases 3 and 4 we have found out we were dealing with a number-number
1724  // comparison. If VFP3 is supported the double values of the numbers have
1725  // been loaded into d7 and d6. Otherwise, the double values have been loaded
1726  // into r0, r1, r2, and r3.
1727  EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_);
1728 
1729  __ bind(&both_loaded_as_doubles);
1730  // The arguments have been converted to doubles and stored in d6 and d7, if
1731  // VFP3 is supported, or in r0, r1, r2, and r3.
1732  Isolate* isolate = masm->isolate();
1733  if (CpuFeatures::IsSupported(VFP2)) {
1734  __ bind(&lhs_not_nan);
1735  CpuFeatures::Scope scope(VFP2);
1736  Label no_nan;
1737  // ARMv7 VFP3 instructions to implement double precision comparison.
1738  __ VFPCompareAndSetFlags(d7, d6);
1739  Label nan;
1740  __ b(vs, &nan);
1741  __ mov(r0, Operand(EQUAL), LeaveCC, eq);
1742  __ mov(r0, Operand(LESS), LeaveCC, lt);
1743  __ mov(r0, Operand(GREATER), LeaveCC, gt);
1744  __ Ret();
1745 
1746  __ bind(&nan);
1747  // If one of the sides was a NaN then the v flag is set. Load r0 with
1748  // whatever it takes to make the comparison fail, since comparisons with NaN
1749  // always fail.
1750  if (cc_ == lt || cc_ == le) {
1751  __ mov(r0, Operand(GREATER));
1752  } else {
1753  __ mov(r0, Operand(LESS));
1754  }
1755  __ Ret();
1756  } else {
1757  // Checks for NaN in the doubles we have loaded. Can return the answer or
1758  // fall through if neither is a NaN. Also binds lhs_not_nan.
1759  EmitNanCheck(masm, &lhs_not_nan, cc_);
1760  // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
1761  // answer. Never falls through.
1762  EmitTwoNonNanDoubleComparison(masm, cc_);
1763  }
1764 
1765  __ bind(&not_smis);
1766  // At this point we know we are dealing with two different objects,
1767  // and neither of them is a Smi. The objects are in rhs_ and lhs_.
1768  if (strict_) {
1769  // This returns non-equal for some object types, or falls through if it
1770  // was not lucky.
1771  EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
1772  }
1773 
1774  Label check_for_symbols;
1775  Label flat_string_check;
1776  // Check for heap-number-heap-number comparison. Can jump to slow case,
1777  // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
1778  // that case. If the inputs are not doubles then jumps to check_for_symbols.
1779  // In this case r2 will contain the type of rhs_. Never falls through.
1780  EmitCheckForTwoHeapNumbers(masm,
1781  lhs_,
1782  rhs_,
1783  &both_loaded_as_doubles,
1784  &check_for_symbols,
1785  &flat_string_check);
1786 
1787  __ bind(&check_for_symbols);
1788  // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
1789  // symbols.
1790  if (cc_ == eq && !strict_) {
1791  // Returns an answer for two symbols or two detectable objects.
1792  // Otherwise jumps to string case or not both strings case.
1793  // Assumes that r2 is the type of rhs_ on entry.
1794  EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
1795  }
1796 
1797  // Check for both being sequential ASCII strings, and inline if that is the
1798  // case.
1799  __ bind(&flat_string_check);
1800 
1801  __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow);
1802 
1803  __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3);
1804  if (cc_ == eq) {
1805  StringCompareStub::GenerateFlatAsciiStringEquals(masm,
1806  lhs_,
1807  rhs_,
1808  r2,
1809  r3,
1810  r4);
1811  } else {
1812  StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
1813  lhs_,
1814  rhs_,
1815  r2,
1816  r3,
1817  r4,
1818  r5);
1819  }
1820  // Never falls through to here.
1821 
1822  __ bind(&slow);
1823 
1824  __ Push(lhs_, rhs_);
1825  // Figure out which native to call and setup the arguments.
1826  Builtins::JavaScript native;
1827  if (cc_ == eq) {
1828  native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
1829  } else {
1830  native = Builtins::COMPARE;
1831  int ncr; // NaN compare result
1832  if (cc_ == lt || cc_ == le) {
1833  ncr = GREATER;
1834  } else {
1835  ASSERT(cc_ == gt || cc_ == ge); // remaining cases
1836  ncr = LESS;
1837  }
1838  __ mov(r0, Operand(Smi::FromInt(ncr)));
1839  __ push(r0);
1840  }
1841 
1842  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1843  // tagged as a small integer.
1844  __ InvokeBuiltin(native, JUMP_FUNCTION);
1845 }
1846 
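// Illustrative sketch (not part of the original file): the NaN policy used in
// the VFP 'nan' block and when choosing 'ncr' for the slow path above. Any
// relational comparison involving NaN must evaluate to false, so the stub
// materializes a result that fails the requested condition (LESS == -1,
// EQUAL == 0, GREATER == 1).
static int NaNComparisonResult(bool condition_is_lt_or_le) {
  // For < and <= return GREATER so the test fails; otherwise return LESS.
  return condition_is_lt_or_le ? 1 /* GREATER */ : -1 /* LESS */;
}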
1847 
1848 // The stub expects its argument in the tos_ register and returns its result in
1849 // it, too: zero for false, and a non-zero value for true.
1850 void ToBooleanStub::Generate(MacroAssembler* masm) {
1851  // This stub overrides SometimesSetsUpAFrame() to return false. That means
1852  // we cannot call anything that could cause a GC from this stub.
1853  Label patch;
1854  const Register map = r9.is(tos_) ? r7 : r9;
1855  const Register temp = map;
1856 
1857  // undefined -> false.
1858  CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
1859 
1860  // Boolean -> its value.
1861  CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
1862  CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
1863 
1864  // 'null' -> false.
1865  CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
1866 
1867  if (types_.Contains(SMI)) {
1868  // Smis: 0 -> false, all other -> true
1869  __ tst(tos_, Operand(kSmiTagMask));
1870  // tos_ contains the correct return value already
1871  __ Ret(eq);
1872  } else if (types_.NeedsMap()) {
1873  // If we need a map later and have a Smi -> patch.
1874  __ JumpIfSmi(tos_, &patch);
1875  }
1876 
1877  if (types_.NeedsMap()) {
1878  __ ldr(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
1879 
1880  if (types_.CanBeUndetectable()) {
1881  __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
1882  __ tst(ip, Operand(1 << Map::kIsUndetectable));
1883  // Undetectable -> false.
1884  __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
1885  __ Ret(ne);
1886  }
1887  }
1888 
1889  if (types_.Contains(SPEC_OBJECT)) {
1890  // Spec object -> true.
1891  __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
1892  // tos_ contains the correct non-zero return value already.
1893  __ Ret(ge);
1894  }
1895 
1896  if (types_.Contains(STRING)) {
1897  // String value -> false iff empty.
1898  __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
1899  __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset), lt);
1900  __ Ret(lt); // the string length is OK as the return value
1901  }
1902 
1903  if (types_.Contains(HEAP_NUMBER)) {
1904  // Heap number -> false iff +0, -0, or NaN.
1905  Label not_heap_number;
1906  __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
1907  __ b(ne, &not_heap_number);
1908 
1909  if (CpuFeatures::IsSupported(VFP2)) {
1910  CpuFeatures::Scope scope(VFP2);
1911 
1912  __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
1913  __ VFPCompareAndSetFlags(d1, 0.0);
1914  // "tos_" is a register, and contains a non zero value by default.
1915  // Hence we only need to overwrite "tos_" with zero to return false for
1916  // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
1917  __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq); // for FP_ZERO
1918  __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs); // for FP_NAN
1919  } else {
1920  Label done, not_nan, not_zero;
1921  __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
1922  // -0 maps to false:
1923  __ bic(
1924  temp, temp, Operand(HeapNumber::kSignMask, RelocInfo::NONE), SetCC);
1925  __ b(ne, &not_zero);
1926  // If exponent word is zero then the answer depends on the mantissa word.
1927  __ ldr(tos_, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
1928  __ jmp(&done);
1929 
1930  // Check for NaN.
1931  __ bind(&not_zero);
1932  // We already zeroed the sign bit, now shift out the mantissa so we only
1933  // have the exponent left.
1934  __ mov(temp, Operand(temp, LSR, HeapNumber::kMantissaBitsInTopWord));
1935  unsigned int shifted_exponent_mask =
1936  HeapNumber::kExponentMask >> HeapNumber::kMantissaBitsInTopWord;
1937  __ cmp(temp, Operand(shifted_exponent_mask, RelocInfo::NONE));
1938  __ b(ne, &not_nan); // If exponent is not 0x7ff then it can't be a NaN.
1939 
1940  // Reload exponent word.
1941  __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
1942  __ tst(temp, Operand(HeapNumber::kMantissaMask, RelocInfo::NONE));
1943  // If mantissa is not zero then we have a NaN, so return 0.
1944  __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
1945  __ b(ne, &done);
1946 
1947  // Load mantissa word.
1948  __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
1949  __ cmp(temp, Operand(0, RelocInfo::NONE));
1950  // If mantissa is not zero then we have a NaN, so return 0.
1951  __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
1952  __ b(ne, &done);
1953 
1954  __ bind(&not_nan);
1955  __ mov(tos_, Operand(1, RelocInfo::NONE));
1956  __ bind(&done);
1957  }
1958  __ Ret();
1959  __ bind(&not_heap_number);
1960  }
1961 
1962  __ bind(&patch);
1963  GenerateTypeTransition(masm);
1964 }
1965 
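// Illustrative sketch (not part of the original file): what the non-VFP heap
// number path above computes from the two 32-bit words of the double.
// ToBoolean of a number is false exactly for +0, -0 and NaN. The constants
// mirror the HeapNumber layout used above (sign bit 31, 11 exponent bits and
// 20 mantissa bits in the top word).
static bool HeapNumberToBoolean(uint32_t exponent_word, uint32_t mantissa_word) {
  const uint32_t kSignBit = 0x80000000u;
  const uint32_t kTopExponentBits = 0x7ff00000u;
  const uint32_t kTopMantissaBits = 0x000fffffu;
  uint32_t top = exponent_word & ~kSignBit;  // The 'bic' above drops the sign.
  if (top == 0) return mantissa_word != 0;   // +0/-0 -> false, denormals -> true.
  bool is_nan = ((top & kTopExponentBits) == kTopExponentBits) &&
                (((top & kTopMantissaBits) | mantissa_word) != 0);
  return !is_nan;                            // NaN -> false, everything else -> true.
}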
1966 
1967 void ToBooleanStub::CheckOddball(MacroAssembler* masm,
1968  Type type,
1969  Heap::RootListIndex value,
1970  bool result) {
1971  if (types_.Contains(type)) {
1972  // If we see an expected oddball, return its ToBoolean value tos_.
1973  __ LoadRoot(ip, value);
1974  __ cmp(tos_, ip);
1975  // The value of a root is never NULL, so we can avoid loading a non-null
1976  // value into tos_ when we want to return 'true'.
1977  if (!result) {
1978  __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq);
1979  }
1980  __ Ret(eq);
1981  }
1982 }
1983 
1984 
1985 void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
1986  if (!tos_.is(r3)) {
1987  __ mov(r3, Operand(tos_));
1988  }
1989  __ mov(r2, Operand(Smi::FromInt(tos_.code())));
1990  __ mov(r1, Operand(Smi::FromInt(types_.ToByte())));
1991  __ Push(r3, r2, r1);
1992  // Patch the caller to an appropriate specialized stub and return the
1993  // operation result to the caller of the stub.
1994  __ TailCallExternalReference(
1995  ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
1996  3,
1997  1);
1998 }
1999 
2000 
2001 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
2002  // We don't allow a GC during a store buffer overflow so there is no need to
2003  // store the registers in any particular way, but we do have to store and
2004  // restore them.
2005  __ stm(db_w, sp, kCallerSaved | lr.bit());
2006  if (save_doubles_ == kSaveFPRegs) {
2007  CpuFeatures::Scope scope(VFP2);
2008  __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
2009  for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
2010  DwVfpRegister reg = DwVfpRegister::from_code(i);
2011  __ vstr(reg, MemOperand(sp, i * kDoubleSize));
2012  }
2013  }
2014  const int argument_count = 1;
2015  const int fp_argument_count = 0;
2016  const Register scratch = r1;
2017 
2018  AllowExternalCallThatCantCauseGC scope(masm);
2019  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
2020  __ mov(r0, Operand(ExternalReference::isolate_address()));
2021  __ CallCFunction(
2022  ExternalReference::store_buffer_overflow_function(masm->isolate()),
2023  argument_count);
2024  if (save_doubles_ == kSaveFPRegs) {
2025  CpuFeatures::Scope scope(VFP2);
2026  for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
2027  DwVfpRegister reg = DwVfpRegister::from_code(i);
2028  __ vldr(reg, MemOperand(sp, i * kDoubleSize));
2029  }
2030  __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
2031  }
2032  __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
2033 }
2034 
2035 
2036 void UnaryOpStub::PrintName(StringStream* stream) {
2037  const char* op_name = Token::Name(op_);
2038  const char* overwrite_name = NULL; // Make g++ happy.
2039  switch (mode_) {
2040  case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
2041  case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
2042  }
2043  stream->Add("UnaryOpStub_%s_%s_%s",
2044  op_name,
2045  overwrite_name,
2046  UnaryOpIC::GetName(operand_type_));
2047 }
2048 
2049 
2050 // TODO(svenpanne): Use virtual functions instead of switch.
2051 void UnaryOpStub::Generate(MacroAssembler* masm) {
2052  switch (operand_type_) {
2053  case UnaryOpIC::UNINITIALIZED:
2054  GenerateTypeTransition(masm);
2055  break;
2056  case UnaryOpIC::SMI:
2057  GenerateSmiStub(masm);
2058  break;
2059  case UnaryOpIC::HEAP_NUMBER:
2060  GenerateHeapNumberStub(masm);
2061  break;
2062  case UnaryOpIC::GENERIC:
2063  GenerateGenericStub(masm);
2064  break;
2065  }
2066 }
2067 
2068 
2069 void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2070  __ mov(r3, Operand(r0)); // the operand
2071  __ mov(r2, Operand(Smi::FromInt(op_)));
2072  __ mov(r1, Operand(Smi::FromInt(mode_)));
2073  __ mov(r0, Operand(Smi::FromInt(operand_type_)));
2074  __ Push(r3, r2, r1, r0);
2075 
2076  __ TailCallExternalReference(
2077  ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
2078 }
2079 
2080 
2081 // TODO(svenpanne): Use virtual functions instead of switch.
2082 void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2083  switch (op_) {
2084  case Token::SUB:
2085  GenerateSmiStubSub(masm);
2086  break;
2087  case Token::BIT_NOT:
2088  GenerateSmiStubBitNot(masm);
2089  break;
2090  default:
2091  UNREACHABLE();
2092  }
2093 }
2094 
2095 
2096 void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
2097  Label non_smi, slow;
2098  GenerateSmiCodeSub(masm, &non_smi, &slow);
2099  __ bind(&non_smi);
2100  __ bind(&slow);
2101  GenerateTypeTransition(masm);
2102 }
2103 
2104 
2105 void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
2106  Label non_smi;
2107  GenerateSmiCodeBitNot(masm, &non_smi);
2108  __ bind(&non_smi);
2109  GenerateTypeTransition(masm);
2110 }
2111 
2112 
2113 void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
2114  Label* non_smi,
2115  Label* slow) {
2116  __ JumpIfNotSmi(r0, non_smi);
2117 
2118  // The result of negating zero or the smallest negative smi is not a smi.
2119  __ bic(ip, r0, Operand(0x80000000), SetCC);
2120  __ b(eq, slow);
2121 
2122  // Return '0 - value'.
2123  __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
2124  __ Ret();
2125 }
2126 
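// Illustrative sketch (not part of the original file): the guard above in
// plain C++. On this port a tagged smi is the 31-bit value shifted left by
// one; negation stays a smi unless the operand is 0 (would need -0.0) or the
// most negative smi (whose negation overflows 31 bits).
static bool SmiNegationStaysSmi(uint32_t tagged_smi) {
  // Clearing bit 31 leaves zero exactly for tagged 0x00000000 (smi 0) and
  // tagged 0x80000000 (the smallest smi) - the two slow cases above.
  return (tagged_smi & 0x7fffffffu) != 0;
}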
2127 
2128 void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
2129  Label* non_smi) {
2130  __ JumpIfNotSmi(r0, non_smi);
2131 
2132  // Flip bits and revert inverted smi-tag.
2133  __ mvn(r0, Operand(r0));
2134  __ bic(r0, r0, Operand(kSmiTagMask));
2135  __ Ret();
2136 }
2137 
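// Illustrative sketch (not part of the original file): the mvn/bic pair above
// on the tagged representation. ~x also flips the tag bit, so it is cleared
// again afterwards; the result of BIT_NOT on a smi is always a valid smi.
static uint32_t SmiBitNot(uint32_t tagged_smi) {
  const uint32_t kTagMask = 1;     // kSmiTagMask on this port.
  return ~tagged_smi & ~kTagMask;  // mvn, then bic with the tag mask.
}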
2138 
2139 // TODO(svenpanne): Use virtual functions instead of switch.
2140 void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
2141  switch (op_) {
2142  case Token::SUB:
2143  GenerateHeapNumberStubSub(masm);
2144  break;
2145  case Token::BIT_NOT:
2146  GenerateHeapNumberStubBitNot(masm);
2147  break;
2148  default:
2149  UNREACHABLE();
2150  }
2151 }
2152 
2153 
2154 void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
2155  Label non_smi, slow, call_builtin;
2156  GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
2157  __ bind(&non_smi);
2158  GenerateHeapNumberCodeSub(masm, &slow);
2159  __ bind(&slow);
2160  GenerateTypeTransition(masm);
2161  __ bind(&call_builtin);
2162  GenerateGenericCodeFallback(masm);
2163 }
2164 
2165 
2166 void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
2167  Label non_smi, slow;
2168  GenerateSmiCodeBitNot(masm, &non_smi);
2169  __ bind(&non_smi);
2170  GenerateHeapNumberCodeBitNot(masm, &slow);
2171  __ bind(&slow);
2172  GenerateTypeTransition(masm);
2173 }
2174 
2175 void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
2176  Label* slow) {
2177  EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
2178  // r0 is a heap number. Get a new heap number in r1.
2179  if (mode_ == UNARY_OVERWRITE) {
2180  __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
2181  __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
2182  __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
2183  } else {
2184  Label slow_allocate_heapnumber, heapnumber_allocated;
2185  __ AllocateHeapNumber(r1, r2, r3, r6, &slow_allocate_heapnumber);
2186  __ jmp(&heapnumber_allocated);
2187 
2188  __ bind(&slow_allocate_heapnumber);
2189  {
2190  FrameScope scope(masm, StackFrame::INTERNAL);
2191  __ push(r0);
2192  __ CallRuntime(Runtime::kNumberAlloc, 0);
2193  __ mov(r1, Operand(r0));
2194  __ pop(r0);
2195  }
2196 
2197  __ bind(&heapnumber_allocated);
2198  __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
2199  __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
2200  __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
2201  __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
2202  __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
2203  __ mov(r0, Operand(r1));
2204  }
2205  __ Ret();
2206 }
2207 
2208 
2209 void UnaryOpStub::GenerateHeapNumberCodeBitNot(
2210  MacroAssembler* masm, Label* slow) {
2211  Label impossible;
2212 
2213  EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
2214  // Convert the heap number in r0 to an untagged integer in r1.
2215  __ ConvertToInt32(r0, r1, r2, r3, d0, slow);
2216 
2217  // Do the bitwise operation and check if the result fits in a smi.
2218  Label try_float;
2219  __ mvn(r1, Operand(r1));
2220  __ add(r2, r1, Operand(0x40000000), SetCC);
2221  __ b(mi, &try_float);
2222 
2223  // Tag the result as a smi and we're done.
2224  __ mov(r0, Operand(r1, LSL, kSmiTagSize));
2225  __ Ret();
2226 
2227  // Try to store the result in a heap number.
2228  __ bind(&try_float);
2229  if (mode_ == UNARY_NO_OVERWRITE) {
2230  Label slow_allocate_heapnumber, heapnumber_allocated;
2231  // Allocate a new heap number without zapping r0, which we need if it fails.
2232  __ AllocateHeapNumber(r2, r3, r4, r6, &slow_allocate_heapnumber);
2233  __ jmp(&heapnumber_allocated);
2234 
2235  __ bind(&slow_allocate_heapnumber);
2236  {
2237  FrameScope scope(masm, StackFrame::INTERNAL);
2238  __ push(r0); // Push the heap number, not the untagged int32.
2239  __ CallRuntime(Runtime::kNumberAlloc, 0);
2240  __ mov(r2, r0); // Move the new heap number into r2.
2241  // Get the heap number into r0, now that the new heap number is in r2.
2242  __ pop(r0);
2243  }
2244 
2245  // Convert the heap number in r0 to an untagged integer in r1.
2246  // This can't go slow-case because it's the same number we already
2247  // converted successfully once before.
2248  __ ConvertToInt32(r0, r1, r3, r4, d0, &impossible);
2249  __ mvn(r1, Operand(r1));
2250 
2251  __ bind(&heapnumber_allocated);
2252  __ mov(r0, r2); // Move newly allocated heap number to r0.
2253  }
2254 
2255  if (CpuFeatures::IsSupported(VFP2)) {
2256  // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
2257  CpuFeatures::Scope scope(VFP2);
2258  __ vmov(s0, r1);
2259  __ vcvt_f64_s32(d0, s0);
2260  __ sub(r2, r0, Operand(kHeapObjectTag));
2261  __ vstr(d0, r2, HeapNumber::kValueOffset);
2262  __ Ret();
2263  } else {
2264  // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
2265  // have to set up a frame.
2266  WriteInt32ToHeapNumberStub stub(r1, r0, r2);
2267  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2268  }
2269 
2270  __ bind(&impossible);
2271  if (FLAG_debug_code) {
2272  __ stop("Incorrect assumption in bit-not stub");
2273  }
2274 }
2275 
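// Illustrative sketch (not part of the original file): the recurring
// "add 0x40000000 and branch on 'mi'" idiom used above (and in the binary op
// stubs below) to test whether a 32-bit integer fits in a smi, i.e. lies in
// [-2^30, 2^30). Unsigned addition is used here to avoid signed overflow.
static bool FitsInSmi(int32_t value) {
  // In-range values stay non-negative after adding 2^30; everything else
  // lands in the negative half, which is what the 'mi' condition catches.
  return static_cast<int32_t>(static_cast<uint32_t>(value) + 0x40000000u) >= 0;
}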
2276 
2277 // TODO(svenpanne): Use virtual functions instead of switch.
2278 void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
2279  switch (op_) {
2280  case Token::SUB:
2281  GenerateGenericStubSub(masm);
2282  break;
2283  case Token::BIT_NOT:
2284  GenerateGenericStubBitNot(masm);
2285  break;
2286  default:
2287  UNREACHABLE();
2288  }
2289 }
2290 
2291 
2292 void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
2293  Label non_smi, slow;
2294  GenerateSmiCodeSub(masm, &non_smi, &slow);
2295  __ bind(&non_smi);
2296  GenerateHeapNumberCodeSub(masm, &slow);
2297  __ bind(&slow);
2298  GenerateGenericCodeFallback(masm);
2299 }
2300 
2301 
2302 void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
2303  Label non_smi, slow;
2304  GenerateSmiCodeBitNot(masm, &non_smi);
2305  __ bind(&non_smi);
2306  GenerateHeapNumberCodeBitNot(masm, &slow);
2307  __ bind(&slow);
2308  GenerateGenericCodeFallback(masm);
2309 }
2310 
2311 
2312 void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
2313  // Handle the slow case by jumping to the JavaScript builtin.
2314  __ push(r0);
2315  switch (op_) {
2316  case Token::SUB:
2317  __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
2318  break;
2319  case Token::BIT_NOT:
2320  __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
2321  break;
2322  default:
2323  UNREACHABLE();
2324  }
2325 }
2326 
2327 
2328 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2329  Label get_result;
2330 
2331  __ Push(r1, r0);
2332 
2333  __ mov(r2, Operand(Smi::FromInt(MinorKey())));
2334  __ mov(r1, Operand(Smi::FromInt(op_)));
2335  __ mov(r0, Operand(Smi::FromInt(operands_type_)));
2336  __ Push(r2, r1, r0);
2337 
2338  __ TailCallExternalReference(
2339  ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
2340  masm->isolate()),
2341  5,
2342  1);
2343 }
2344 
2345 
2346 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
2347  MacroAssembler* masm) {
2348  UNIMPLEMENTED();
2349 }
2350 
2351 
2352 void BinaryOpStub::Generate(MacroAssembler* masm) {
2353  // Explicitly allow generation of nested stubs. It is safe here because
2354  // generation code does not use any raw pointers.
2355  AllowStubCallsScope allow_stub_calls(masm, true);
2356 
2357  switch (operands_type_) {
2358  case BinaryOpIC::UNINITIALIZED:
2359  GenerateTypeTransition(masm);
2360  break;
2361  case BinaryOpIC::SMI:
2362  GenerateSmiStub(masm);
2363  break;
2364  case BinaryOpIC::INT32:
2365  GenerateInt32Stub(masm);
2366  break;
2367  case BinaryOpIC::HEAP_NUMBER:
2368  GenerateHeapNumberStub(masm);
2369  break;
2370  case BinaryOpIC::ODDBALL:
2371  GenerateOddballStub(masm);
2372  break;
2373  case BinaryOpIC::BOTH_STRING:
2374  GenerateBothStringStub(masm);
2375  break;
2376  case BinaryOpIC::STRING:
2377  GenerateStringStub(masm);
2378  break;
2379  case BinaryOpIC::GENERIC:
2380  GenerateGeneric(masm);
2381  break;
2382  default:
2383  UNREACHABLE();
2384  }
2385 }
2386 
2387 
2388 void BinaryOpStub::PrintName(StringStream* stream) {
2389  const char* op_name = Token::Name(op_);
2390  const char* overwrite_name;
2391  switch (mode_) {
2392  case NO_OVERWRITE: overwrite_name = "Alloc"; break;
2393  case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
2394  case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
2395  default: overwrite_name = "UnknownOverwrite"; break;
2396  }
2397  stream->Add("BinaryOpStub_%s_%s_%s",
2398  op_name,
2399  overwrite_name,
2400  BinaryOpIC::GetName(operands_type_));
2401 }
2402 
2403 
2404 void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
2405  Register left = r1;
2406  Register right = r0;
2407  Register scratch1 = r7;
2408  Register scratch2 = r9;
2409 
2410  ASSERT(right.is(r0));
2411  STATIC_ASSERT(kSmiTag == 0);
2412 
2413  Label not_smi_result;
2414  switch (op_) {
2415  case Token::ADD:
2416  __ add(right, left, Operand(right), SetCC); // Add optimistically.
2417  __ Ret(vc);
2418  __ sub(right, right, Operand(left)); // Revert optimistic add.
2419  break;
2420  case Token::SUB:
2421  __ sub(right, left, Operand(right), SetCC); // Subtract optimistically.
2422  __ Ret(vc);
2423  __ sub(right, left, Operand(right)); // Revert optimistic subtract.
2424  break;
2425  case Token::MUL:
2426  // Remove tag from one of the operands. This way the multiplication result
2427  // will be a smi if it fits the smi range.
2428  __ SmiUntag(ip, right);
2429  // Do multiplication
2430  // scratch1 = lower 32 bits of ip * left.
2431  // scratch2 = higher 32 bits of ip * left.
2432  __ smull(scratch1, scratch2, left, ip);
2433  // Check for overflowing the smi range - no overflow if higher 33 bits of
2434  // the result are identical.
2435  __ mov(ip, Operand(scratch1, ASR, 31));
2436  __ cmp(ip, Operand(scratch2));
2437  __ b(ne, &not_smi_result);
2438  // Go slow on zero result to handle -0.
2439  __ cmp(scratch1, Operand(0));
2440  __ mov(right, Operand(scratch1), LeaveCC, ne);
2441  __ Ret(ne);
2442  // We need -0 if we were multiplying a negative number with 0 to get 0.
2443  // We know one of them was zero.
2444  __ add(scratch2, right, Operand(left), SetCC);
2445  __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
2446  __ Ret(pl); // Return smi 0 if the non-zero one was positive.
2447  // We fall through here if we multiplied a negative number with 0, because
2448  // that would mean we should produce -0.
2449  break;
2450  case Token::DIV:
2451  // Check for power of two on the right hand side.
2452  __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
2453  // Check for positive and no remainder (scratch1 contains right - 1).
2454  __ orr(scratch2, scratch1, Operand(0x80000000u));
2455  __ tst(left, scratch2);
2456  __ b(ne, &not_smi_result);
2457 
2458  // Perform division by shifting.
2459  __ CountLeadingZeros(scratch1, scratch1, scratch2);
2460  __ rsb(scratch1, scratch1, Operand(31));
2461  __ mov(right, Operand(left, LSR, scratch1));
2462  __ Ret();
2463  break;
2464  case Token::MOD:
2465  // Check for two positive smis.
2466  __ orr(scratch1, left, Operand(right));
2467  __ tst(scratch1, Operand(0x80000000u | kSmiTagMask));
2468  __ b(ne, &not_smi_result);
2469 
2470  // Check for power of two on the right hand side.
2471  __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
2472 
2473  // Perform modulus by masking.
2474  __ and_(right, left, Operand(scratch1));
2475  __ Ret();
2476  break;
2477  case Token::BIT_OR:
2478  __ orr(right, left, Operand(right));
2479  __ Ret();
2480  break;
2481  case Token::BIT_AND:
2482  __ and_(right, left, Operand(right));
2483  __ Ret();
2484  break;
2485  case Token::BIT_XOR:
2486  __ eor(right, left, Operand(right));
2487  __ Ret();
2488  break;
2489  case Token::SAR:
2490  // Remove tags from right operand.
2491  __ GetLeastBitsFromSmi(scratch1, right, 5);
2492  __ mov(right, Operand(left, ASR, scratch1));
2493  // Smi tag result.
2494  __ bic(right, right, Operand(kSmiTagMask));
2495  __ Ret();
2496  break;
2497  case Token::SHR:
2498  // Remove tags from operands. We can't do this on a 31 bit number
2499  // because then the 0s get shifted into bit 30 instead of bit 31.
2500  __ SmiUntag(scratch1, left);
2501  __ GetLeastBitsFromSmi(scratch2, right, 5);
2502  __ mov(scratch1, Operand(scratch1, LSR, scratch2));
2503  // Unsigned shift is not allowed to produce a negative number, so
2504  // check the sign bit and the sign bit after Smi tagging.
2505  __ tst(scratch1, Operand(0xc0000000));
2506  __ b(ne, &not_smi_result);
2507  // Smi tag result.
2508  __ SmiTag(right, scratch1);
2509  __ Ret();
2510  break;
2511  case Token::SHL:
2512  // Remove tags from operands.
2513  __ SmiUntag(scratch1, left);
2514  __ GetLeastBitsFromSmi(scratch2, right, 5);
2515  __ mov(scratch1, Operand(scratch1, LSL, scratch2));
2516  // Check that the signed result fits in a Smi.
2517  __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
2518  __ b(mi, &not_smi_result);
2519  __ SmiTag(right, scratch1);
2520  __ Ret();
2521  break;
2522  default:
2523  UNREACHABLE();
2524  }
2525  __ bind(&not_smi_result);
2526 }
2527 
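// Illustrative sketch (not part of the original file): the smull-based
// overflow check in the MUL case above. One operand is untagged, the other
// keeps its tag, so the low 32 bits of the 64-bit product are already the
// tagged result; it is a valid smi iff the high word equals the sign
// extension of the low word (i.e. the top 33 bits are identical).
static bool SmiMulFitsInSmi(int32_t untagged_op, int32_t tagged_op,
                            int32_t* tagged_result) {
  int64_t product = static_cast<int64_t>(untagged_op) *
                    static_cast<int64_t>(tagged_op);
  int32_t lo = static_cast<int32_t>(product);
  int32_t hi = static_cast<int32_t>(product >> 32);
  if (hi != (lo >> 31)) return false;  // cmp(ip, scratch2) above; >> acts as ASR here.
  *tagged_result = lo;
  return true;
}
// (The stub additionally falls back for a zero result with a negative operand,
// because that case must produce -0, which no smi can represent.)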
2528 
2529 void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2530  bool smi_operands,
2531  Label* not_numbers,
2532  Label* gc_required) {
2533  Register left = r1;
2534  Register right = r0;
2535  Register scratch1 = r7;
2536  Register scratch2 = r9;
2537  Register scratch3 = r4;
2538 
2539  ASSERT(smi_operands || (not_numbers != NULL));
2540  if (smi_operands) {
2541  __ AssertSmi(left);
2542  __ AssertSmi(right);
2543  }
2544 
2545  Register heap_number_map = r6;
2546  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2547 
2548  switch (op_) {
2549  case Token::ADD:
2550  case Token::SUB:
2551  case Token::MUL:
2552  case Token::DIV:
2553  case Token::MOD: {
2554  // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
2555  // depending on whether VFP3 is available or not.
2556  FloatingPointHelper::Destination destination =
2557  CpuFeatures::IsSupported(VFP2) &&
2558  op_ != Token::MOD ?
2559  FloatingPointHelper::kVFPRegisters :
2560  FloatingPointHelper::kCoreRegisters;
2561 
2562  // Allocate new heap number for result.
2563  Register result = r5;
2564  GenerateHeapResultAllocation(
2565  masm, result, heap_number_map, scratch1, scratch2, gc_required);
2566 
2567  // Load the operands.
2568  if (smi_operands) {
2569  FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
2570  } else {
2571  FloatingPointHelper::LoadOperands(masm,
2572  destination,
2573  heap_number_map,
2574  scratch1,
2575  scratch2,
2576  not_numbers);
2577  }
2578 
2579  // Calculate the result.
2580  if (destination == FloatingPointHelper::kVFPRegisters) {
2581  // Using VFP registers:
2582  // d6: Left value
2583  // d7: Right value
2584  CpuFeatures::Scope scope(VFP2);
2585  switch (op_) {
2586  case Token::ADD:
2587  __ vadd(d5, d6, d7);
2588  break;
2589  case Token::SUB:
2590  __ vsub(d5, d6, d7);
2591  break;
2592  case Token::MUL:
2593  __ vmul(d5, d6, d7);
2594  break;
2595  case Token::DIV:
2596  __ vdiv(d5, d6, d7);
2597  break;
2598  default:
2599  UNREACHABLE();
2600  }
2601 
2602  __ sub(r0, result, Operand(kHeapObjectTag));
2603  __ vstr(d5, r0, HeapNumber::kValueOffset);
2604  __ add(r0, r0, Operand(kHeapObjectTag));
2605  __ Ret();
2606  } else {
2607  // Call the C function to handle the double operation.
2608  FloatingPointHelper::CallCCodeForDoubleOperation(masm,
2609  op_,
2610  result,
2611  scratch1);
2612  if (FLAG_debug_code) {
2613  __ stop("Unreachable code.");
2614  }
2615  }
2616  break;
2617  }
2618  case Token::BIT_OR:
2619  case Token::BIT_XOR:
2620  case Token::BIT_AND:
2621  case Token::SAR:
2622  case Token::SHR:
2623  case Token::SHL: {
2624  if (smi_operands) {
2625  __ SmiUntag(r3, left);
2626  __ SmiUntag(r2, right);
2627  } else {
2628  // Convert operands to 32-bit integers. Right in r2 and left in r3.
2629  FloatingPointHelper::ConvertNumberToInt32(masm,
2630  left,
2631  r3,
2632  heap_number_map,
2633  scratch1,
2634  scratch2,
2635  scratch3,
2636  d0,
2637  not_numbers);
2638  FloatingPointHelper::ConvertNumberToInt32(masm,
2639  right,
2640  r2,
2641  heap_number_map,
2642  scratch1,
2643  scratch2,
2644  scratch3,
2645  d0,
2646  not_numbers);
2647  }
2648 
2649  Label result_not_a_smi;
2650  switch (op_) {
2651  case Token::BIT_OR:
2652  __ orr(r2, r3, Operand(r2));
2653  break;
2654  case Token::BIT_XOR:
2655  __ eor(r2, r3, Operand(r2));
2656  break;
2657  case Token::BIT_AND:
2658  __ and_(r2, r3, Operand(r2));
2659  break;
2660  case Token::SAR:
2661  // Use only the 5 least significant bits of the shift count.
2662  __ GetLeastBitsFromInt32(r2, r2, 5);
2663  __ mov(r2, Operand(r3, ASR, r2));
2664  break;
2665  case Token::SHR:
2666  // Use only the 5 least significant bits of the shift count.
2667  __ GetLeastBitsFromInt32(r2, r2, 5);
2668  __ mov(r2, Operand(r3, LSR, r2), SetCC);
2669  // SHR is special because it is required to produce a positive answer.
2670  // The code below for writing into heap numbers isn't capable of
2671  // writing the register as an unsigned int so we go to slow case if we
2672  // hit this case.
2673  if (CpuFeatures::IsSupported(VFP2)) {
2674  __ b(mi, &result_not_a_smi);
2675  } else {
2676  __ b(mi, not_numbers);
2677  }
2678  break;
2679  case Token::SHL:
2680  // Use only the 5 least significant bits of the shift count.
2681  __ GetLeastBitsFromInt32(r2, r2, 5);
2682  __ mov(r2, Operand(r3, LSL, r2));
2683  break;
2684  default:
2685  UNREACHABLE();
2686  }
2687 
2688  // Check that the *signed* result fits in a smi.
2689  __ add(r3, r2, Operand(0x40000000), SetCC);
2690  __ b(mi, &result_not_a_smi);
2691  __ SmiTag(r0, r2);
2692  __ Ret();
2693 
2694  // Allocate new heap number for result.
2695  __ bind(&result_not_a_smi);
2696  Register result = r5;
2697  if (smi_operands) {
2698  __ AllocateHeapNumber(
2699  result, scratch1, scratch2, heap_number_map, gc_required);
2700  } else {
2701  GenerateHeapResultAllocation(
2702  masm, result, heap_number_map, scratch1, scratch2, gc_required);
2703  }
2704 
2705  // r2: Answer as signed int32.
2706  // r5: Heap number to write answer into.
2707 
2708  // Nothing can go wrong now, so move the heap number to r0, which is the
2709  // result.
2710  __ mov(r0, Operand(r5));
2711 
2712  if (CpuFeatures::IsSupported(VFP2)) {
2713  // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
2714  // mentioned above SHR needs to always produce a positive result.
2715  CpuFeatures::Scope scope(VFP2);
2716  __ vmov(s0, r2);
2717  if (op_ == Token::SHR) {
2718  __ vcvt_f64_u32(d0, s0);
2719  } else {
2720  __ vcvt_f64_s32(d0, s0);
2721  }
2722  __ sub(r3, r0, Operand(kHeapObjectTag));
2723  __ vstr(d0, r3, HeapNumber::kValueOffset);
2724  __ Ret();
2725  } else {
2726  // Tail call that writes the int32 in r2 to the heap number in r0, using
2727  // r3 as scratch. r0 is preserved and returned.
2728  WriteInt32ToHeapNumberStub stub(r2, r0, r3);
2729  __ TailCallStub(&stub);
2730  }
2731  break;
2732  }
2733  default:
2734  UNREACHABLE();
2735  }
2736 }
2737 
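// Illustrative sketch (not part of the original file): why SHR gets special
// treatment above. The shift produces an unsigned 32-bit value; if its top
// bit is set it cannot be represented as a smi or even a signed int32, so
// the stub must box it as a heap number (vcvt_f64_u32) or bail out.
static bool ShrResultNeedsHeapNumber(uint32_t left, uint32_t shift_count) {
  uint32_t result = left >> (shift_count & 0x1f);  // Only 5 bits of the count are used.
  return (result & 0x80000000u) != 0;              // The 'mi' branch above.
}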
2738 
2739 // Generate the smi code. If the operation on smis is successful this return is
2740 // generated. If the result is not a smi and heap number allocation is not
2741 // requested, the code falls through. If number allocation is requested but a
2742 // heap number cannot be allocated, the code jumps to the label gc_required.
2743 void BinaryOpStub::GenerateSmiCode(
2744  MacroAssembler* masm,
2745  Label* use_runtime,
2746  Label* gc_required,
2747  SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
2748  Label not_smis;
2749 
2750  Register left = r1;
2751  Register right = r0;
2752  Register scratch1 = r7;
2753 
2754  // Perform combined smi check on both operands.
2755  __ orr(scratch1, left, Operand(right));
2756  STATIC_ASSERT(kSmiTag == 0);
2757  __ JumpIfNotSmi(scratch1, &not_smis);
2758 
2759  // If the smi-smi operation results in a smi, the return is generated.
2760  GenerateSmiSmiOperation(masm);
2761 
2762  // If heap number results are possible generate the result in an allocated
2763  // heap number.
2764  if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
2765  GenerateFPOperation(masm, true, use_runtime, gc_required);
2766  }
2767  __ bind(&not_smis);
2768 }
2769 
2770 
2771 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2772  Label not_smis, call_runtime;
2773 
2774  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
2775  result_type_ == BinaryOpIC::SMI) {
2776  // Only allow smi results.
2777  GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
2778  } else {
2779  // Allow heap number result and don't make a transition if a heap number
2780  // cannot be allocated.
2781  GenerateSmiCode(masm,
2782  &call_runtime,
2783  &call_runtime,
2784  ALLOW_HEAPNUMBER_RESULTS);
2785  }
2786 
2787  // Code falls through if the result is not returned as either a smi or heap
2788  // number.
2789  GenerateTypeTransition(masm);
2790 
2791  __ bind(&call_runtime);
2792  GenerateCallRuntime(masm);
2793 }
2794 
2795 
2796 void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
2797  ASSERT(operands_type_ == BinaryOpIC::STRING);
2798  ASSERT(op_ == Token::ADD);
2799  // Try to add arguments as strings, otherwise, transition to the generic
2800  // BinaryOpIC type.
2801  GenerateAddStrings(masm);
2802  GenerateTypeTransition(masm);
2803 }
2804 
2805 
2806 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
2807  Label call_runtime;
2808  ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
2809  ASSERT(op_ == Token::ADD);
2810  // If both arguments are strings, call the string add stub.
2811  // Otherwise, do a transition.
2812 
2813  // Registers containing left and right operands respectively.
2814  Register left = r1;
2815  Register right = r0;
2816 
2817  // Test if left operand is a string.
2818  __ JumpIfSmi(left, &call_runtime);
2819  __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
2820  __ b(ge, &call_runtime);
2821 
2822  // Test if right operand is a string.
2823  __ JumpIfSmi(right, &call_runtime);
2824  __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
2825  __ b(ge, &call_runtime);
2826 
2827  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
2828  GenerateRegisterArgsPush(masm);
2829  __ TailCallStub(&string_add_stub);
2830 
2831  __ bind(&call_runtime);
2832  GenerateTypeTransition(masm);
2833 }
2834 
2835 
2836 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2837  ASSERT(operands_type_ == BinaryOpIC::INT32);
2838 
2839  Register left = r1;
2840  Register right = r0;
2841  Register scratch1 = r7;
2842  Register scratch2 = r9;
2843  DwVfpRegister double_scratch = d0;
2844 
2845  Register heap_number_result = no_reg;
2846  Register heap_number_map = r6;
2847  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2848 
2849  Label call_runtime;
2850  // Labels for type transition, used for wrong input or output types.
2851  // Both labels are currently bound to the same position. We use two
2852  // different labels to differentiate the cause leading to the type transition.
2853  Label transition;
2854 
2855  // Smi-smi fast case.
2856  Label skip;
2857  __ orr(scratch1, left, right);
2858  __ JumpIfNotSmi(scratch1, &skip);
2859  GenerateSmiSmiOperation(masm);
2860  // Fall through if the result is not a smi.
2861  __ bind(&skip);
2862 
2863  switch (op_) {
2864  case Token::ADD:
2865  case Token::SUB:
2866  case Token::MUL:
2867  case Token::DIV:
2868  case Token::MOD: {
2869  // Load both operands and check that they are 32-bit integer.
2870  // Jump to type transition if they are not. The registers r0 and r1 (right
2871  // and left) are preserved for the runtime call.
2872  FloatingPointHelper::Destination destination =
2873  (CpuFeatures::IsSupported(VFP2) && op_ != Token::MOD)
2874  ? FloatingPointHelper::kVFPRegisters
2875  : FloatingPointHelper::kCoreRegisters;
2876 
2877  FloatingPointHelper::LoadNumberAsInt32Double(masm,
2878  right,
2879  destination,
2880  d7,
2881  d8,
2882  r2,
2883  r3,
2884  heap_number_map,
2885  scratch1,
2886  scratch2,
2887  s0,
2888  &transition);
2889  FloatingPointHelper::LoadNumberAsInt32Double(masm,
2890  left,
2891  destination,
2892  d6,
2893  d8,
2894  r4,
2895  r5,
2896  heap_number_map,
2897  scratch1,
2898  scratch2,
2899  s0,
2900  &transition);
2901 
2902  if (destination == FloatingPointHelper::kVFPRegisters) {
2903  CpuFeatures::Scope scope(VFP2);
2904  Label return_heap_number;
2905  switch (op_) {
2906  case Token::ADD:
2907  __ vadd(d5, d6, d7);
2908  break;
2909  case Token::SUB:
2910  __ vsub(d5, d6, d7);
2911  break;
2912  case Token::MUL:
2913  __ vmul(d5, d6, d7);
2914  break;
2915  case Token::DIV:
2916  __ vdiv(d5, d6, d7);
2917  break;
2918  default:
2919  UNREACHABLE();
2920  }
2921 
2922  if (op_ != Token::DIV) {
2923  // These operations produce an integer result.
2924  // Try to return a smi if we can.
2925  // Otherwise return a heap number if allowed, or jump to type
2926  // transition.
2927 
2928  __ EmitVFPTruncate(kRoundToZero,
2929  scratch1,
2930  d5,
2931  scratch2,
2932  d8);
2933 
2934  if (result_type_ <= BinaryOpIC::INT32) {
2935  // If the ne condition is set, result does
2936  // not fit in a 32-bit integer.
2937  __ b(ne, &transition);
2938  }
2939 
2940  // Check if the result fits in a smi.
2941  __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
2942  // If not try to return a heap number.
2943  __ b(mi, &return_heap_number);
2944  // Check for minus zero. Return heap number for minus zero.
2945  Label not_zero;
2946  __ cmp(scratch1, Operand::Zero());
2947  __ b(ne, &not_zero);
2948  __ vmov(scratch2, d5.high());
2949  __ tst(scratch2, Operand(HeapNumber::kSignMask));
2950  __ b(ne, &return_heap_number);
2951  __ bind(&not_zero);
2952 
2953  // Tag the result and return.
2954  __ SmiTag(r0, scratch1);
2955  __ Ret();
2956  } else {
2957  // DIV just falls through to allocating a heap number.
2958  }
2959 
2960  __ bind(&return_heap_number);
2961  // Return a heap number, or fall through to type transition or runtime
2962  // call if we can't.
2963  if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
2964  : BinaryOpIC::INT32)) {
2965  // We are using vfp registers so r5 is available.
2966  heap_number_result = r5;
2967  GenerateHeapResultAllocation(masm,
2968  heap_number_result,
2969  heap_number_map,
2970  scratch1,
2971  scratch2,
2972  &call_runtime);
2973  __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
2974  __ vstr(d5, r0, HeapNumber::kValueOffset);
2975  __ mov(r0, heap_number_result);
2976  __ Ret();
2977  }
2978 
2979  // A DIV operation expecting an integer result falls through
2980  // to type transition.
2981 
2982  } else {
2983  // We preserved r0 and r1 to be able to call runtime.
2984  // Save the left value on the stack.
2985  __ Push(r5, r4);
2986 
2987  Label pop_and_call_runtime;
2988 
2989  // Allocate a heap number to store the result.
2990  heap_number_result = r5;
2991  GenerateHeapResultAllocation(masm,
2992  heap_number_result,
2993  heap_number_map,
2994  scratch1,
2995  scratch2,
2996  &pop_and_call_runtime);
2997 
2998  // Load the left value from the value saved on the stack.
2999  __ Pop(r1, r0);
3000 
3001  // Call the C function to handle the double operation.
3002  FloatingPointHelper::CallCCodeForDoubleOperation(
3003  masm, op_, heap_number_result, scratch1);
3004  if (FLAG_debug_code) {
3005  __ stop("Unreachable code.");
3006  }
3007 
3008  __ bind(&pop_and_call_runtime);
3009  __ Drop(2);
3010  __ b(&call_runtime);
3011  }
3012 
3013  break;
3014  }
3015 
3016  case Token::BIT_OR:
3017  case Token::BIT_XOR:
3018  case Token::BIT_AND:
3019  case Token::SAR:
3020  case Token::SHR:
3021  case Token::SHL: {
3022  Label return_heap_number;
3023  Register scratch3 = r5;
3024  // Convert operands to 32-bit integers. Right in r2 and left in r3. The
3025  // registers r0 and r1 (right and left) are preserved for the runtime
3026  // call.
3027  FloatingPointHelper::LoadNumberAsInt32(masm,
3028  left,
3029  r3,
3030  heap_number_map,
3031  scratch1,
3032  scratch2,
3033  scratch3,
3034  d0,
3035  d1,
3036  &transition);
3037  FloatingPointHelper::LoadNumberAsInt32(masm,
3038  right,
3039  r2,
3040  heap_number_map,
3041  scratch1,
3042  scratch2,
3043  scratch3,
3044  d0,
3045  d1,
3046  &transition);
3047 
3048  // The ECMA-262 standard specifies that, for shift operations, only the
3049  // 5 least significant bits of the shift value should be used.
3050  switch (op_) {
3051  case Token::BIT_OR:
3052  __ orr(r2, r3, Operand(r2));
3053  break;
3054  case Token::BIT_XOR:
3055  __ eor(r2, r3, Operand(r2));
3056  break;
3057  case Token::BIT_AND:
3058  __ and_(r2, r3, Operand(r2));
3059  break;
3060  case Token::SAR:
3061  __ and_(r2, r2, Operand(0x1f));
3062  __ mov(r2, Operand(r3, ASR, r2));
3063  break;
3064  case Token::SHR:
3065  __ and_(r2, r2, Operand(0x1f));
3066  __ mov(r2, Operand(r3, LSR, r2), SetCC);
3067  // SHR is special because it is required to produce a positive answer.
3068  // We only get a negative result if the shift value (r2) is 0.
3069  // This result cannot be represented as a signed 32-bit integer, so try
3070  // to return a heap number if we can.
3071  // The non vfp2 code does not support this special case, so jump to
3072  // runtime if we don't support it.
3073  if (CpuFeatures::IsSupported(VFP2)) {
3074  __ b(mi, (result_type_ <= BinaryOpIC::INT32)
3075  ? &transition
3076  : &return_heap_number);
3077  } else {
3078  __ b(mi, (result_type_ <= BinaryOpIC::INT32)
3079  ? &transition
3080  : &call_runtime);
3081  }
3082  break;
3083  case Token::SHL:
3084  __ and_(r2, r2, Operand(0x1f));
3085  __ mov(r2, Operand(r3, LSL, r2));
3086  break;
3087  default:
3088  UNREACHABLE();
3089  }
3090 
3091  // Check if the result fits in a smi.
3092  __ add(scratch1, r2, Operand(0x40000000), SetCC);
3093  // If not try to return a heap number. (We know the result is an int32.)
3094  __ b(mi, &return_heap_number);
3095  // Tag the result and return.
3096  __ SmiTag(r0, r2);
3097  __ Ret();
3098 
3099  __ bind(&return_heap_number);
3100  heap_number_result = r5;
3101  GenerateHeapResultAllocation(masm,
3102  heap_number_result,
3103  heap_number_map,
3104  scratch1,
3105  scratch2,
3106  &call_runtime);
3107 
3108  if (CpuFeatures::IsSupported(VFP2)) {
3109  CpuFeatures::Scope scope(VFP2);
3110  if (op_ != Token::SHR) {
3111  // Convert the result to a floating point value.
3112  __ vmov(double_scratch.low(), r2);
3113  __ vcvt_f64_s32(double_scratch, double_scratch.low());
3114  } else {
3115  // The result must be interpreted as an unsigned 32-bit integer.
3116  __ vmov(double_scratch.low(), r2);
3117  __ vcvt_f64_u32(double_scratch, double_scratch.low());
3118  }
3119 
3120  // Store the result.
3121  __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
3122  __ vstr(double_scratch, r0, HeapNumber::kValueOffset);
3123  __ mov(r0, heap_number_result);
3124  __ Ret();
3125  } else {
3126  // Tail call that writes the int32 in r2 to the heap number in r0, using
3127  // r3 as scratch. r0 is preserved and returned.
3128  __ mov(r0, r5);
3129  WriteInt32ToHeapNumberStub stub(r2, r0, r3);
3130  __ TailCallStub(&stub);
3131  }
3132 
3133  break;
3134  }
3135 
3136  default:
3137  UNREACHABLE();
3138  }
3139 
3140  // We never expect DIV to yield an integer result, so we always generate
3141  // type transition code for DIV operations expecting an integer result: the
3142  // code will fall through to this type transition.
3143  if (transition.is_linked() ||
3144  ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
3145  __ bind(&transition);
3146  GenerateTypeTransition(masm);
3147  }
3148 
3149  __ bind(&call_runtime);
3150  GenerateCallRuntime(masm);
3151 }
3152 
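// Illustrative sketch (not part of the original file): the ECMA-262 rule
// applied by the 'and_(r2, r2, Operand(0x1f))' instructions above. Only the
// five least significant bits of the shift count are used, so in JavaScript
// x >> 33 behaves exactly like x >> 1.
static int32_t JsShiftRightArithmetic(int32_t left, uint32_t count) {
  return left >> (count & 0x1f);  // Arithmetic shift, mirroring ASR above.
}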
3153 
3154 void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
3155  Label call_runtime;
3156 
3157  if (op_ == Token::ADD) {
3158  // Handle string addition here, because it is the only operation
3159  // that does not do a ToNumber conversion on the operands.
3160  GenerateAddStrings(masm);
3161  }
3162 
3163  // Convert oddball arguments to numbers.
3164  Label check, done;
3165  __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
3166  __ b(ne, &check);
3167  if (Token::IsBitOp(op_)) {
3168  __ mov(r1, Operand(Smi::FromInt(0)));
3169  } else {
3170  __ LoadRoot(r1, Heap::kNanValueRootIndex);
3171  }
3172  __ jmp(&done);
3173  __ bind(&check);
3174  __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
3175  __ b(ne, &done);
3176  if (Token::IsBitOp(op_)) {
3177  __ mov(r0, Operand(Smi::FromInt(0)));
3178  } else {
3179  __ LoadRoot(r0, Heap::kNanValueRootIndex);
3180  }
3181  __ bind(&done);
3182 
3183  GenerateHeapNumberStub(masm);
3184 }
3185 
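// Illustrative sketch (not part of the original file; assumes <limits> is
// available for quiet_NaN): the undefined handling above. Bitwise operators
// see +0 (hence Smi::FromInt(0)), arithmetic sees NaN (hence the NaN root),
// matching ToNumber(undefined) == NaN combined with ToInt32(NaN) == 0, e.g.
// in JS: (undefined | 0) === 0, while undefined + 1 is NaN.
static double UndefinedOperandValue(bool is_bit_op) {
  return is_bit_op ? 0.0 : std::numeric_limits<double>::quiet_NaN();
}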
3186 
3187 void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
3188  Label call_runtime;
3189  GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
3190 
3191  __ bind(&call_runtime);
3192  GenerateCallRuntime(masm);
3193 }
3194 
3195 
3196 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
3197  Label call_runtime, call_string_add_or_runtime;
3198 
3199  GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
3200 
3201  GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
3202 
3203  __ bind(&call_string_add_or_runtime);
3204  if (op_ == Token::ADD) {
3205  GenerateAddStrings(masm);
3206  }
3207 
3208  __ bind(&call_runtime);
3209  GenerateCallRuntime(masm);
3210 }
3211 
3212 
3213 void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
3214  ASSERT(op_ == Token::ADD);
3215  Label left_not_string, call_runtime;
3216 
3217  Register left = r1;
3218  Register right = r0;
3219 
3220  // Check if left argument is a string.
3221  __ JumpIfSmi(left, &left_not_string);
3222  __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
3223  __ b(ge, &left_not_string);
3224 
3225  StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
3226  GenerateRegisterArgsPush(masm);
3227  __ TailCallStub(&string_add_left_stub);
3228 
3229  // Left operand is not a string, test right.
3230  __ bind(&left_not_string);
3231  __ JumpIfSmi(right, &call_runtime);
3232  __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
3233  __ b(ge, &call_runtime);
3234 
3235  StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
3236  GenerateRegisterArgsPush(masm);
3237  __ TailCallStub(&string_add_right_stub);
3238 
3239  // At least one argument is not a string.
3240  __ bind(&call_runtime);
3241 }
3242 
3243 
3244 void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
3245  GenerateRegisterArgsPush(masm);
3246  switch (op_) {
3247  case Token::ADD:
3248  __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
3249  break;
3250  case Token::SUB:
3251  __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
3252  break;
3253  case Token::MUL:
3254  __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
3255  break;
3256  case Token::DIV:
3257  __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
3258  break;
3259  case Token::MOD:
3260  __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
3261  break;
3262  case Token::BIT_OR:
3263  __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
3264  break;
3265  case Token::BIT_AND:
3266  __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
3267  break;
3268  case Token::BIT_XOR:
3269  __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
3270  break;
3271  case Token::SAR:
3272  __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
3273  break;
3274  case Token::SHR:
3275  __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
3276  break;
3277  case Token::SHL:
3278  __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
3279  break;
3280  default:
3281  UNREACHABLE();
3282  }
3283 }
3284 
3285 
3286 void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
3287  Register result,
3288  Register heap_number_map,
3289  Register scratch1,
3290  Register scratch2,
3291  Label* gc_required) {
3292  // Code below will scratch result if allocation fails. To keep both arguments
3293  // intact for the runtime call, result cannot be r0 or r1.
3294  ASSERT(!result.is(r0) && !result.is(r1));
3295 
3296  if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
3297  Label skip_allocation, allocated;
3298  Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0;
3299  // If the overwritable operand is already an object, we skip the
3300  // allocation of a heap number.
3301  __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
3302  // Allocate a heap number for the result.
3303  __ AllocateHeapNumber(
3304  result, scratch1, scratch2, heap_number_map, gc_required);
3305  __ b(&allocated);
3306  __ bind(&skip_allocation);
3307  // Use object holding the overwritable operand for result.
3308  __ mov(result, Operand(overwritable_operand));
3309  __ bind(&allocated);
3310  } else {
3311  ASSERT(mode_ == NO_OVERWRITE);
3312  __ AllocateHeapNumber(
3313  result, scratch1, scratch2, heap_number_map, gc_required);
3314  }
3315 }
3316 
3317 
3318 void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
3319  __ Push(r1, r0);
3320 }
3321 
3322 
3323 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
3324  // Untagged case: double input in d2, double result goes
3325  // into d2.
3326  // Tagged case: tagged input on top of stack and in r0,
3327  // tagged result (heap number) goes into r0.
3328 
3329  Label input_not_smi;
3330  Label loaded;
3331  Label calculate;
3332  Label invalid_cache;
3333  const Register scratch0 = r9;
3334  const Register scratch1 = r7;
3335  const Register cache_entry = r0;
3336  const bool tagged = (argument_type_ == TAGGED);
3337 
3338  if (CpuFeatures::IsSupported(VFP2)) {
3339  CpuFeatures::Scope scope(VFP2);
3340  if (tagged) {
3341  // Argument is a number and is on stack and in r0.
3342  // Load argument and check if it is a smi.
3343  __ JumpIfNotSmi(r0, &input_not_smi);
3344 
3345  // Input is a smi. Convert to double and load the low and high words
3346  // of the double into r2, r3.
3347  __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
3348  __ b(&loaded);
3349 
3350  __ bind(&input_not_smi);
3351  // Check if input is a HeapNumber.
3352  __ CheckMap(r0,
3353  r1,
3354  Heap::kHeapNumberMapRootIndex,
3355  &calculate,
3356  DONT_DO_SMI_CHECK);
3357  // Input is a HeapNumber. Load it to a double register and store the
3358  // low and high words into r2, r3.
3359  __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
3360  __ vmov(r2, r3, d0);
3361  } else {
3362  // Input is untagged double in d2. Output goes to d2.
3363  __ vmov(r2, r3, d2);
3364  }
3365  __ bind(&loaded);
3366  // r2 = low 32 bits of double value
3367  // r3 = high 32 bits of double value
3368  // Compute hash (the shifts are arithmetic):
3369  // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
3370  __ eor(r1, r2, Operand(r3));
3371  __ eor(r1, r1, Operand(r1, ASR, 16));
3372  __ eor(r1, r1, Operand(r1, ASR, 8));
3373  ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
3374  __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
3375 
3376  // r2 = low 32 bits of double value.
3377  // r3 = high 32 bits of double value.
3378  // r1 = TranscendentalCache::hash(double value).
3379  Isolate* isolate = masm->isolate();
3380  ExternalReference cache_array =
3381  ExternalReference::transcendental_cache_array_address(isolate);
3382  __ mov(cache_entry, Operand(cache_array));
3383  // cache_entry points to cache array.
3384  int cache_array_index
3385  = type_ * sizeof(isolate->transcendental_cache()->caches_[0]);
3386  __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index));
3387  // r0 points to the cache for the type type_.
3388  // If NULL, the cache hasn't been initialized yet, so go through runtime.
3389  __ cmp(cache_entry, Operand(0, RelocInfo::NONE));
3390  __ b(eq, &invalid_cache);
3391 
3392 #ifdef DEBUG
3393  // Check that the layout of cache elements matches expectations.
3394  { TranscendentalCache::SubCache::Element test_elem[2];
3395  char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
3396  char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
3397  char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
3398  char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
3399  char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
3400  CHECK_EQ(12, elem2_start - elem_start); // Two uint32_t's and a pointer.
3401  CHECK_EQ(0, elem_in0 - elem_start);
3402  CHECK_EQ(kIntSize, elem_in1 - elem_start);
3403  CHECK_EQ(2 * kIntSize, elem_out - elem_start);
3404  }
3405 #endif
3406 
3407  // Find the address of the r1-th entry in the cache, i.e., &r0[r1 * 12].
3408  __ add(r1, r1, Operand(r1, LSL, 1));
3409  __ add(cache_entry, cache_entry, Operand(r1, LSL, 2));
3410  // Check if cache matches: Double value is stored in uint32_t[2] array.
3411  __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
3412  __ cmp(r2, r4);
3413  __ cmp(r3, r5, eq);
3414  __ b(ne, &calculate);
3415  // Cache hit. Load result, cleanup and return.
3416  Counters* counters = masm->isolate()->counters();
3417  __ IncrementCounter(
3418  counters->transcendental_cache_hit(), 1, scratch0, scratch1);
3419  if (tagged) {
3420  // Pop input value from stack and load result into r0.
3421  __ pop();
3422  __ mov(r0, Operand(r6));
3423  } else {
3424  // Load result into d2.
3425  __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
3426  }
3427  __ Ret();
3428  } // if (CpuFeatures::IsSupported(VFP2))
3429 
3430  __ bind(&calculate);
3431  Counters* counters = masm->isolate()->counters();
3432  __ IncrementCounter(
3433  counters->transcendental_cache_miss(), 1, scratch0, scratch1);
3434  if (tagged) {
3435  __ bind(&invalid_cache);
3436  ExternalReference runtime_function =
3437  ExternalReference(RuntimeFunction(), masm->isolate());
3438  __ TailCallExternalReference(runtime_function, 1, 1);
3439  } else {
3440  ASSERT(CpuFeatures::IsSupported(VFP2));
3441  CpuFeatures::Scope scope(VFP2);
3442 
3443  Label no_update;
3444  Label skip_cache;
3445 
3446  // Call C function to calculate the result and update the cache.
3447  // r0: precalculated cache entry address.
3448  // r2 and r3: parts of the double value.
3449  // Store r0, r2 and r3 on stack for later before calling C function.
3450  __ Push(r3, r2, cache_entry);
3451  GenerateCallCFunction(masm, scratch0);
3452  __ GetCFunctionDoubleResult(d2);
3453 
3454  // Try to update the cache. If we cannot allocate a
3455  // heap number, we return the result without updating.
3456  __ Pop(r3, r2, cache_entry);
3457  __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
3458  __ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update);
3459  __ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
3460  __ stm(ia, cache_entry, r2.bit() | r3.bit() | r6.bit());
3461  __ Ret();
3462 
3463  __ bind(&invalid_cache);
3464  // The cache is invalid. Call runtime which will recreate the
3465  // cache.
3466  __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
3467  __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
3468  __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
3469  {
3470  FrameScope scope(masm, StackFrame::INTERNAL);
3471  __ push(r0);
3472  __ CallRuntime(RuntimeFunction(), 1);
3473  }
3474  __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
3475  __ Ret();
3476 
3477  __ bind(&skip_cache);
3478  // Call C function to calculate the result and answer directly
3479  // without updating the cache.
3480  GenerateCallCFunction(masm, scratch0);
3481  __ GetCFunctionDoubleResult(d2);
3482  __ bind(&no_update);
3483 
3484  // We return the value in d2 without adding it to the cache, but
3485  // we cause a scavenging GC so that future allocations will succeed.
3486  {
3487  FrameScope scope(masm, StackFrame::INTERNAL);
3488 
3489  // Allocate an aligned object larger than a HeapNumber.
3490  ASSERT(4 * kPointerSize >= HeapNumber::kSize);
3491  __ mov(scratch0, Operand(4 * kPointerSize));
3492  __ push(scratch0);
3493  __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
3494  }
3495  __ Ret();
3496  }
3497 }
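
// The cache probe emitted above compares the two 32-bit halves of the input
// double (r2/r3) against a 12-byte cache element and, on a hit, returns the
// cached HeapNumber (r6). A minimal C++ sketch of that probe, assuming the
// element layout asserted in the DEBUG block; TranscendentalElement and
// ProbeTranscendentalCache are illustrative names, not V8's.
struct TranscendentalElement {
  unsigned in[2];  // low and high words of the input double (r2, r3 above)
  void* output;    // cached HeapNumber holding the result (loaded into r6)
};                 // 2 * kIntSize + kPointerSize == 12 bytes, as CHECK_EQ'd

static void* ProbeTranscendentalCache(TranscendentalElement* cache,
                                      int index,
                                      unsigned in_lo,
                                      unsigned in_hi) {
  // &cache[index] is cache + index * 12; the stub computes index * 12 as
  // ((index + (index << 1)) << 2), i.e. (index * 3) * 4.
  TranscendentalElement* entry = cache + index;
  if (entry->in[0] == in_lo && entry->in[1] == in_hi) {
    return entry->output;  // cache hit: result returned in r0 or loaded to d2
  }
  return 0;                // cache miss: the stub falls through to &calculate
}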
3498 
3499 
3500 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
3501  Register scratch) {
3502  ASSERT(CpuFeatures::IsEnabled(VFP2));
3503  Isolate* isolate = masm->isolate();
3504 
3505  __ push(lr);
3506  __ PrepareCallCFunction(0, 1, scratch);
3507  if (masm->use_eabi_hardfloat()) {
3508  __ vmov(d0, d2);
3509  } else {
3510  __ vmov(r0, r1, d2);
3511  }
3512  AllowExternalCallThatCantCauseGC scope(masm);
3513  switch (type_) {
3514  case TranscendentalCache::SIN:
3515  __ CallCFunction(ExternalReference::math_sin_double_function(isolate),
3516  0, 1);
3517  break;
3518  case TranscendentalCache::COS:
3519  __ CallCFunction(ExternalReference::math_cos_double_function(isolate),
3520  0, 1);
3521  break;
3522  case TranscendentalCache::TAN:
3523  __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
3524  0, 1);
3525  break;
3526  case TranscendentalCache::LOG:
3527  __ CallCFunction(ExternalReference::math_log_double_function(isolate),
3528  0, 1);
3529  break;
3530  default:
3531  UNIMPLEMENTED();
3532  break;
3533  }
3534  __ pop(lr);
3535 }
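
// The vmov above adapts the double argument to the two ARM calling
// conventions: with EABI hard-float the value is passed in d0, otherwise it
// is split across the r0/r1 core register pair. A hedged sketch of the call
// being made; the external references above resolve to C functions of
// roughly this shape, and transcendental_fn/CallTranscendental are
// illustrative names only.
typedef double (*transcendental_fn)(double x);

static double CallTranscendental(transcendental_fn fn, double x) {
  // A C compiler emits the same moves the stub performs by hand:
  // d2 -> d0 on hard-float targets, d2 -> {r0, r1} on soft-float targets.
  return fn(x);  // the result comes back in d0 or {r0, r1}, which
                 // GetCFunctionDoubleResult(d2) copies back into d2.
}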
3536 
3537 
3538 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
3539  switch (type_) {
3540  // Add more cases when necessary.
3541  case TranscendentalCache::SIN: return Runtime::kMath_sin;
3542  case TranscendentalCache::COS: return Runtime::kMath_cos;
3543  case TranscendentalCache::TAN: return Runtime::kMath_tan;
3544  case TranscendentalCache::LOG: return Runtime::kMath_log;
3545  default:
3546  UNIMPLEMENTED();
3547  return Runtime::kAbort;
3548  }
3549 }
3550 
3551 
3552 void StackCheckStub::Generate(MacroAssembler* masm) {
3553  __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
3554 }
3555 
3556 
3557 void InterruptStub::Generate(MacroAssembler* masm) {
3558  __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
3559 }
3560 
3561 
3562 void MathPowStub::Generate(MacroAssembler* masm) {
3563  CpuFeatures::Scope vfp2_scope(VFP2);
3564  const Register base = r1;
3565  const Register exponent = r2;
3566  const Register heapnumbermap = r5;
3567  const Register heapnumber = r0;
3568  const DoubleRegister double_base = d1;
3569  const DoubleRegister double_exponent = d2;
3570  const DoubleRegister double_result = d3;
3571  const DoubleRegister double_scratch = d0;
3572  const SwVfpRegister single_scratch = s0;
3573  const Register scratch = r9;
3574  const Register scratch2 = r7;
3575 
3576  Label call_runtime, done, int_exponent;
3577  if (exponent_type_ == ON_STACK) {
3578  Label base_is_smi, unpack_exponent;
3579  // The exponent and base are supplied as arguments on the stack.
3580  // This can only happen if the stub is called from non-optimized code.
3581  // Load input parameters from stack to double registers.
3582  __ ldr(base, MemOperand(sp, 1 * kPointerSize));
3583  __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));
3584 
3585  __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
3586 
3587  __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
3588  __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
3589  __ cmp(scratch, heapnumbermap);
3590  __ b(ne, &call_runtime);
3591 
3592  __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
3593  __ jmp(&unpack_exponent);
3594 
3595  __ bind(&base_is_smi);
3596  __ vmov(single_scratch, scratch);
3597  __ vcvt_f64_s32(double_base, single_scratch);
3598  __ bind(&unpack_exponent);
3599 
3600  __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
3601 
3602  __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
3603  __ cmp(scratch, heapnumbermap);
3604  __ b(ne, &call_runtime);
3605  __ vldr(double_exponent,
3606  FieldMemOperand(exponent, HeapNumber::kValueOffset));
3607  } else if (exponent_type_ == TAGGED) {
3608  // Base is already in double_base.
3609  __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
3610 
3611  __ vldr(double_exponent,
3612  FieldMemOperand(exponent, HeapNumber::kValueOffset));
3613  }
3614 
3615  if (exponent_type_ != INTEGER) {
3616  Label int_exponent_convert;
3617  // Detect integer exponents stored as double.
3618  __ vcvt_u32_f64(single_scratch, double_exponent);
3619  // We do not check for NaN or Infinity here because comparing numbers on
3620  // ARM correctly distinguishes NaNs. We end up calling the built-in.
3621  __ vcvt_f64_u32(double_scratch, single_scratch);
3622  __ VFPCompareAndSetFlags(double_scratch, double_exponent);
3623  __ b(eq, &int_exponent_convert);
3624 
3625  if (exponent_type_ == ON_STACK) {
3626  // Detect square root case. Crankshaft detects constant +/-0.5 at
3627  // compile time and uses DoMathPowHalf instead. We then skip this check
3628  // for non-constant cases of +/-0.5 as these hardly occur.
3629  Label not_plus_half;
3630 
3631  // Test for 0.5.
3632  __ vmov(double_scratch, 0.5, scratch);
3633  __ VFPCompareAndSetFlags(double_exponent, double_scratch);
3634  __ b(ne, &not_plus_half);
3635 
3636  // Calculates square root of base. Check for the special case of
3637  // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
3638  __ vmov(double_scratch, -V8_INFINITY, scratch);
3639  __ VFPCompareAndSetFlags(double_base, double_scratch);
3640  __ vneg(double_result, double_scratch, eq);
3641  __ b(eq, &done);
3642 
3643  // Add +0 to convert -0 to +0.
3644  __ vadd(double_scratch, double_base, kDoubleRegZero);
3645  __ vsqrt(double_result, double_scratch);
3646  __ jmp(&done);
3647 
3648  __ bind(&not_plus_half);
3649  __ vmov(double_scratch, -0.5, scratch);
3650  __ VFPCompareAndSetFlags(double_exponent, double_scratch);
3651  __ b(ne, &call_runtime);
3652 
3653  // Calculate the reciprocal of the square root of base. Check for the
3654  // special case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
3655  __ vmov(double_scratch, -V8_INFINITY, scratch);
3656  __ VFPCompareAndSetFlags(double_base, double_scratch);
3657  __ vmov(double_result, kDoubleRegZero, eq);
3658  __ b(eq, &done);
3659 
3660  // Add +0 to convert -0 to +0.
3661  __ vadd(double_scratch, double_base, kDoubleRegZero);
3662  __ vmov(double_result, 1.0, scratch);
3663  __ vsqrt(double_scratch, double_scratch);
3664  __ vdiv(double_result, double_result, double_scratch);
3665  __ jmp(&done);
3666  }
3667 
3668  __ push(lr);
3669  {
3670  AllowExternalCallThatCantCauseGC scope(masm);
3671  __ PrepareCallCFunction(0, 2, scratch);
3672  __ SetCallCDoubleArguments(double_base, double_exponent);
3673  __ CallCFunction(
3674  ExternalReference::power_double_double_function(masm->isolate()),
3675  0, 2);
3676  }
3677  __ pop(lr);
3678  __ GetCFunctionDoubleResult(double_result);
3679  __ jmp(&done);
3680 
3681  __ bind(&int_exponent_convert);
3682  __ vcvt_u32_f64(single_scratch, double_exponent);
3683  __ vmov(scratch, single_scratch);
3684  }
3685 
3686  // Calculate power with integer exponent.
3687  __ bind(&int_exponent);
3688 
3689  // Get two copies of exponent in the registers scratch and exponent.
3690  if (exponent_type_ == INTEGER) {
3691  __ mov(scratch, exponent);
3692  } else {
3693  // Exponent has previously been stored into scratch as untagged integer.
3694  __ mov(exponent, scratch);
3695  }
3696  __ vmov(double_scratch, double_base); // Back up base.
3697  __ vmov(double_result, 1.0, scratch2);
3698 
3699  // Get absolute value of exponent.
3700  __ cmp(scratch, Operand(0));
3701  __ mov(scratch2, Operand(0), LeaveCC, mi);
3702  __ sub(scratch, scratch2, scratch, LeaveCC, mi);
3703 
3704  Label while_true;
3705  __ bind(&while_true);
3706  __ mov(scratch, Operand(scratch, ASR, 1), SetCC);
3707  __ vmul(double_result, double_result, double_scratch, cs);
3708  __ vmul(double_scratch, double_scratch, double_scratch, ne);
3709  __ b(ne, &while_true);
3710 
3711  __ cmp(exponent, Operand(0));
3712  __ b(ge, &done);
3713  __ vmov(double_scratch, 1.0, scratch);
3714  __ vdiv(double_result, double_scratch, double_result);
3715  // Test whether result is zero. Bail out to check for subnormal result.
3716  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
3717  __ VFPCompareAndSetFlags(double_result, 0.0);
3718  __ b(ne, &done);
3719  // double_exponent may not contain the exponent value if the input was a
3720  // smi. We set it to the exponent value before bailing out.
3721  __ vmov(single_scratch, exponent);
3722  __ vcvt_f64_s32(double_exponent, single_scratch);
3723 
3724  // Returning or bailing out.
3725  Counters* counters = masm->isolate()->counters();
3726  if (exponent_type_ == ON_STACK) {
3727  // The arguments are still on the stack.
3728  __ bind(&call_runtime);
3729  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
3730 
3731  // The stub is called from non-optimized code, which expects the result
3732  // as heap number in exponent.
3733  __ bind(&done);
3734  __ AllocateHeapNumber(
3735  heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
3736  __ vstr(double_result,
3737  FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
3738  ASSERT(heapnumber.is(r0));
3739  __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
3740  __ Ret(2);
3741  } else {
3742  __ push(lr);
3743  {
3744  AllowExternalCallThatCantCauseGC scope(masm);
3745  __ PrepareCallCFunction(0, 2, scratch);
3746  __ SetCallCDoubleArguments(double_base, double_exponent);
3747  __ CallCFunction(
3748  ExternalReference::power_double_double_function(masm->isolate()),
3749  0, 2);
3750  }
3751  __ pop(lr);
3752  __ GetCFunctionDoubleResult(double_result);
3753 
3754  __ bind(&done);
3755  __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
3756  __ Ret();
3757  }
3758 }
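
// The &while_true loop above is exponentiation by squaring over the absolute
// value of the exponent, followed by a reciprocal for negative exponents.
// A minimal C++ sketch of the same algorithm; PowByInt is an illustrative
// name, not a V8 function, and the subnormal re-check that follows the vdiv
// in the stub is omitted here.
static double PowByInt(double base, int exponent) {
  double result = 1.0;               // vmov(double_result, 1.0, scratch2)
  double b = base;                   // vmov(double_scratch, double_base)
  int e = exponent < 0 ? -exponent : exponent;  // absolute value of exponent
  while (e != 0) {
    if (e & 1) result *= b;          // conditional vmul when the shifted-out
                                     // bit (carry) is set
    b *= b;                          // vmul(double_scratch, double_scratch, ..)
    e >>= 1;                         // mov(scratch, Operand(scratch, ASR, 1))
  }
  if (exponent < 0) result = 1.0 / result;  // vdiv after the loop
  return result;
}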
3759 
3760 
3761 bool CEntryStub::NeedsImmovableCode() {
3762  return true;
3763 }
3764 
3765 
3766 bool CEntryStub::IsPregenerated() {
3767  return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
3768  result_size_ == 1;
3769 }
3770 
3771 
3772 void CodeStub::GenerateStubsAheadOfTime() {
3777 }
3778 
3779 
3780 void CodeStub::GenerateFPStubs() {
3781  CEntryStub save_doubles(1, kSaveFPRegs);
3782  Handle<Code> code = save_doubles.GetCode();
3783  code->set_is_pregenerated(true);
3784  StoreBufferOverflowStub stub(kSaveFPRegs);
3785  stub.GetCode()->set_is_pregenerated(true);
3786  code->GetIsolate()->set_fp_stubs_generated(true);
3787 }
3788 
3789 
3790 void CEntryStub::GenerateAheadOfTime() {
3791  CEntryStub stub(1, kDontSaveFPRegs);
3792  Handle<Code> code = stub.GetCode();
3793  code->set_is_pregenerated(true);
3794 }
3795 
3796 
3797 void CEntryStub::GenerateCore(MacroAssembler* masm,
3798  Label* throw_normal_exception,
3799  Label* throw_termination_exception,
3800  Label* throw_out_of_memory_exception,
3801  bool do_gc,
3802  bool always_allocate) {
3803  // r0: result parameter for PerformGC, if any
3804  // r4: number of arguments including receiver (C callee-saved)
3805  // r5: pointer to builtin function (C callee-saved)
3806  // r6: pointer to the first argument (C callee-saved)
3807  Isolate* isolate = masm->isolate();
3808 
3809  if (do_gc) {
3810  // Passing r0.
3811  __ PrepareCallCFunction(1, 0, r1);
3812  __ CallCFunction(ExternalReference::perform_gc_function(isolate),
3813  1, 0);
3814  }
3815 
3816  ExternalReference scope_depth =
3817  ExternalReference::heap_always_allocate_scope_depth(isolate);
3818  if (always_allocate) {
3819  __ mov(r0, Operand(scope_depth));
3820  __ ldr(r1, MemOperand(r0));
3821  __ add(r1, r1, Operand(1));
3822  __ str(r1, MemOperand(r0));
3823  }
3824 
3825  // Call C built-in.
3826  // r0 = argc, r1 = argv
3827  __ mov(r0, Operand(r4));
3828  __ mov(r1, Operand(r6));
3829 
3830 #if defined(V8_HOST_ARCH_ARM)
3831  int frame_alignment = MacroAssembler::ActivationFrameAlignment();
3832  int frame_alignment_mask = frame_alignment - 1;
3833  if (FLAG_debug_code) {
3834  if (frame_alignment > kPointerSize) {
3835  Label alignment_as_expected;
3836  ASSERT(IsPowerOf2(frame_alignment));
3837  __ tst(sp, Operand(frame_alignment_mask));
3838  __ b(eq, &alignment_as_expected);
3839  // Don't use Check here, as it will call Runtime_Abort and re-enter this code.
3840  __ stop("Unexpected alignment");
3841  __ bind(&alignment_as_expected);
3842  }
3843  }
3844 #endif
3845 
3846  __ mov(r2, Operand(ExternalReference::isolate_address()));
3847 
3848  // To let the GC traverse the return address of the exit frames, we need to
3849  // know where the return address is. The CEntryStub is unmovable, so
3850  // we can store the address on the stack to be able to find it again and
3851  // we never have to restore it, because it will not change.
3852  // Compute the return address in lr to return to after the jump below. Pc is
3853  // already at '+ 8' from the current instruction but return is after three
3854  // instructions so add another 4 to pc to get the return address.
3855  {
3856  // Prevent literal pool emission before return address.
3857  Assembler::BlockConstPoolScope block_const_pool(masm);
3858  masm->add(lr, pc, Operand(4));
3859  __ str(lr, MemOperand(sp, 0));
3860  masm->Jump(r5);
3861  }
3862 
3863  if (always_allocate) {
3864  // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
3865  // though (they contain the result).
3866  __ mov(r2, Operand(scope_depth));
3867  __ ldr(r3, MemOperand(r2));
3868  __ sub(r3, r3, Operand(1));
3869  __ str(r3, MemOperand(r2));
3870  }
3871 
3872  // check for failure result
3873  Label failure_returned;
3874  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
3875  // Lower 2 bits of r2 are 0 iff r0 has failure tag.
3876  __ add(r2, r0, Operand(1));
3877  __ tst(r2, Operand(kFailureTagMask));
3878  __ b(eq, &failure_returned);
3879 
3880  // Exit C frame and return.
3881  // r0:r1: result
3882  // sp: stack pointer
3883  // fp: frame pointer
3884  // Callee-saved register r4 still holds argc.
3885  __ LeaveExitFrame(save_doubles_, r4);
3886  __ mov(pc, lr);
3887 
3888  // check if we should retry or throw exception
3889  Label retry;
3890  __ bind(&failure_returned);
3891  STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
3892  __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
3893  __ b(eq, &retry);
3894 
3895  // Special handling of out of memory exceptions.
3896  Failure* out_of_memory = Failure::OutOfMemoryException();
3897  __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
3898  __ b(eq, throw_out_of_memory_exception);
3899 
3900  // Retrieve the pending exception and clear the variable.
3901  __ mov(r3, Operand(isolate->factory()->the_hole_value()));
3902  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
3903  isolate)));
3904  __ ldr(r0, MemOperand(ip));
3905  __ str(r3, MemOperand(ip));
3906 
3907  // Special handling of termination exceptions which are uncatchable
3908  // by javascript code.
3909  __ cmp(r0, Operand(isolate->factory()->termination_exception()));
3910  __ b(eq, throw_termination_exception);
3911 
3912  // Handle normal exception.
3913  __ jmp(throw_normal_exception);
3914 
3915  __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying
3916 }
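
// The failure check above relies on the STATIC_ASSERT that
// (kFailureTag + 1) & kFailureTagMask == 0: adding 1 to a failure-tagged word
// clears its low tag bits, so one tst instruction separates failures from
// ordinary results. A hedged sketch of the same predicate; IsFailureResult is
// an illustrative name.
static inline bool IsFailureResult(unsigned result, unsigned failure_tag_mask) {
  // Mirrors: add(r2, r0, 1); tst(r2, kFailureTagMask); b(eq, &failure_returned)
  return ((result + 1) & failure_tag_mask) == 0;
}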
3917 
3918 
3919 void CEntryStub::Generate(MacroAssembler* masm) {
3920  // Called from JavaScript; parameters are on stack as if calling JS function
3921  // r0: number of arguments including receiver
3922  // r1: pointer to builtin function
3923  // fp: frame pointer (restored after C call)
3924  // sp: stack pointer (restored as callee's sp after C call)
3925  // cp: current context (C callee-saved)
3926 
3927  // Result returned in r0 or r0+r1 by default.
3928 
3929  // NOTE: Invocations of builtins may return failure objects
3930  // instead of a proper result. The builtin entry handles
3931  // this by performing a garbage collection and retrying the
3932  // builtin once.
3933 
3934  // Compute the argv pointer in a callee-saved register.
3935  __ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
3936  __ sub(r6, r6, Operand(kPointerSize));
3937 
3938  // Enter the exit frame that transitions from JavaScript to C++.
3939  FrameScope scope(masm, StackFrame::MANUAL);
3940  __ EnterExitFrame(save_doubles_);
3941 
3942  // Set up argc and the builtin function in callee-saved registers.
3943  __ mov(r4, Operand(r0));
3944  __ mov(r5, Operand(r1));
3945 
3946  // r4: number of arguments (C callee-saved)
3947  // r5: pointer to builtin function (C callee-saved)
3948  // r6: pointer to first argument (C callee-saved)
3949 
3950  Label throw_normal_exception;
3951  Label throw_termination_exception;
3952  Label throw_out_of_memory_exception;
3953 
3954  // Call into the runtime system.
3955  GenerateCore(masm,
3956  &throw_normal_exception,
3957  &throw_termination_exception,
3958  &throw_out_of_memory_exception,
3959  false,
3960  false);
3961 
3962  // Do space-specific GC and retry runtime call.
3963  GenerateCore(masm,
3964  &throw_normal_exception,
3965  &throw_termination_exception,
3966  &throw_out_of_memory_exception,
3967  true,
3968  false);
3969 
3970  // Do full GC and retry runtime call one final time.
3971  Failure* failure = Failure::InternalError();
3972  __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
3973  GenerateCore(masm,
3974  &throw_normal_exception,
3975  &throw_termination_exception,
3976  &throw_out_of_memory_exception,
3977  true,
3978  true);
3979 
3980  __ bind(&throw_out_of_memory_exception);
3981  // Set external caught exception to false.
3982  Isolate* isolate = masm->isolate();
3983  ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
3984  isolate);
3985  __ mov(r0, Operand(false, RelocInfo::NONE));
3986  __ mov(r2, Operand(external_caught));
3987  __ str(r0, MemOperand(r2));
3988 
3989  // Set pending exception and r0 to out of memory exception.
3990  Failure* out_of_memory = Failure::OutOfMemoryException();
3991  __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
3992  __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
3993  isolate)));
3994  __ str(r0, MemOperand(r2));
3995  // Fall through to the next label.
3996 
3997  __ bind(&throw_termination_exception);
3998  __ ThrowUncatchable(r0);
3999 
4000  __ bind(&throw_normal_exception);
4001  __ Throw(r0);
4002 }
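
// At run time the three GenerateCore expansions above behave as an escalating
// retry chain: each failed attempt falls through (via its &retry label) into
// the next expansion, which performs progressively more aggressive garbage
// collection before calling the builtin again. A hedged C++ sketch of that
// control flow; the function and parameter names are illustrative, not V8
// APIs.
static int RunWithRetries(int (*attempt)(),          // the builtin call
                          bool (*is_retry)(int),     // RETRY_AFTER_GC failure?
                          void (*collect_some)(),    // space-specific GC
                          void (*collect_all)()) {   // full GC
  int result = attempt();                 // GenerateCore(..., false, false)
  if (is_retry(result)) {
    collect_some();                       // GenerateCore(..., true, false)
    result = attempt();
  }
  if (is_retry(result)) {
    collect_all();                        // GenerateCore(..., true, true),
    result = attempt();                   // with always_allocate == true
  }
  return result;  // remaining failures reach the throw labels bound above
}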
4003 
4004 
4005 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
4006  // r0: code entry
4007  // r1: function
4008  // r2: receiver
4009  // r3: argc
4010  // [sp+0]: argv
4011 
4012  Label invoke, handler_entry, exit;
4013 
4014  // Called from C, so do not pop argc and args on exit (preserve sp)
4015  // No need to save register-passed args
4016  // Save callee-saved registers (incl. cp and fp), sp, and lr
4017  __ stm(db_w, sp, kCalleeSaved | lr.bit());
4018 
4019  if (CpuFeatures::IsSupported(VFP2)) {
4020  CpuFeatures::Scope scope(VFP2);
4021  // Save callee-saved vfp registers.
4022  __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
4023  // Set up the reserved register for 0.0.
4024  __ vmov(kDoubleRegZero, 0.0);
4025  }
4026 
4027  // Get address of argv, see stm above.
4028  // r0: code entry
4029  // r1: function
4030  // r2: receiver
4031  // r3: argc
4032 
4033  // Set up argv in r4.
4034  int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
4035  if (CpuFeatures::IsSupported(VFP2)) {
4036  offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
4037  }
4038  __ ldr(r4, MemOperand(sp, offset_to_argv));
4039 
4040  // Push a frame with special values setup to mark it as an entry frame.
4041  // r0: code entry
4042  // r1: function
4043  // r2: receiver
4044  // r3: argc
4045  // r4: argv
4046  Isolate* isolate = masm->isolate();
4047  __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
4048  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
4049  __ mov(r7, Operand(Smi::FromInt(marker)));
4050  __ mov(r6, Operand(Smi::FromInt(marker)));
4051  __ mov(r5,
4052  Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
4053  __ ldr(r5, MemOperand(r5));
4054  __ Push(r8, r7, r6, r5);
4055 
4056  // Set up frame pointer for the frame to be pushed.
4057  __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
4058 
4059  // If this is the outermost JS call, set js_entry_sp value.
4060  Label non_outermost_js;
4061  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
4062  __ mov(r5, Operand(ExternalReference(js_entry_sp)));
4063  __ ldr(r6, MemOperand(r5));
4064  __ cmp(r6, Operand::Zero());
4065  __ b(ne, &non_outermost_js);
4066  __ str(fp, MemOperand(r5));
4067  __ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
4068  Label cont;
4069  __ b(&cont);
4070  __ bind(&non_outermost_js);
4071  __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
4072  __ bind(&cont);
4073  __ push(ip);
4074 
4075  // Jump to a faked try block that does the invoke, with a faked catch
4076  // block that sets the pending exception.
4077  __ jmp(&invoke);
4078 
4079  // Block literal pool emission whilst taking the position of the handler
4080  // entry. This avoids making the assumption that literal pools are always
4081  // emitted after an instruction is emitted, rather than before.
4082  {
4083  Assembler::BlockConstPoolScope block_const_pool(masm);
4084  __ bind(&handler_entry);
4085  handler_offset_ = handler_entry.pos();
4086  // Caught exception: Store result (exception) in the pending exception
4087  // field in the JSEnv and return a failure sentinel. Coming in here the
4088  // fp will be invalid because the PushTryHandler below sets it to 0 to
4089  // signal the existence of the JSEntry frame.
4090  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
4091  isolate)));
4092  }
4093  __ str(r0, MemOperand(ip));
4094  __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
4095  __ b(&exit);
4096 
4097  // Invoke: Link this frame into the handler chain. There's only one
4098  // handler block in this code object, so its index is 0.
4099  __ bind(&invoke);
4100  // Must preserve r0-r4, r5-r7 are available.
4101  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
4102  // If an exception not caught by another handler occurs, this handler
4103  // returns control to the code after the bl(&invoke) above, which
4104  // restores all kCalleeSaved registers (including cp and fp) to their
4105  // saved values before returning a failure to C.
4106 
4107  // Clear any pending exceptions.
4108  __ mov(r5, Operand(isolate->factory()->the_hole_value()));
4109  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
4110  isolate)));
4111  __ str(r5, MemOperand(ip));
4112 
4113  // Invoke the function by calling through JS entry trampoline builtin.
4114  // Notice that we cannot store a reference to the trampoline code directly in
4115  // this stub, because runtime stubs are not traversed when doing GC.
4116 
4117  // Expected registers by Builtins::JSEntryTrampoline
4118  // r0: code entry
4119  // r1: function
4120  // r2: receiver
4121  // r3: argc
4122  // r4: argv
4123  if (is_construct) {
4124  ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
4125  isolate);
4126  __ mov(ip, Operand(construct_entry));
4127  } else {
4128  ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
4129  __ mov(ip, Operand(entry));
4130  }
4131  __ ldr(ip, MemOperand(ip)); // deref address
4132 
4133  // Branch and link to JSEntryTrampoline. We don't use the double underscore
4134  // macro for the add instruction because we don't want the coverage tool
4135  // inserting instructions here after we read the pc. We block literal pool
4136  // emission for the same reason.
4137  {
4138  Assembler::BlockConstPoolScope block_const_pool(masm);
4139  __ mov(lr, Operand(pc));
4140  masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
4141  }
4142 
4143  // Unlink this frame from the handler chain.
4144  __ PopTryHandler();
4145 
4146  __ bind(&exit); // r0 holds result
4147  // Check if the current stack frame is marked as the outermost JS frame.
4148  Label non_outermost_js_2;
4149  __ pop(r5);
4150  __ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
4151  __ b(ne, &non_outermost_js_2);
4152  __ mov(r6, Operand::Zero());
4153  __ mov(r5, Operand(ExternalReference(js_entry_sp)));
4154  __ str(r6, MemOperand(r5));
4155  __ bind(&non_outermost_js_2);
4156 
4157  // Restore the top frame descriptors from the stack.
4158  __ pop(r3);
4159  __ mov(ip,
4160  Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
4161  __ str(r3, MemOperand(ip));
4162 
4163  // Reset the stack to the callee saved registers.
4164  __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
4165 
4166  // Restore callee-saved registers and return.
4167 #ifdef DEBUG
4168  if (FLAG_debug_code) {
4169  __ mov(lr, Operand(pc));
4170  }
4171 #endif
4172 
4173  if (CpuFeatures::IsSupported(VFP2)) {
4174  CpuFeatures::Scope scope(VFP2);
4175  // Restore callee-saved vfp registers.
4176  __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
4177  }
4178 
4179  __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
4180 }
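
// The js_entry_sp bookkeeping above records the frame pointer of the first
// (outermost) JS entry and tags the frame OUTERMOST_JSENTRY_FRAME; nested
// entries are tagged INNER_JSENTRY_FRAME and leave js_entry_sp untouched, and
// only the outermost entry clears it again at &exit. A hedged C++ sketch of
// that pairing; EntryScope is an illustrative class, not part of V8.
struct EntryScope {
  void** js_entry_sp_;  // the cell behind Isolate::kJSEntrySPAddress
  bool outermost_;
  EntryScope(void** js_entry_sp, void* fp)
      : js_entry_sp_(js_entry_sp), outermost_(*js_entry_sp == 0) {
    if (outermost_) *js_entry_sp_ = fp;   // str(fp, MemOperand(r5)) above
  }
  ~EntryScope() {
    if (outermost_) *js_entry_sp_ = 0;    // str(r6, MemOperand(r5)) at &exit
  }
};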
4181 
4182 
4183 // Uses registers r0 to r4.
4184 // Expected input (depending on whether args are in registers or on the stack):
4185 // * object: r0 or at sp + 1 * kPointerSize.
4186 // * function: r1 or at sp.
4187 //
4188 // An inlined call site may have been generated before calling this stub.
4189 // In this case the offset to the inline site to patch is passed on the stack,
4190 // in the safepoint slot for register r4.
4191 // (See LCodeGen::DoInstanceOfKnownGlobal)
4192 void InstanceofStub::Generate(MacroAssembler* masm) {
4193  // Call site inlining and patching implies arguments in registers.
4194  ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
4195  // ReturnTrueFalse is only implemented for inlined call sites.
4196  ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
4197 
4198  // Fixed register usage throughout the stub:
4199  const Register object = r0; // Object (lhs).
4200  Register map = r3; // Map of the object.
4201  const Register function = r1; // Function (rhs).
4202  const Register prototype = r4; // Prototype of the function.
4203  const Register inline_site = r9;
4204  const Register scratch = r2;
4205 
4206  const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize;
4207 
4208  Label slow, loop, is_instance, is_not_instance, not_js_object;
4209 
4210  if (!HasArgsInRegisters()) {
4211  __ ldr(object, MemOperand(sp, 1 * kPointerSize));
4212  __ ldr(function, MemOperand(sp, 0));
4213  }
4214 
4215  // Check that the left hand is a JS object and load map.
4216  __ JumpIfSmi(object, &not_js_object);
4217  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
4218 
4219  // If there is a call site cache don't look in the global cache, but do the
4220  // real lookup and update the call site cache.
4221  if (!HasCallSiteInlineCheck()) {
4222  Label miss;
4223  __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
4224  __ b(ne, &miss);
4225  __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
4226  __ b(ne, &miss);
4227  __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
4228  __ Ret(HasArgsInRegisters() ? 0 : 2);
4229 
4230  __ bind(&miss);
4231  }
4232 
4233  // Get the prototype of the function.
4234  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
4235 
4236  // Check that the function prototype is a JS object.
4237  __ JumpIfSmi(prototype, &slow);
4238  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
4239 
4240  // Update the global instanceof or call site inlined cache with the current
4241  // map and function. The cached answer will be set when it is known below.
4242  if (!HasCallSiteInlineCheck()) {
4243  __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
4244  __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
4245  } else {
4246  ASSERT(HasArgsInRegisters());
4247  // Patch the (relocated) inlined map check.
4248 
4249  // The offset was stored in r4 safepoint slot.
4250  // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
4251  __ LoadFromSafepointRegisterSlot(scratch, r4);
4252  __ sub(inline_site, lr, scratch);
4253  // Get the map location in scratch and patch it.
4254  __ GetRelocatedValueLocation(inline_site, scratch);
4255  __ ldr(scratch, MemOperand(scratch));
4256  __ str(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
4257  }
4258 
4259  // Register mapping: r3 is object map and r4 is function prototype.
4260  // Get prototype of object into r2.
4261  __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
4262 
4263  // We don't need map any more. Use it as a scratch register.
4264  Register scratch2 = map;
4265  map = no_reg;
4266 
4267  // Loop through the prototype chain looking for the function prototype.
4268  __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
4269  __ bind(&loop);
4270  __ cmp(scratch, Operand(prototype));
4271  __ b(eq, &is_instance);
4272  __ cmp(scratch, scratch2);
4273  __ b(eq, &is_not_instance);
4274  __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
4275  __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
4276  __ jmp(&loop);
4277 
4278  __ bind(&is_instance);
4279  if (!HasCallSiteInlineCheck()) {
4280  __ mov(r0, Operand(Smi::FromInt(0)));
4281  __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
4282  } else {
4283  // Patch the call site to return true.
4284  __ LoadRoot(r0, Heap::kTrueValueRootIndex);
4285  __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
4286  // Get the boolean result location in scratch and patch it.
4287  __ GetRelocatedValueLocation(inline_site, scratch);
4288  __ str(r0, MemOperand(scratch));
4289 
4290  if (!ReturnTrueFalseObject()) {
4291  __ mov(r0, Operand(Smi::FromInt(0)));
4292  }
4293  }
4294  __ Ret(HasArgsInRegisters() ? 0 : 2);
4295 
4296  __ bind(&is_not_instance);
4297  if (!HasCallSiteInlineCheck()) {
4298  __ mov(r0, Operand(Smi::FromInt(1)));
4299  __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
4300  } else {
4301  // Patch the call site to return false.
4302  __ LoadRoot(r0, Heap::kFalseValueRootIndex);
4303  __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
4304  // Get the boolean result location in scratch and patch it.
4305  __ GetRelocatedValueLocation(inline_site, scratch);
4306  __ str(r0, MemOperand(scratch));
4307 
4308  if (!ReturnTrueFalseObject()) {
4309  __ mov(r0, Operand(Smi::FromInt(1)));
4310  }
4311  }
4312  __ Ret(HasArgsInRegisters() ? 0 : 2);
4313 
4314  Label object_not_null, object_not_null_or_smi;
4315  __ bind(&not_js_object);
4316  // Before null, smi and string value checks, check that the rhs is a
4317  // function, because for a non-function rhs an exception needs to be thrown.
4318  __ JumpIfSmi(function, &slow);
4319  __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE);
4320  __ b(ne, &slow);
4321 
4322  // Null is not instance of anything.
4323  __ cmp(scratch, Operand(masm->isolate()->factory()->null_value()));
4324  __ b(ne, &object_not_null);
4325  __ mov(r0, Operand(Smi::FromInt(1)));
4326  __ Ret(HasArgsInRegisters() ? 0 : 2);
4327 
4328  __ bind(&object_not_null);
4329  // Smi values are not instances of anything.
4330  __ JumpIfNotSmi(object, &object_not_null_or_smi);
4331  __ mov(r0, Operand(Smi::FromInt(1)));
4332  __ Ret(HasArgsInRegisters() ? 0 : 2);
4333 
4334  __ bind(&object_not_null_or_smi);
4335  // String values are not instances of anything.
4336  __ IsObjectJSStringType(object, scratch, &slow);
4337  __ mov(r0, Operand(Smi::FromInt(1)));
4338  __ Ret(HasArgsInRegisters() ? 0 : 2);
4339 
4340  // Slow-case. Tail call builtin.
4341  __ bind(&slow);
4342  if (!ReturnTrueFalseObject()) {
4343  if (HasArgsInRegisters()) {
4344  __ Push(r0, r1);
4345  }
4346  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
4347  } else {
4348  {
4349  FrameScope scope(masm, StackFrame::INTERNAL);
4350  __ Push(r0, r1);
4351  __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
4352  }
4353  __ cmp(r0, Operand::Zero());
4354  __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
4355  __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
4356  __ Ret(HasArgsInRegisters() ? 0 : 2);
4357  }
4358 }
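
// The &loop above is the core of instanceof: walk the object's prototype
// chain until it reaches either function.prototype (instance) or null (not an
// instance). Note the answer encoding used by the non-inlined path: Smi 0 for
// true and Smi 1 for false. A hedged C++ sketch of the walk; ProtoObject and
// IsInstance are illustrative stand-ins for the map/prototype loads in the
// loop.
struct ProtoObject {
  ProtoObject* prototype;  // read via HeapObject::kMapOffset then
                           // Map::kPrototypeOffset in the loop
};

static bool IsInstance(ProtoObject* object_prototype,
                       ProtoObject* function_prototype,
                       ProtoObject* null_sentinel) {
  // object_prototype starts as the object's map->prototype, loaded before
  // &loop; null_sentinel plays the role of Heap::kNullValueRootIndex.
  for (ProtoObject* p = object_prototype; ; p = p->prototype) {
    if (p == function_prototype) return true;  // &is_instance
    if (p == null_sentinel) return false;      // &is_not_instance
  }
}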
4359 
4360 
4361 Register InstanceofStub::left() { return r0; }
4362 
4363 
4364 Register InstanceofStub::right() { return r1; }
4365 
4366 
4367 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
4368  // The displacement is the offset of the last parameter (if any)
4369  // relative to the frame pointer.
4370  const int kDisplacement =
4371  StandardFrameConstants::kCallerSPOffset - kPointerSize;
4372 
4373  // Check that the key is a smi.
4374  Label slow;
4375  __ JumpIfNotSmi(r1, &slow);
4376 
4377  // Check if the calling frame is an arguments adaptor frame.
4378  Label adaptor;
4382  __ b(eq, &adaptor);
4383 
4384  // Check index against formal parameters count limit passed in
4385  // through register r0. Use unsigned comparison to get negative
4386  // check for free.
4387  __ cmp(r1, r0);
4388  __ b(hs, &slow);
4389 
4390  // Read the argument from the stack and return it.
4391  __ sub(r3, r0, r1);
4392  __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
4393  __ ldr(r0, MemOperand(r3, kDisplacement));
4394  __ Jump(lr);
4395 
4396  // Arguments adaptor case: Check index against actual arguments
4397  // limit found in the arguments adaptor frame. Use unsigned
4398  // comparison to get negative check for free.
4399  __ bind(&adaptor);
4401  __ cmp(r1, r0);
4402  __ b(cs, &slow);
4403 
4404  // Read the argument from the adaptor frame and return it.
4405  __ sub(r3, r0, r1);
4406  __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
4407  __ ldr(r0, MemOperand(r3, kDisplacement));
4408  __ Jump(lr);
4409 
4410  // Slow-case: Handle non-smi or out-of-bounds access to arguments
4411  // by calling the runtime system.
4412  __ bind(&slow);
4413  __ push(r1);
4414  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
4415 }
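
// The "unsigned comparison to get negative check for free" above works
// because a negative index reinterpreted as unsigned becomes a huge value, so
// the single hs/cs branch rejects both index < 0 and index >= length. The
// element address is then fp + (argc - index) * kPointerSize + kDisplacement.
// A hedged C++ sketch with illustrative names; kPointerSize is 4 on ARM.
static bool ArgumentIndexInBounds(unsigned index, unsigned length) {
  // cmp(r1, r0); b(hs, &slow) -- taken for index >= length and for negative
  // indices that wrapped around to large unsigned values.
  return index < length;
}

static void* ArgumentAddress(char* fp, int argc, int index, int displacement) {
  // sub(r3, r0, r1); add(r3, fp, r3 << (kPointerSizeLog2 - kSmiTagSize));
  // with smi-tagged argc/index this is fp + (argc - index) * kPointerSize.
  return fp + (argc - index) * 4 + displacement;
}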
4416 
4417 
4418 void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
4419  // sp[0] : number of parameters
4420  // sp[4] : receiver displacement
4421  // sp[8] : function
4422 
4423  // Check if the calling frame is an arguments adaptor frame.
4424  Label runtime;
4428  __ b(ne, &runtime);
4429 
4430  // Patch the arguments.length and the parameters pointer in the current frame.
4432  __ str(r2, MemOperand(sp, 0 * kPointerSize));
4433  __ add(r3, r3, Operand(r2, LSL, 1));
4435  __ str(r3, MemOperand(sp, 1 * kPointerSize));
4436 
4437  __ bind(&runtime);
4438  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4439 }
4440 
4441 
4442 void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
4443  // Stack layout:
4444  // sp[0] : number of parameters (tagged)
4445  // sp[4] : address of receiver argument
4446  // sp[8] : function
4447  // Registers used over whole function:
4448  // r6 : allocated object (tagged)
4449  // r9 : mapped parameter count (tagged)
4450 
4451  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
4452  // r1 = parameter count (tagged)
4453 
4454  // Check if the calling frame is an arguments adaptor frame.
4455  Label runtime;
4456  Label adaptor_frame, try_allocate;
4460  __ b(eq, &adaptor_frame);
4461 
4462  // No adaptor, parameter count = argument count.
4463  __ mov(r2, r1);
4464  __ b(&try_allocate);
4465 
4466  // We have an adaptor frame. Patch the parameters pointer.
4467  __ bind(&adaptor_frame);
4469  __ add(r3, r3, Operand(r2, LSL, 1));
4471  __ str(r3, MemOperand(sp, 1 * kPointerSize));
4472 
4473  // r1 = parameter count (tagged)
4474  // r2 = argument count (tagged)
4475  // Compute the mapped parameter count = min(r1, r2) in r1.
4476  __ cmp(r1, Operand(r2));
4477  __ mov(r1, Operand(r2), LeaveCC, gt);
4478 
4479  __ bind(&try_allocate);
4480 
4481  // Compute the sizes of backing store, parameter map, and arguments object.
4482  // 1. Parameter map, has 2 extra words containing context and backing store.
4483  const int kParameterMapHeaderSize =
4485  // If there are no mapped parameters, we do not need the parameter_map.
4486  __ cmp(r1, Operand(Smi::FromInt(0)));
4487  __ mov(r9, Operand::Zero(), LeaveCC, eq);
4488  __ mov(r9, Operand(r1, LSL, 1), LeaveCC, ne);
4489  __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);
4490 
4491  // 2. Backing store.
4492  __ add(r9, r9, Operand(r2, LSL, 1));
4493  __ add(r9, r9, Operand(FixedArray::kHeaderSize));
4494 
4495  // 3. Arguments object.
4496  __ add(r9, r9, Operand(Heap::kArgumentsObjectSize));
4497 
4498  // Do the allocation of all three objects in one go.
4499  __ AllocateInNewSpace(r9, r0, r3, r4, &runtime, TAG_OBJECT);
4500 
4501  // r0 = address of new object(s) (tagged)
4502  // r2 = argument count (tagged)
4503  // Get the arguments boilerplate from the current native context into r4.
4504  const int kNormalOffset =
4506  const int kAliasedOffset =
4508 
4511  __ cmp(r1, Operand::Zero());
4512  __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
4513  __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
4514 
4515  // r0 = address of new object (tagged)
4516  // r1 = mapped parameter count (tagged)
4517  // r2 = argument count (tagged)
4518  // r4 = address of boilerplate object (tagged)
4519  // Copy the JS object part.
4520  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
4521  __ ldr(r3, FieldMemOperand(r4, i));
4522  __ str(r3, FieldMemOperand(r0, i));
4523  }
4524 
4525  // Set up the callee in-object property.
4527  __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
4528  const int kCalleeOffset = JSObject::kHeaderSize +
4530  __ str(r3, FieldMemOperand(r0, kCalleeOffset));
4531 
4532  // Use the length (smi tagged) and set that as an in-object property too.
4534  const int kLengthOffset = JSObject::kHeaderSize +
4536  __ str(r2, FieldMemOperand(r0, kLengthOffset));
4537 
4538  // Set up the elements pointer in the allocated arguments object.
4539  // If we allocated a parameter map, r4 will point there, otherwise
4540  // it will point to the backing store.
4541  __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
4543 
4544  // r0 = address of new object (tagged)
4545  // r1 = mapped parameter count (tagged)
4546  // r2 = argument count (tagged)
4547  // r4 = address of parameter map or backing store (tagged)
4548  // Initialize parameter map. If there are no mapped arguments, we're done.
4549  Label skip_parameter_map;
4550  __ cmp(r1, Operand(Smi::FromInt(0)));
4551  // Move backing store address to r3, because it is
4552  // expected there when filling in the unmapped arguments.
4553  __ mov(r3, r4, LeaveCC, eq);
4554  __ b(eq, &skip_parameter_map);
4555 
4556  __ LoadRoot(r6, Heap::kNonStrictArgumentsElementsMapRootIndex);
4558  __ add(r6, r1, Operand(Smi::FromInt(2)));
4560  __ str(r8, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
4561  __ add(r6, r4, Operand(r1, LSL, 1));
4562  __ add(r6, r6, Operand(kParameterMapHeaderSize));
4563  __ str(r6, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));
4564 
4565  // Copy the parameter slots and the holes in the arguments.
4566  // We need to fill in mapped_parameter_count slots. They index the context,
4567  // where parameters are stored in reverse order, at
4568  // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
4569  // The mapped parameters thus need to get indices
4570  // MIN_CONTEXT_SLOTS+parameter_count-1 ..
4571  // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
4572  // We loop from right to left.
4573  Label parameters_loop, parameters_test;
4574  __ mov(r6, r1);
4575  __ ldr(r9, MemOperand(sp, 0 * kPointerSize));
4576  __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
4577  __ sub(r9, r9, Operand(r1));
4578  __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
4579  __ add(r3, r4, Operand(r6, LSL, 1));
4580  __ add(r3, r3, Operand(kParameterMapHeaderSize));
4581 
4582  // r6 = loop variable (tagged)
4583  // r1 = mapping index (tagged)
4584  // r3 = address of backing store (tagged)
4585  // r4 = address of parameter map (tagged)
4586  // r5 = temporary scratch (a.o., for address calculation)
4587  // r7 = the hole value
4588  __ jmp(&parameters_test);
4589 
4590  __ bind(&parameters_loop);
4591  __ sub(r6, r6, Operand(Smi::FromInt(1)));
4592  __ mov(r5, Operand(r6, LSL, 1));
4593  __ add(r5, r5, Operand(kParameterMapHeaderSize - kHeapObjectTag));
4594  __ str(r9, MemOperand(r4, r5));
4595  __ sub(r5, r5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
4596  __ str(r7, MemOperand(r3, r5));
4597  __ add(r9, r9, Operand(Smi::FromInt(1)));
4598  __ bind(&parameters_test);
4599  __ cmp(r6, Operand(Smi::FromInt(0)));
4600  __ b(ne, &parameters_loop);
4601 
4602  __ bind(&skip_parameter_map);
4603  // r2 = argument count (tagged)
4604  // r3 = address of backing store (tagged)
4605  // r5 = scratch
4606  // Copy arguments header and remaining slots (if there are any).
4607  __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex);
4610 
4611  Label arguments_loop, arguments_test;
4612  __ mov(r9, r1);
4613  __ ldr(r4, MemOperand(sp, 1 * kPointerSize));
4614  __ sub(r4, r4, Operand(r9, LSL, 1));
4615  __ jmp(&arguments_test);
4616 
4617  __ bind(&arguments_loop);
4618  __ sub(r4, r4, Operand(kPointerSize));
4619  __ ldr(r6, MemOperand(r4, 0));
4620  __ add(r5, r3, Operand(r9, LSL, 1));
4622  __ add(r9, r9, Operand(Smi::FromInt(1)));
4623 
4624  __ bind(&arguments_test);
4625  __ cmp(r9, Operand(r2));
4626  __ b(lt, &arguments_loop);
4627 
4628  // Return and remove the on-stack parameters.
4629  __ add(sp, sp, Operand(3 * kPointerSize));
4630  __ Ret();
4631 
4632  // Do the runtime call to allocate the arguments object.
4633  // r2 = argument count (tagged)
4634  __ bind(&runtime);
4635  __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
4636  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4637 }
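
// All three pieces of a non-strict arguments object -- the optional parameter
// map, the backing store and the JSObject itself -- are carved out of the
// single new-space allocation above. A hedged C++ sketch of the size
// computation performed in r9; the function name is illustrative and the
// header sizes are passed in rather than taken from V8 headers. kPointerSize
// is 4 on ARM.
static int NonStrictArgumentsSizeInBytes(int mapped_count,       // r1, untagged
                                         int argument_count,     // r2, untagged
                                         int parameter_map_header_size,
                                         int fixed_array_header_size,
                                         int arguments_object_size) {
  int size = 0;
  if (mapped_count > 0) {     // no parameter map when nothing is mapped
    size += mapped_count * 4 + parameter_map_header_size;
  }
  size += argument_count * 4 + fixed_array_header_size;  // backing store
  return size + arguments_object_size;                   // JSObject part
}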
4638 
4639 
4640 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
4641  // sp[0] : number of parameters
4642  // sp[4] : receiver displacement
4643  // sp[8] : function
4644  // Check if the calling frame is an arguments adaptor frame.
4645  Label adaptor_frame, try_allocate, runtime;
4649  __ b(eq, &adaptor_frame);
4650 
4651  // Get the length from the frame.
4652  __ ldr(r1, MemOperand(sp, 0));
4653  __ b(&try_allocate);
4654 
4655  // Patch the arguments.length and the parameters pointer.
4656  __ bind(&adaptor_frame);
4658  __ str(r1, MemOperand(sp, 0));
4659  __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
4661  __ str(r3, MemOperand(sp, 1 * kPointerSize));
4662 
4663  // Try the new space allocation. Start out with computing the size
4664  // of the arguments object and the elements array in words.
4665  Label add_arguments_object;
4666  __ bind(&try_allocate);
4667  __ cmp(r1, Operand(0, RelocInfo::NONE));
4668  __ b(eq, &add_arguments_object);
4669  __ mov(r1, Operand(r1, LSR, kSmiTagSize));
4670  __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
4671  __ bind(&add_arguments_object);
4672  __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
4673 
4674  // Do the allocation of both objects in one go.
4675  __ AllocateInNewSpace(r1,
4676  r0,
4677  r2,
4678  r3,
4679  &runtime,
4680  static_cast<AllocationFlags>(TAG_OBJECT |
4681  SIZE_IN_WORDS));
4682 
4683  // Get the arguments boilerplate from the current native context.
4688 
4689  // Copy the JS object part.
4690  __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize);
4691 
4692  // Get the length (smi tagged) and set that as an in-object property too.
4694  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
4695  __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize +
4696  Heap::kArgumentsLengthIndex * kPointerSize));
4697 
4698  // If there are no actual arguments, we're done.
4699  Label done;
4700  __ cmp(r1, Operand(0, RelocInfo::NONE));
4701  __ b(eq, &done);
4702 
4703  // Get the parameters pointer from the stack.
4704  __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
4705 
4706  // Set up the elements pointer in the allocated arguments object and
4707  // initialize the header in the elements fixed array.
4708  __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict));
4710  __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
4713  // Untag the length for the loop.
4714  __ mov(r1, Operand(r1, LSR, kSmiTagSize));
4715 
4716  // Copy the fixed array slots.
4717  Label loop;
4718  // Set up r4 to point to the first array slot.
4719  __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4720  __ bind(&loop);
4721  // Pre-decrement r2 with kPointerSize on each iteration.
4722  // Pre-decrement in order to skip receiver.
4723  __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
4724  // Post-increment r4 with kPointerSize on each iteration.
4725  __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
4726  __ sub(r1, r1, Operand(1));
4727  __ cmp(r1, Operand(0, RelocInfo::NONE));
4728  __ b(ne, &loop);
4729 
4730  // Return and remove the on-stack parameters.
4731  __ bind(&done);
4732  __ add(sp, sp, Operand(3 * kPointerSize));
4733  __ Ret();
4734 
4735  // Do the runtime call to allocate the arguments object.
4736  __ bind(&runtime);
4737  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
4738 }
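
// Strict-mode arguments objects have no parameter map: the allocation above
// is just the JSObject plus, when there are arguments, a FixedArray for the
// elements, sized in words (note SIZE_IN_WORDS). A hedged sketch of the size
// computed in r1; the name and parameters are illustrative.
static int StrictArgumentsSizeInWords(int argument_count,           // untagged
                                      int fixed_array_header_words,
                                      int arguments_object_words) {
  int size = 0;
  if (argument_count != 0) {       // empty arguments need no elements array
    size = argument_count + fixed_array_header_words;
  }
  return size + arguments_object_words;
}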
4739 
4740 
4741 void RegExpExecStub::Generate(MacroAssembler* masm) {
4742  // Just jump directly to runtime if native RegExp is not selected at compile
4743  // time, or if the regexp entry in generated code has been turned off by a
4744  // runtime switch or at compilation.
4745 #ifdef V8_INTERPRETED_REGEXP
4746  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4747 #else // V8_INTERPRETED_REGEXP
4748 
4749  // Stack frame on entry.
4750  // sp[0]: last_match_info (expected JSArray)
4751  // sp[4]: previous index
4752  // sp[8]: subject string
4753  // sp[12]: JSRegExp object
4754 
4755  const int kLastMatchInfoOffset = 0 * kPointerSize;
4756  const int kPreviousIndexOffset = 1 * kPointerSize;
4757  const int kSubjectOffset = 2 * kPointerSize;
4758  const int kJSRegExpOffset = 3 * kPointerSize;
4759 
4760  Label runtime, invoke_regexp;
4761 
4762  // Allocation of registers for this function. These are in callee save
4763  // registers and will be preserved by the call to the native RegExp code, as
4764  // this code is called using the normal C calling convention. When calling
4765  // directly from generated code the native RegExp code will not do a GC and
4766  // therefore the contents of these registers are safe to use after the call.
4767  Register subject = r4;
4768  Register regexp_data = r5;
4769  Register last_match_info_elements = r6;
4770 
4771  // Ensure that a RegExp stack is allocated.
4772  Isolate* isolate = masm->isolate();
4773  ExternalReference address_of_regexp_stack_memory_address =
4774  ExternalReference::address_of_regexp_stack_memory_address(isolate);
4775  ExternalReference address_of_regexp_stack_memory_size =
4776  ExternalReference::address_of_regexp_stack_memory_size(isolate);
4777  __ mov(r0, Operand(address_of_regexp_stack_memory_size));
4778  __ ldr(r0, MemOperand(r0, 0));
4779  __ cmp(r0, Operand(0));
4780  __ b(eq, &runtime);
4781 
4782  // Check that the first argument is a JSRegExp object.
4783  __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
4784  STATIC_ASSERT(kSmiTag == 0);
4785  __ JumpIfSmi(r0, &runtime);
4786  __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
4787  __ b(ne, &runtime);
4788 
4789  // Check that the RegExp has been compiled (data contains a fixed array).
4790  __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
4791  if (FLAG_debug_code) {
4792  __ tst(regexp_data, Operand(kSmiTagMask));
4793  __ Check(ne, "Unexpected type for RegExp data, FixedArray expected");
4794  __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
4795  __ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
4796  }
4797 
4798  // regexp_data: RegExp data (FixedArray)
4799  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
4800  __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
4801  __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
4802  __ b(ne, &runtime);
4803 
4804  // regexp_data: RegExp data (FixedArray)
4805  // Check that the number of captures fit in the static offsets vector buffer.
4806  __ ldr(r2,
4808  // Calculate number of capture registers (number_of_captures + 1) * 2. This
4809  // uses the assumption that smis are 2 * their untagged value.
4810  STATIC_ASSERT(kSmiTag == 0);
4812  __ add(r2, r2, Operand(2)); // r2 was a smi.
4813  // Check that the static offsets vector buffer is large enough.
4815  __ b(hi, &runtime);
4816 
4817  // r2: Number of capture registers
4818  // regexp_data: RegExp data (FixedArray)
4819  // Check that the second argument is a string.
4820  __ ldr(subject, MemOperand(sp, kSubjectOffset));
4821  __ JumpIfSmi(subject, &runtime);
4822  Condition is_string = masm->IsObjectStringType(subject, r0);
4823  __ b(NegateCondition(is_string), &runtime);
4824  // Get the length of the string to r3.
4825  __ ldr(r3, FieldMemOperand(subject, String::kLengthOffset));
4826 
4827  // r2: Number of capture registers
4828  // r3: Length of subject string as a smi
4829  // subject: Subject string
4830  // regexp_data: RegExp data (FixedArray)
4831  // Check that the third argument is a positive smi less than the subject
4832  // string length. A negative value will be greater (unsigned comparison).
4833  __ ldr(r0, MemOperand(sp, kPreviousIndexOffset));
4834  __ JumpIfNotSmi(r0, &runtime);
4835  __ cmp(r3, Operand(r0));
4836  __ b(ls, &runtime);
4837 
4838  // r2: Number of capture registers
4839  // subject: Subject string
4840  // regexp_data: RegExp data (FixedArray)
4841  // Check that the fourth object is a JSArray object.
4842  __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
4843  __ JumpIfSmi(r0, &runtime);
4844  __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
4845  __ b(ne, &runtime);
4846  // Check that the JSArray is in fast case.
4847  __ ldr(last_match_info_elements,
4849  __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
4850  __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
4851  __ b(ne, &runtime);
4852  // Check that the last match info has space for the capture registers and the
4853  // additional information.
4854  __ ldr(r0,
4855  FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
4856  __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead));
4857  __ cmp(r2, Operand(r0, ASR, kSmiTagSize));
4858  __ b(gt, &runtime);
4859 
4860  // Reset offset for possibly sliced string.
4861  __ mov(r9, Operand(0));
4862  // subject: Subject string
4863  // regexp_data: RegExp data (FixedArray)
4864  // Check the representation and encoding of the subject string.
4865  Label seq_string;
4866  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
4868  // First check for flat string. None of the following string type tests will
4869  // succeed if subject is not a string or a short external string.
4870  __ and_(r1,
4871  r0,
4872  Operand(kIsNotStringMask |
4875  SetCC);
4877  __ b(eq, &seq_string);
4878 
4879  // subject: Subject string
4880  // regexp_data: RegExp data (FixedArray)
4881  // r1: whether subject is a string and if yes, its string representation
4882  // Check for flat cons string or sliced string.
4883  // A flat cons string is a cons string where the second part is the empty
4884  // string. In that case the subject string is just the first part of the cons
4885  // string. Also in this case the first part of the cons string is known to be
4886  // a sequential string or an external string.
4887  // In the case of a sliced string its offset has to be taken into account.
4888  Label cons_string, external_string, check_encoding;
4893  __ cmp(r1, Operand(kExternalStringTag));
4894  __ b(lt, &cons_string);
4895  __ b(eq, &external_string);
4896 
4897  // Catch non-string subject or short external string.
4900  __ b(ne, &runtime);
4901 
4902  // String is sliced.
4904  __ mov(r9, Operand(r9, ASR, kSmiTagSize));
4905  __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
4906  // r9: offset of sliced string, smi-tagged.
4907  __ jmp(&check_encoding);
4908  // String is a cons string, check whether it is flat.
4909  __ bind(&cons_string);
4911  __ CompareRoot(r0, Heap::kEmptyStringRootIndex);
4912  __ b(ne, &runtime);
4913  __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
4914  // Is first part of cons or parent of slice a flat string?
4915  __ bind(&check_encoding);
4916  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
4919  __ tst(r0, Operand(kStringRepresentationMask));
4920  __ b(ne, &external_string);
4921 
4922  __ bind(&seq_string);
4923  // subject: Subject string
4924  // regexp_data: RegExp data (FixedArray)
4925  // r0: Instance type of subject string
4928  // Find the code object based on the assumptions above.
4929  __ and_(r0, r0, Operand(kStringEncodingMask));
4930  __ mov(r3, Operand(r0, ASR, 2), SetCC);
4931  __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
4932  __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
4933 
4934  // Check that the irregexp code has been generated for the actual string
4935  // encoding. If it has, the field contains a code object; otherwise it contains
4936  // a smi (code flushing support).
4937  __ JumpIfSmi(r7, &runtime);
4938 
4939  // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
4940  // r7: code
4941  // subject: Subject string
4942  // regexp_data: RegExp data (FixedArray)
4943  // Load used arguments before starting to push arguments for call to native
4944  // RegExp code to avoid handling changing stack height.
4945  __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
4946  __ mov(r1, Operand(r1, ASR, kSmiTagSize));
4947 
4948  // r1: previous index
4949  // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
4950  // r7: code
4951  // subject: Subject string
4952  // regexp_data: RegExp data (FixedArray)
4953  // All checks done. Now push arguments for native regexp code.
4954  __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
4955 
4956  // Isolates: note we add an additional parameter here (isolate pointer).
4957  const int kRegExpExecuteArguments = 9;
4958  const int kParameterRegisters = 4;
4959  __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
4960 
4961  // Stack pointer now points to cell where return address is to be written.
4962  // Arguments are before that on the stack or in registers.
4963 
4964  // Argument 9 (sp[20]): Pass current isolate address.
4965  __ mov(r0, Operand(ExternalReference::isolate_address()));
4966  __ str(r0, MemOperand(sp, 5 * kPointerSize));
4967 
4968  // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
4969  __ mov(r0, Operand(1));
4970  __ str(r0, MemOperand(sp, 4 * kPointerSize));
4971 
4972  // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
4973  __ mov(r0, Operand(address_of_regexp_stack_memory_address));
4974  __ ldr(r0, MemOperand(r0, 0));
4975  __ mov(r2, Operand(address_of_regexp_stack_memory_size));
4976  __ ldr(r2, MemOperand(r2, 0));
4977  __ add(r0, r0, Operand(r2));
4978  __ str(r0, MemOperand(sp, 3 * kPointerSize));
4979 
4980  // Argument 6: Set the number of capture registers to zero to force global
4981  // regexps to behave as non-global. This does not affect non-global regexps.
4982  __ mov(r0, Operand(0));
4983  __ str(r0, MemOperand(sp, 2 * kPointerSize));
4984 
4985  // Argument 5 (sp[4]): static offsets vector buffer.
4986  __ mov(r0,
4987  Operand(ExternalReference::address_of_static_offsets_vector(isolate)));
4988  __ str(r0, MemOperand(sp, 1 * kPointerSize));
4989 
4990  // For arguments 4 and 3 get string length, calculate start of string data and
4991  // calculate the shift of the index (0 for ASCII and 1 for two byte).
4992  __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
4993  __ eor(r3, r3, Operand(1));
4994  // Load the length from the original subject string from the previous stack
4995  // frame. Therefore we have to use fp, which points exactly to two pointer
4996  // sizes below the previous sp. (Because creating a new stack frame pushes
4997  // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
4998  __ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
4999  // If slice offset is not 0, load the length from the original sliced string.
5000  // Argument 4, r3: End of string data
5001  // Argument 3, r2: Start of string data
5002  // Prepare start and end index of the input.
5003  __ add(r9, r8, Operand(r9, LSL, r3));
5004  __ add(r2, r9, Operand(r1, LSL, r3));
5005 
5006  __ ldr(r8, FieldMemOperand(subject, String::kLengthOffset));
5007  __ mov(r8, Operand(r8, ASR, kSmiTagSize));
5008  __ add(r3, r9, Operand(r8, LSL, r3));
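 // Illustrative sketch of the address computation above (not part of the
 // stub; assumes r9 holds the slice offset, zero for non-sliced strings):
 //   shift = is_ascii ? 0 : 1;                        // 1 << shift bytes/char
 //   data  = subject_chars + (slice_offset << shift);
 //   start = data + (previous_index << shift);        // argument 3 (r2)
 //   end   = data + (subject_length << shift);        // argument 4 (r3)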
5009 
5010  // Argument 2 (r1): Previous index.
5011  // Already there
5012 
5013  // Argument 1 (r0): Subject string.
5014  __ mov(r0, subject);
5015 
5016  // Locate the code entry and call it.
5017  __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
5018  DirectCEntryStub stub;
5019  stub.GenerateCall(masm, r7);
5020 
5021  __ LeaveExitFrame(false, no_reg);
5022 
5023  // r0: result
5024  // subject: subject string (callee saved)
5025  // regexp_data: RegExp data (callee saved)
5026  // last_match_info_elements: Last match info elements (callee saved)
5027 
5028  // Check the result.
5029  Label success;
5030 
5031  __ cmp(r0, Operand(1));
5032  // We expect exactly one result since we force the called regexp to behave
5033  // as non-global.
5034  __ b(eq, &success);
5035  Label failure;
5036  __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
5037  __ b(eq, &failure);
5039  // If not an exception, it can only be a retry. Handle that in the runtime system.
5040  __ b(ne, &runtime);
5041  // The result must now be an exception. If there is no pending exception already,
5042  // a stack overflow (on the backtrack stack) was detected in RegExp code but
5043  // the exception has not been created yet. Handle that in the runtime system.
5044  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
5045  __ mov(r1, Operand(isolate->factory()->the_hole_value()));
5046  __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
5047  isolate)));
5048  __ ldr(r0, MemOperand(r2, 0));
5049  __ cmp(r0, r1);
5050  __ b(eq, &runtime);
5051 
5052  __ str(r1, MemOperand(r2, 0)); // Clear pending exception.
5053 
5054  // Check if the exception is a termination. If so, throw as uncatchable.
5055  __ CompareRoot(r0, Heap::kTerminationExceptionRootIndex);
5056 
5057  Label termination_exception;
5058  __ b(eq, &termination_exception);
5059 
5060  __ Throw(r0);
5061 
5062  __ bind(&termination_exception);
5063  __ ThrowUncatchable(r0);
5064 
5065  __ bind(&failure);
5066  // For failure and exception return null.
5067  __ mov(r0, Operand(masm->isolate()->factory()->null_value()));
5068  __ add(sp, sp, Operand(4 * kPointerSize));
5069  __ Ret();
5070 
5071  // Process the result from the native regexp code.
5072  __ bind(&success);
5073  __ ldr(r1,
5075  // Calculate number of capture registers (number_of_captures + 1) * 2.
5076  STATIC_ASSERT(kSmiTag == 0);
5078  __ add(r1, r1, Operand(2)); // r1 was a smi.
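 // Note: because the capture count was loaded as a smi, it is already
 // multiplied by two, so adding 2 yields (number_of_captures + 1) * 2
 // directly, e.g. n captures -> (n << 1) + 2 == (n + 1) * 2 registers.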
5079 
5080  // r1: number of capture registers
5081  // r4: subject string
5082  // Store the capture count.
5083  __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize)); // To smi.
5084  __ str(r2, FieldMemOperand(last_match_info_elements,
5086  // Store last subject and last input.
5087  __ str(subject,
5088  FieldMemOperand(last_match_info_elements,
5090  __ mov(r2, subject);
5091  __ RecordWriteField(last_match_info_elements,
5093  r2,
5094  r7,
5096  kDontSaveFPRegs);
5097  __ str(subject,
5098  FieldMemOperand(last_match_info_elements,
5100  __ RecordWriteField(last_match_info_elements,
5102  subject,
5103  r7,
5105  kDontSaveFPRegs);
5106 
5107  // Get the static offsets vector filled by the native regexp code.
5108  ExternalReference address_of_static_offsets_vector =
5109  ExternalReference::address_of_static_offsets_vector(isolate);
5110  __ mov(r2, Operand(address_of_static_offsets_vector));
5111 
5112  // r1: number of capture registers
5113  // r2: offsets vector
5114  Label next_capture, done;
5115  // The capture register counter starts from the number of capture registers
5116  // and counts down until wrapping after zero.
5117  __ add(r0,
5118  last_match_info_elements,
5120  __ bind(&next_capture);
5121  __ sub(r1, r1, Operand(1), SetCC);
5122  __ b(mi, &done);
5123  // Read the value from the static offsets vector buffer.
5124  __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
5125  // Store the smi value in the last match info.
5126  __ mov(r3, Operand(r3, LSL, kSmiTagSize));
5127  __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
5128  __ jmp(&next_capture);
5129  __ bind(&done);
5130 
5131  // Return last match info.
5132  __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
5133  __ add(sp, sp, Operand(4 * kPointerSize));
5134  __ Ret();
5135 
5136  // External string. Short external strings have already been ruled out.
5137  // r0: scratch
5138  __ bind(&external_string);
5139  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
5141  if (FLAG_debug_code) {
5142  // Assert that we do not have a cons or slice (indirect strings) here.
5143  // Sequential strings have already been ruled out.
5144  __ tst(r0, Operand(kIsIndirectStringMask));
5145  __ Assert(eq, "external string expected, but not found");
5146  }
5147  __ ldr(subject,
5149  // Move the pointer so that offset-wise, it looks like a sequential string.
5151  __ sub(subject,
5152  subject,
5154  __ jmp(&seq_string);
5155 
5156  // Do the runtime call to execute the regexp.
5157  __ bind(&runtime);
5158  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
5159 #endif // V8_INTERPRETED_REGEXP
5160 }
5161 
5162 
5163 void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
5164  const int kMaxInlineLength = 100;
5165  Label slowcase;
5166  Label done;
5167  Factory* factory = masm->isolate()->factory();
5168 
5169  __ ldr(r1, MemOperand(sp, kPointerSize * 2));
5170  STATIC_ASSERT(kSmiTag == 0);
5171  STATIC_ASSERT(kSmiTagSize == 1);
5172  __ JumpIfNotSmi(r1, &slowcase);
5173  __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
5174  __ b(hi, &slowcase);
5175  // Smi-tagging is equivalent to multiplying by 2.
5176  // Allocate the RegExpResult followed by a FixedArray with the size computed below.
5177  // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
5178  // Elements: [Map][Length][..elements..]
5179  // Size of JSArray with two in-object properties and the header of a
5180  // FixedArray.
5181  int objects_size =
5183  __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize));
5184  __ add(r2, r5, Operand(objects_size));
5185  __ AllocateInNewSpace(
5186  r2, // In: Size, in words.
5187  r0, // Out: Start of allocation (tagged).
5188  r3, // Scratch register.
5189  r4, // Scratch register.
5190  &slowcase,
5191  static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
5192  // r0: Start of allocated area, object-tagged.
5193  // r1: Number of elements in array, as smi.
5194  // r5: Number of elements, untagged.
5195 
5196  // Set JSArray map to global.regexp_result_map().
5197  // Set empty properties FixedArray.
5198  // Set elements to point to FixedArray allocated right after the JSArray.
5199  // Interleave operations for better latency.
5201  __ add(r3, r0, Operand(JSRegExpResult::kSize));
5202  __ mov(r4, Operand(factory->empty_fixed_array()));
5208 
5209  // Set input, index and length fields from arguments.
5210  __ ldr(r1, MemOperand(sp, kPointerSize * 0));
5211  __ ldr(r2, MemOperand(sp, kPointerSize * 1));
5212  __ ldr(r6, MemOperand(sp, kPointerSize * 2));
5216 
5217  // Fill out the elements FixedArray.
5218  // r0: JSArray, tagged.
5219  // r3: FixedArray, tagged.
5220  // r5: Number of elements in array, untagged.
5221 
5222  // Set map.
5223  __ mov(r2, Operand(factory->fixed_array_map()));
5225  // Set FixedArray length.
5226  __ mov(r6, Operand(r5, LSL, kSmiTagSize));
5228  // Fill contents of fixed-array with undefined.
5229  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
5230  __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
5231  // Fill fixed array elements with undefined.
5232  // r0: JSArray, tagged.
5233  // r2: undefined.
5234  // r3: Start of elements in FixedArray.
5235  // r5: Number of elements to fill.
5236  Label loop;
5237  __ cmp(r5, Operand(0));
5238  __ bind(&loop);
5239  __ b(le, &done); // Jump if r5 is negative or zero.
5240  __ sub(r5, r5, Operand(1), SetCC);
5241  __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
5242  __ jmp(&loop);
5243 
5244  __ bind(&done);
5245  __ add(sp, sp, Operand(3 * kPointerSize));
5246  __ Ret();
5247 
5248  __ bind(&slowcase);
5249  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
5250 }
5251 
5252 
5253 static void GenerateRecordCallTarget(MacroAssembler* masm) {
5254  // Cache the called function in a global property cell. Cache states
5255  // are uninitialized, monomorphic (indicated by a JSFunction), and
5256  // megamorphic.
5257  // r1 : the function to call
5258  // r2 : cache cell for call target
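 // Rough pseudo-C++ equivalent of the transitions emitted below (illustrative
 // sketch only; the cell holds the_hole while uninitialized, a JSFunction once
 // monomorphic, and undefined once megamorphic):
 //   if (cell == function || cell == undefined) {
 //     // monomorphic hit or already megamorphic: leave the state unchanged
 //   } else if (cell == the_hole) {
 //     cell = function;     // uninitialized -> monomorphic
 //   } else {
 //     cell = undefined;    // monomorphic miss -> megamorphic
 //   }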
5259  Label done;
5260 
5262  masm->isolate()->heap()->undefined_value());
5264  masm->isolate()->heap()->the_hole_value());
5265 
5266  // Load the cache state into r3.
5268 
5269  // A monomorphic cache hit or an already megamorphic state: invoke the
5270  // function without changing the state.
5271  __ cmp(r3, r1);
5272  __ b(eq, &done);
5273  __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
5274  __ b(eq, &done);
5275 
5276  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
5277  // megamorphic.
5278  __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
5279  // MegamorphicSentinel is an immortal immovable object (undefined) so no
5280  // write-barrier is needed.
5281  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex, ne);
5283 
5284  // An uninitialized cache is patched with the function.
5286  // No need for a write barrier here - cells are rescanned.
5287 
5288  __ bind(&done);
5289 }
5290 
5291 
5292 void CallFunctionStub::Generate(MacroAssembler* masm) {
5293  // r1 : the function to call
5294  // r2 : cache cell for call target
5295  Label slow, non_function;
5296 
5297  // The receiver might implicitly be the global object. This is
5298  // indicated by passing the hole as the receiver to the call
5299  // function stub.
5300  if (ReceiverMightBeImplicit()) {
5301  Label call;
5302  // Get the receiver from the stack.
5303  // function, receiver [, arguments]
5304  __ ldr(r4, MemOperand(sp, argc_ * kPointerSize));
5305  // Call as function is indicated with the hole.
5306  __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
5307  __ b(ne, &call);
5308  // Patch the receiver on the stack with the global receiver object.
5309  __ ldr(r3,
5312  __ str(r3, MemOperand(sp, argc_ * kPointerSize));
5313  __ bind(&call);
5314  }
5315 
5316  // Check that the function is really a JavaScript function.
5317  // r1: pushed function (to be verified)
5318  __ JumpIfSmi(r1, &non_function);
5319  // Get the map of the function object.
5320  __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
5321  __ b(ne, &slow);
5322 
5323  if (RecordCallTarget()) {
5324  GenerateRecordCallTarget(masm);
5325  }
5326 
5327  // Fast-case: Invoke the function now.
5328  // r1: pushed function
5329  ParameterCount actual(argc_);
5330 
5331  if (ReceiverMightBeImplicit()) {
5332  Label call_as_function;
5333  __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
5334  __ b(eq, &call_as_function);
5335  __ InvokeFunction(r1,
5336  actual,
5337  JUMP_FUNCTION,
5338  NullCallWrapper(),
5339  CALL_AS_METHOD);
5340  __ bind(&call_as_function);
5341  }
5342  __ InvokeFunction(r1,
5343  actual,
5344  JUMP_FUNCTION,
5345  NullCallWrapper(),
5347 
5348  // Slow-case: Non-function called.
5349  __ bind(&slow);
5350  if (RecordCallTarget()) {
5351  // If there is a call target cache, mark it megamorphic in the
5352  // non-function case. MegamorphicSentinel is an immortal immovable
5353  // object (undefined) so no write barrier is needed.
5355  masm->isolate()->heap()->undefined_value());
5356  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5358  }
5359  // Check for function proxy.
5360  __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
5361  __ b(ne, &non_function);
5362  __ push(r1); // put proxy as additional argument
5363  __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE));
5364  __ mov(r2, Operand(0, RelocInfo::NONE));
5365  __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
5366  __ SetCallKind(r5, CALL_AS_METHOD);
5367  {
5368  Handle<Code> adaptor =
5369  masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
5370  __ Jump(adaptor, RelocInfo::CODE_TARGET);
5371  }
5372 
5373  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
5374  // of the original receiver from the call site).
5375  __ bind(&non_function);
5376  __ str(r1, MemOperand(sp, argc_ * kPointerSize));
5377  __ mov(r0, Operand(argc_)); // Set up the number of arguments.
5378  __ mov(r2, Operand(0, RelocInfo::NONE));
5379  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
5380  __ SetCallKind(r5, CALL_AS_METHOD);
5381  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
5382  RelocInfo::CODE_TARGET);
5383 }
5384 
5385 
5386 void CallConstructStub::Generate(MacroAssembler* masm) {
5387  // r0 : number of arguments
5388  // r1 : the function to call
5389  // r2 : cache cell for call target
5390  Label slow, non_function_call;
5391 
5392  // Check that the function is not a smi.
5393  __ JumpIfSmi(r1, &non_function_call);
5394  // Check that the function is a JSFunction.
5395  __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
5396  __ b(ne, &slow);
5397 
5398  if (RecordCallTarget()) {
5399  GenerateRecordCallTarget(masm);
5400  }
5401 
5402  // Jump to the function-specific construct stub.
5405  __ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
5406 
5407  // r0: number of arguments
5408  // r1: called object
5409  // r3: object type
5410  Label do_call;
5411  __ bind(&slow);
5412  __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
5413  __ b(ne, &non_function_call);
5414  __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
5415  __ jmp(&do_call);
5416 
5417  __ bind(&non_function_call);
5418  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
5419  __ bind(&do_call);
5420  // Set expected number of arguments to zero (not changing r0).
5421  __ mov(r2, Operand(0, RelocInfo::NONE));
5422  __ SetCallKind(r5, CALL_AS_METHOD);
5423  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
5424  RelocInfo::CODE_TARGET);
5425 }
5426 
5427 
5428 // Unfortunately you have to run without snapshots to see most of these
5429 // names in the profile since most compare stubs end up in the snapshot.
5430 void CompareStub::PrintName(StringStream* stream) {
5431  ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
5432  (lhs_.is(r1) && rhs_.is(r0)));
5433  const char* cc_name;
5434  switch (cc_) {
5435  case lt: cc_name = "LT"; break;
5436  case gt: cc_name = "GT"; break;
5437  case le: cc_name = "LE"; break;
5438  case ge: cc_name = "GE"; break;
5439  case eq: cc_name = "EQ"; break;
5440  case ne: cc_name = "NE"; break;
5441  default: cc_name = "UnknownCondition"; break;
5442  }
5443  bool is_equality = cc_ == eq || cc_ == ne;
5444  stream->Add("CompareStub_%s", cc_name);
5445  stream->Add(lhs_.is(r0) ? "_r0" : "_r1");
5446  stream->Add(rhs_.is(r0) ? "_r0" : "_r1");
5447  if (strict_ && is_equality) stream->Add("_STRICT");
5448  if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
5449  if (!include_number_compare_) stream->Add("_NO_NUMBER");
5450  if (!include_smi_compare_) stream->Add("_NO_SMI");
5451 }
5452 
5453 
5454 int CompareStub::MinorKey() {
5455  // Encode the parameters in a unique 16-bit value. To avoid duplicate
5456  // stubs the never NaN NaN condition is only taken into account if the
5457  // condition is equals.
5458  ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12));
5459  ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
5460  (lhs_.is(r1) && rhs_.is(r0)));
5461  return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
5462  | RegisterField::encode(lhs_.is(r0))
5463  | StrictField::encode(strict_)
5464  | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
5465  | IncludeNumberCompareField::encode(include_number_compare_)
5466  | IncludeSmiCompareField::encode(include_smi_compare_);
5467 }
5468 
5469 
5470 // StringCharCodeAtGenerator
5471 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
5472  Label flat_string;
5473  Label ascii_string;
5474  Label got_char_code;
5475  Label sliced_string;
5476 
5477  // If the receiver is a smi trigger the non-string case.
5478  __ JumpIfSmi(object_, receiver_not_string_);
5479 
5480  // Fetch the instance type of the receiver into result register.
5481  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5482  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5483  // If the receiver is not a string trigger the non-string case.
5484  __ tst(result_, Operand(kIsNotStringMask));
5485  __ b(ne, receiver_not_string_);
5486 
5487  // If the index is non-smi trigger the non-smi case.
5488  __ JumpIfNotSmi(index_, &index_not_smi_);
5489  __ bind(&got_smi_index_);
5490 
5491  // Check for index out of range.
5492  __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
5493  __ cmp(ip, Operand(index_));
5494  __ b(ls, index_out_of_range_);
5495 
5496  __ mov(index_, Operand(index_, ASR, kSmiTagSize));
5497 
5499  object_,
5500  index_,
5501  result_,
5502  &call_runtime_);
5503 
5504  __ mov(result_, Operand(result_, LSL, kSmiTagSize));
5505  __ bind(&exit_);
5506 }
5507 
5508 
5510  MacroAssembler* masm,
5511  const RuntimeCallHelper& call_helper) {
5512  __ Abort("Unexpected fallthrough to CharCodeAt slow case");
5513 
5514  // Index is not a smi.
5515  __ bind(&index_not_smi_);
5516  // If index is a heap number, try converting it to an integer.
5517  __ CheckMap(index_,
5518  result_,
5519  Heap::kHeapNumberMapRootIndex,
5520  index_not_number_,
5522  call_helper.BeforeCall(masm);
5523  __ push(object_);
5524  __ push(index_); // Consumed by runtime conversion function.
5525  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
5526  __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
5527  } else {
5528  ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
5529  // NumberToSmi discards numbers that are not exact integers.
5530  __ CallRuntime(Runtime::kNumberToSmi, 1);
5531  }
5532  // Save the conversion result before the pop instructions below
5533  // have a chance to overwrite it.
5534  __ Move(index_, r0);
5535  __ pop(object_);
5536  // Reload the instance type.
5537  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5538  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5539  call_helper.AfterCall(masm);
5540  // If index is still not a smi, it must be out of range.
5541  __ JumpIfNotSmi(index_, index_out_of_range_);
5542  // Otherwise, return to the fast path.
5543  __ jmp(&got_smi_index_);
5544 
5545  // Call runtime. We get here when the receiver is a string and the
5546  // index is a number, but the code for getting the actual character
5547  // is too complex (e.g., when the string needs to be flattened).
5548  __ bind(&call_runtime_);
5549  call_helper.BeforeCall(masm);
5550  __ mov(index_, Operand(index_, LSL, kSmiTagSize));
5551  __ Push(object_, index_);
5552  __ CallRuntime(Runtime::kStringCharCodeAt, 2);
5553  __ Move(result_, r0);
5554  call_helper.AfterCall(masm);
5555  __ jmp(&exit_);
5556 
5557  __ Abort("Unexpected fallthrough from CharCodeAt slow case");
5558 }
5559 
5560 
5561 // -------------------------------------------------------------------------
5562 // StringCharFromCodeGenerator
5563 
5564 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
5565  // Fast case of Heap::LookupSingleCharacterStringFromCode.
5566  STATIC_ASSERT(kSmiTag == 0);
5569  __ tst(code_,
5570  Operand(kSmiTagMask |
5572  __ b(ne, &slow_case_);
5573 
5574  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
5575  // At this point the code register contains a smi-tagged ASCII char code.
5576  STATIC_ASSERT(kSmiTag == 0);
5577  __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
5578  __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
5579  __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
5580  __ b(eq, &slow_case_);
5581  __ bind(&exit_);
5582 }
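 // The fast case above boils down to a cache lookup along these lines
 // (illustrative sketch only):
 //   result = single_character_string_cache[code >> kSmiTagSize];
 //   if (result == undefined) goto slow_case;   // fall back to the runtime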
5583 
5584 
5586  MacroAssembler* masm,
5587  const RuntimeCallHelper& call_helper) {
5588  __ Abort("Unexpected fallthrough to CharFromCode slow case");
5589 
5590  __ bind(&slow_case_);
5591  call_helper.BeforeCall(masm);
5592  __ push(code_);
5593  __ CallRuntime(Runtime::kCharFromCode, 1);
5594  __ Move(result_, r0);
5595  call_helper.AfterCall(masm);
5596  __ jmp(&exit_);
5597 
5598  __ Abort("Unexpected fallthrough from CharFromCode slow case");
5599 }
5600 
5601 
5602 // -------------------------------------------------------------------------
5603 // StringCharAtGenerator
5604 
5605 void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
5606  char_code_at_generator_.GenerateFast(masm);
5607  char_from_code_generator_.GenerateFast(masm);
5608 }
5609 
5610 
5612  MacroAssembler* masm,
5613  const RuntimeCallHelper& call_helper) {
5614  char_code_at_generator_.GenerateSlow(masm, call_helper);
5615  char_from_code_generator_.GenerateSlow(masm, call_helper);
5616 }
5617 
5618 
5619 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
5620  Register dest,
5621  Register src,
5622  Register count,
5623  Register scratch,
5624  bool ascii) {
5625  Label loop;
5626  Label done;
5627  // This loop just copies one character at a time, as it is only used for very
5628  // short strings.
5629  if (!ascii) {
5630  __ add(count, count, Operand(count), SetCC);
5631  } else {
5632  __ cmp(count, Operand(0, RelocInfo::NONE));
5633  }
5634  __ b(eq, &done);
5635 
5636  __ bind(&loop);
5637  __ ldrb(scratch, MemOperand(src, 1, PostIndex));
5638  // Perform the sub between the load and the dependent store to give the load
5639  // time to complete.
5640  __ sub(count, count, Operand(1), SetCC);
5641  __ strb(scratch, MemOperand(dest, 1, PostIndex));
5642  // Loop again unless this was the last iteration.
5643  __ b(gt, &loop);
5644 
5645  __ bind(&done);
5646 }
5647 
5648 
5649 enum CopyCharactersFlags {
5650  COPY_ASCII = 1,
5651  DEST_ALWAYS_ALIGNED = 2
5652 };
5653 
5654 
5655 void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
5656  Register dest,
5657  Register src,
5658  Register count,
5659  Register scratch1,
5660  Register scratch2,
5661  Register scratch3,
5662  Register scratch4,
5663  Register scratch5,
5664  int flags) {
5665  bool ascii = (flags & COPY_ASCII) != 0;
5666  bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
5667 
5668  if (dest_always_aligned && FLAG_debug_code) {
5669  // Check that destination is actually word aligned if the flag says
5670  // that it is.
5671  __ tst(dest, Operand(kPointerAlignmentMask));
5672  __ Check(eq, "Destination of copy not aligned.");
5673  }
5674 
5675  const int kReadAlignment = 4;
5676  const int kReadAlignmentMask = kReadAlignment - 1;
5677  // Ensure that reading an entire aligned word containing the last character
5678  // of a string will not read outside the allocated area (because we pad up
5679  // to kObjectAlignment).
5680  STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
5681  // Assumes word reads and writes are little endian.
5682  // Nothing to do for zero characters.
5683  Label done;
5684  if (!ascii) {
5685  __ add(count, count, Operand(count), SetCC);
5686  } else {
5687  __ cmp(count, Operand(0, RelocInfo::NONE));
5688  }
5689  __ b(eq, &done);
5690 
5691  // Assume that you cannot read (or write) unaligned.
5692  Label byte_loop;
5693  // Must copy at least eight bytes, otherwise just do it one byte at a time.
5694  __ cmp(count, Operand(8));
5695  __ add(count, dest, Operand(count));
5696  Register limit = count; // Read until src equals this.
5697  __ b(lt, &byte_loop);
5698 
5699  if (!dest_always_aligned) {
5700  // Align dest by byte copying. Copies between zero and three bytes.
5701  __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
5702  Label dest_aligned;
5703  __ b(eq, &dest_aligned);
5704  __ cmp(scratch4, Operand(2));
5705  __ ldrb(scratch1, MemOperand(src, 1, PostIndex));
5706  __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
5707  __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
5708  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
5709  __ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
5710  __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
5711  __ bind(&dest_aligned);
5712  }
5713 
5714  Label simple_loop;
5715 
5716  __ sub(scratch4, dest, Operand(src));
5717  __ and_(scratch4, scratch4, Operand(0x03), SetCC);
5718  __ b(eq, &simple_loop);
5719  // The shift register holds the number of bits of a source word that
5720  // must be combined with bits from the next source word in order
5721  // to create a destination word.
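 // Illustrative C sketch of the combine step used in the loop below (not part
 // of the stub): with left_shift = 8 * ((dest - src) & 3) and
 // right_shift = 32 - left_shift,
 //   dst_word = (prev_src_word >> right_shift) | (next_src_word << left_shift);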
5722 
5723  // Complex loop for src/dst that are not aligned the same way.
5724  {
5725  Label loop;
5726  __ mov(scratch4, Operand(scratch4, LSL, 3));
5727  Register left_shift = scratch4;
5728  __ and_(src, src, Operand(~3)); // Round down to load previous word.
5729  __ ldr(scratch1, MemOperand(src, 4, PostIndex));
5730  // Store the "shift" most significant bits of scratch in the least
5731  // significant bits (i.e., shift down by (32 - shift)).
5732  __ rsb(scratch2, left_shift, Operand(32));
5733  Register right_shift = scratch2;
5734  __ mov(scratch1, Operand(scratch1, LSR, right_shift));
5735 
5736  __ bind(&loop);
5737  __ ldr(scratch3, MemOperand(src, 4, PostIndex));
5738  __ sub(scratch5, limit, Operand(dest));
5739  __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
5740  __ str(scratch1, MemOperand(dest, 4, PostIndex));
5741  __ mov(scratch1, Operand(scratch3, LSR, right_shift));
5742  // Loop if four or more bytes left to copy.
5743  // Compare to eight, because we did the subtract before increasing dst.
5744  __ sub(scratch5, scratch5, Operand(8), SetCC);
5745  __ b(ge, &loop);
5746  }
5747  // There is now between zero and three bytes left to copy (the negative of
5748  // that number is in scratch5), and between one and three bytes already read into
5749  // scratch1 (eight times that number in scratch4). We may have read past
5750  // the end of the string, but because objects are aligned, we have not read
5751  // past the end of the object.
5752  // Find the minimum of remaining characters to move and preloaded characters
5753  // and write those as bytes.
5754  __ add(scratch5, scratch5, Operand(4), SetCC);
5755  __ b(eq, &done);
5756  __ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
5757  // Move minimum of bytes read and bytes left to copy to scratch4.
5758  __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
5759  // Between one and three (value in scratch5) characters already read into
5760  // scratch ready to write.
5761  __ cmp(scratch5, Operand(2));
5762  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
5763  __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
5764  __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
5765  __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
5766  __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
5767  // Copy any remaining bytes.
5768  __ b(&byte_loop);
5769 
5770  // Simple loop.
5771  // Copy words from src to dst, until less than four bytes left.
5772  // Both src and dest are word aligned.
5773  __ bind(&simple_loop);
5774  {
5775  Label loop;
5776  __ bind(&loop);
5777  __ ldr(scratch1, MemOperand(src, 4, PostIndex));
5778  __ sub(scratch3, limit, Operand(dest));
5779  __ str(scratch1, MemOperand(dest, 4, PostIndex));
5780  // Compare to 8, not 4, because we do the subtraction before increasing
5781  // dest.
5782  __ cmp(scratch3, Operand(8));
5783  __ b(ge, &loop);
5784  }
5785 
5786  // Copy bytes from src to dst until dst hits limit.
5787  __ bind(&byte_loop);
5788  __ cmp(dest, Operand(limit));
5789  __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
5790  __ b(ge, &done);
5791  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
5792  __ b(&byte_loop);
5793 
5794  __ bind(&done);
5795 }
5796 
5797 
5798 void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
5799  Register c1,
5800  Register c2,
5801  Register scratch1,
5802  Register scratch2,
5803  Register scratch3,
5804  Register scratch4,
5805  Register scratch5,
5806  Label* not_found) {
5807  // Register scratch3 is the general scratch register in this function.
5808  Register scratch = scratch3;
5809 
5810  // Make sure that both characters are not digits, as such strings have a
5811  // different hash algorithm. Don't try to look for these in the symbol table.
5812  Label not_array_index;
5813  __ sub(scratch, c1, Operand(static_cast<int>('0')));
5814  __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
5815  __ b(hi, &not_array_index);
5816  __ sub(scratch, c2, Operand(static_cast<int>('0')));
5817  __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
5818 
5819  // If the check failed, combine both characters into a single halfword.
5820  // This is required by the contract of the method: code at the
5821  // not_found branch expects this combination in the c1 register.
5822  __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
5823  __ b(ls, not_found);
5824 
5825  __ bind(&not_array_index);
5826  // Calculate the two character string hash.
5827  Register hash = scratch1;
5828  StringHelper::GenerateHashInit(masm, hash, c1);
5831 
5832  // Collect the two characters in a register.
5833  Register chars = c1;
5834  __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));
5835 
5836  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5837  // hash: hash of two character string.
5838 
5839  // Load symbol table
5840  // Load address of first element of the symbol table.
5841  Register symbol_table = c2;
5842  __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
5843 
5844  Register undefined = scratch4;
5845  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
5846 
5847  // Calculate capacity mask from the symbol table capacity.
5848  Register mask = scratch2;
5849  __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
5850  __ mov(mask, Operand(mask, ASR, 1));
5851  __ sub(mask, mask, Operand(1));
5852 
5853  // Calculate untagged address of the first element of the symbol table.
5854  Register first_symbol_table_element = symbol_table;
5855  __ add(first_symbol_table_element, symbol_table,
5857 
5858  // Registers
5859  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5860  // hash: hash of two character string
5861  // mask: capacity mask
5862  // first_symbol_table_element: address of the first element of
5863  // the symbol table
5864  // undefined: the undefined object
5865  // scratch: -
5866 
5867  // Perform a number of probes in the symbol table.
5868  const int kProbes = 4;
5869  Label found_in_symbol_table;
5870  Label next_probe[kProbes];
5871  Register candidate = scratch5; // Scratch register contains candidate.
5872  for (int i = 0; i < kProbes; i++) {
5873  // Calculate entry in symbol table.
5874  if (i > 0) {
5875  __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
5876  } else {
5877  __ mov(candidate, hash);
5878  }
5879 
5880  __ and_(candidate, candidate, Operand(mask));
5881 
5882  // Load the entry from the symbol table.
5884  __ ldr(candidate,
5885  MemOperand(first_symbol_table_element,
5886  candidate,
5887  LSL,
5888  kPointerSizeLog2));
5889 
5890  // If the entry is undefined, no string with this hash can be found.
5891  Label is_string;
5892  __ CompareObjectType(candidate, scratch, scratch, ODDBALL_TYPE);
5893  __ b(ne, &is_string);
5894 
5895  __ cmp(undefined, candidate);
5896  __ b(eq, not_found);
5897  // Must be the hole (deleted entry).
5898  if (FLAG_debug_code) {
5899  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
5900  __ cmp(ip, candidate);
5901  __ Assert(eq, "oddball in symbol table is not undefined or the hole");
5902  }
5903  __ jmp(&next_probe[i]);
5904 
5905  __ bind(&is_string);
5906 
5907  // Check that the candidate is a non-external ASCII string. The instance
5908  // type is still in the scratch register from the CompareObjectType
5909  // operation.
5910  __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
5911 
5912  // If the length is not 2, the string is not a candidate.
5913  __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
5914  __ cmp(scratch, Operand(Smi::FromInt(2)));
5915  __ b(ne, &next_probe[i]);
5916 
5917  // Check if the two characters match.
5918  // Assumes that word load is little endian.
5919  __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
5920  __ cmp(chars, scratch);
5921  __ b(eq, &found_in_symbol_table);
5922  __ bind(&next_probe[i]);
5923  }
5924 
5925  // No matching 2 character string found by probing.
5926  __ jmp(not_found);
5927 
5928  // Scratch register contains result when we fall through to here.
5929  Register result = candidate;
5930  __ bind(&found_in_symbol_table);
5931  __ Move(r0, result);
5932 }
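 // Illustrative sketch of the probing scheme above (matches_two_char_ascii is
 // a hypothetical stand-in for the length/encoding/character checks emitted
 // above, not a real helper):
 //   for (int i = 0; i < kProbes; i++) {
 //     int entry = (i == 0 ? hash : hash + SymbolTable::GetProbeOffset(i)) & mask;
 //     Object* candidate = table[entry];
 //     if (candidate == undefined) return not_found;        // empty slot
 //     if (candidate != the_hole &&
 //         matches_two_char_ascii(candidate, chars)) return candidate;
 //   }
 //   return not_found;   // all probes exhausted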
5933 
5934 
5935 void StringHelper::GenerateHashInit(MacroAssembler* masm,
5936  Register hash,
5937  Register character) {
5938  // hash = seed + character + ((seed + character) << 10);
5939  __ LoadRoot(hash, Heap::kHashSeedRootIndex);
5940  // Untag smi seed and add the character.
5941  __ add(hash, character, Operand(hash, LSR, kSmiTagSize));
5942  // hash += hash << 10;
5943  __ add(hash, hash, Operand(hash, LSL, 10));
5944  // hash ^= hash >> 6;
5945  __ eor(hash, hash, Operand(hash, LSR, 6));
5946 }
5947 
5948 
5949 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
5950  Register hash,
5951  Register character) {
5952  // hash += character;
5953  __ add(hash, hash, Operand(character));
5954  // hash += hash << 10;
5955  __ add(hash, hash, Operand(hash, LSL, 10));
5956  // hash ^= hash >> 6;
5957  __ eor(hash, hash, Operand(hash, LSR, 6));
5958 }
5959 
5960 
5961 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
5962  Register hash) {
5963  // hash += hash << 3;
5964  __ add(hash, hash, Operand(hash, LSL, 3));
5965  // hash ^= hash >> 11;
5966  __ eor(hash, hash, Operand(hash, LSR, 11));
5967  // hash += hash << 15;
5968  __ add(hash, hash, Operand(hash, LSL, 15));
5969 
5970  __ and_(hash, hash, Operand(String::kHashBitMask), SetCC);
5971 
5972  // if (hash == 0) hash = 27;
5973  __ mov(hash, Operand(StringHasher::kZeroHash), LeaveCC, eq);
5974 }
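 // Taken together, GenerateHashInit / GenerateHashAddCharacter /
 // GenerateHashGetHash compute a Jenkins-style one-at-a-time hash. An
 // equivalent C sketch (illustrative only, using the constants referenced in
 // the comments above):
 //   uint32_t hash = seed + chars[0];
 //   hash += hash << 10;  hash ^= hash >> 6;
 //   for (int i = 1; i < length; i++) {
 //     hash += chars[i];  hash += hash << 10;  hash ^= hash >> 6;
 //   }
 //   hash += hash << 3;  hash ^= hash >> 11;  hash += hash << 15;
 //   hash &= String::kHashBitMask;
 //   if (hash == 0) hash = StringHasher::kZeroHash;   // 27, see comment above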
5975 
5976 
5977 void SubStringStub::Generate(MacroAssembler* masm) {
5978  Label runtime;
5979 
5980  // Stack frame on entry.
5981  // lr: return address
5982  // sp[0]: to
5983  // sp[4]: from
5984  // sp[8]: string
5985 
5986  // This stub is called from the native-call %_SubString(...), so
5987  // nothing can be assumed about the arguments. It is tested that:
5988  // "string" is a sequential string,
5989  // both "from" and "to" are smis, and
5990  // 0 <= from <= to <= string.length.
5991  // If any of these assumptions fail, we call the runtime system.
5992 
5993  const int kToOffset = 0 * kPointerSize;
5994  const int kFromOffset = 1 * kPointerSize;
5995  const int kStringOffset = 2 * kPointerSize;
5996 
5997  __ Ldrd(r2, r3, MemOperand(sp, kToOffset));
5998  STATIC_ASSERT(kFromOffset == kToOffset + 4);
5999  STATIC_ASSERT(kSmiTag == 0);
6001 
6002  // I.e., arithmetic shift right by one un-smi-tags.
6003  __ mov(r2, Operand(r2, ASR, 1), SetCC);
6004  __ mov(r3, Operand(r3, ASR, 1), SetCC, cc);
6005  // If either to or from had the smi tag bit set, then carry is set now.
6006  __ b(cs, &runtime); // Either "from" or "to" is not a smi.
6007  // We want to bail out to the runtime here if "from" is negative. In that
6008  // case, the next instruction is not executed and we fall through to bailing
6009  // out to the runtime. pl is the opposite of mi.
6010  // Both r2 and r3 are untagged integers.
6011  __ sub(r2, r2, Operand(r3), SetCC, pl);
6012  __ b(mi, &runtime); // Fail if from > to.
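 // Summary of the checks above, as an illustrative sketch only:
 //   if (!is_smi(to) || !is_smi(from)) goto runtime;
 //   to >>= 1;  from >>= 1;                   // un-smi-tag
 //   if (from < 0 || to < from) goto runtime;
 //   length = to - from;                      // result length, now in r2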
6013 
6014  // Make sure first argument is a string.
6015  __ ldr(r0, MemOperand(sp, kStringOffset));
6016  STATIC_ASSERT(kSmiTag == 0);
6017  __ JumpIfSmi(r0, &runtime);
6018  Condition is_string = masm->IsObjectStringType(r0, r1);
6019  __ b(NegateCondition(is_string), &runtime);
6020 
6021  // Short-cut for the case of trivial substring.
6022  Label return_r0;
6023  // r0: original string
6024  // r2: result string length
6026  __ cmp(r2, Operand(r4, ASR, 1));
6027  // Return original string.
6028  __ b(eq, &return_r0);
6029  // Longer than original string's length or negative: unsafe arguments.
6030  __ b(hi, &runtime);
6031  // Shorter than original string's length: an actual substring.
6032 
6033  // Deal with different string types: update the index if necessary
6034  // and put the underlying string into r5.
6035  // r0: original string
6036  // r1: instance type
6037  // r2: length
6038  // r3: from index (untagged)
6039  Label underlying_unpacked, sliced_string, seq_or_external_string;
6040  // If the string is not indirect, it can only be sequential or external.
6043  __ tst(r1, Operand(kIsIndirectStringMask));
6044  __ b(eq, &seq_or_external_string);
6045 
6046  __ tst(r1, Operand(kSlicedNotConsMask));
6047  __ b(ne, &sliced_string);
6048  // Cons string. Check whether it is flat, then fetch first part.
6050  __ CompareRoot(r5, Heap::kEmptyStringRootIndex);
6051  __ b(ne, &runtime);
6053  // Update instance type.
6056  __ jmp(&underlying_unpacked);
6057 
6058  __ bind(&sliced_string);
6059  // Sliced string. Fetch parent and correct start index by offset.
6062  __ add(r3, r3, Operand(r4, ASR, 1)); // Add offset to index.
6063  // Update instance type.
6066  __ jmp(&underlying_unpacked);
6067 
6068  __ bind(&seq_or_external_string);
6069  // Sequential or external string. Just move string to the expected register.
6070  __ mov(r5, r0);
6071 
6072  __ bind(&underlying_unpacked);
6073 
6074  if (FLAG_string_slices) {
6075  Label copy_routine;
6076  // r5: underlying subject string
6077  // r1: instance type of underlying subject string
6078  // r2: length
6079  // r3: adjusted start index (untagged)
6080  __ cmp(r2, Operand(SlicedString::kMinLength));
6081  // Short slice. Copy instead of slicing.
6082  __ b(lt, &copy_routine);
6083  // Allocate new sliced string. At this point we do not reload the instance
6084  // type including the string encoding because we simply rely on the info
6085  // provided by the original string. It does not matter if the original
6086  // string's encoding is wrong because we always have to recheck the encoding
6087  // of the newly created string's parent anyway due to externalized strings.
6088  Label two_byte_slice, set_slice_header;
6091  __ tst(r1, Operand(kStringEncodingMask));
6092  __ b(eq, &two_byte_slice);
6093  __ AllocateAsciiSlicedString(r0, r2, r6, r7, &runtime);
6094  __ jmp(&set_slice_header);
6095  __ bind(&two_byte_slice);
6096  __ AllocateTwoByteSlicedString(r0, r2, r6, r7, &runtime);
6097  __ bind(&set_slice_header);
6098  __ mov(r3, Operand(r3, LSL, 1));
6101  __ jmp(&return_r0);
6102 
6103  __ bind(&copy_routine);
6104  }
6105 
6106  // r5: underlying subject string
6107  // r1: instance type of underlying subject string
6108  // r2: length
6109  // r3: adjusted start index (untagged)
6110  Label two_byte_sequential, sequential_string, allocate_result;
6113  __ tst(r1, Operand(kExternalStringTag));
6114  __ b(eq, &sequential_string);
6115 
6116  // Handle external string.
6117  // Rule out short external strings.
6119  __ tst(r1, Operand(kShortExternalStringTag));
6120  __ b(ne, &runtime);
6122  // r5 already points to the first character of underlying string.
6123  __ jmp(&allocate_result);
6124 
6125  __ bind(&sequential_string);
6126  // Locate first character of underlying subject string.
6129 
6130  __ bind(&allocate_result);
6131  // Sequential ASCII string. Allocate the result.
6133  __ tst(r1, Operand(kStringEncodingMask));
6134  __ b(eq, &two_byte_sequential);
6135 
6136  // Allocate and copy the resulting ASCII string.
6137  __ AllocateAsciiString(r0, r2, r4, r6, r7, &runtime);
6138 
6139  // Locate first character of substring to copy.
6140  __ add(r5, r5, r3);
6141  // Locate first character of result.
6143 
6144  // r0: result string
6145  // r1: first character of result string
6146  // r2: result string length
6147  // r5: first character of substring to copy
6150  COPY_ASCII | DEST_ALWAYS_ALIGNED);
6151  __ jmp(&return_r0);
6152 
6153  // Allocate and copy the resulting two-byte string.
6154  __ bind(&two_byte_sequential);
6155  __ AllocateTwoByteString(r0, r2, r4, r6, r7, &runtime);
6156 
6157  // Locate first character of substring to copy.
6158  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
6159  __ add(r5, r5, Operand(r3, LSL, 1));
6160  // Locate first character of result.
6162 
6163  // r0: result string.
6164  // r1: first character of result.
6165  // r2: result length.
6166  // r5: first character of substring to copy.
6169  masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED);
6170 
6171  __ bind(&return_r0);
6172  Counters* counters = masm->isolate()->counters();
6173  __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
6174  __ add(sp, sp, Operand(3 * kPointerSize));
6175  __ Ret();
6176 
6177  // Just jump to runtime to create the sub string.
6178  __ bind(&runtime);
6179  __ TailCallRuntime(Runtime::kSubString, 3, 1);
6180 }
6181 
6182 
6183 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
6184  Register left,
6185  Register right,
6186  Register scratch1,
6187  Register scratch2,
6188  Register scratch3) {
6189  Register length = scratch1;
6190 
6191  // Compare lengths.
6192  Label strings_not_equal, check_zero_length;
6193  __ ldr(length, FieldMemOperand(left, String::kLengthOffset));
6194  __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
6195  __ cmp(length, scratch2);
6196  __ b(eq, &check_zero_length);
6197  __ bind(&strings_not_equal);
6198  __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL)));
6199  __ Ret();
6200 
6201  // Check if the length is zero.
6202  Label compare_chars;
6203  __ bind(&check_zero_length);
6204  STATIC_ASSERT(kSmiTag == 0);
6205  __ cmp(length, Operand(0));
6206  __ b(ne, &compare_chars);
6207  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
6208  __ Ret();
6209 
6210  // Compare characters.
6211  __ bind(&compare_chars);
6212  GenerateAsciiCharsCompareLoop(masm,
6213  left, right, length, scratch2, scratch3,
6214  &strings_not_equal);
6215 
6216  // Characters are equal.
6217  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
6218  __ Ret();
6219 }
6220 
6221 
6222 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
6223  Register left,
6224  Register right,
6225  Register scratch1,
6226  Register scratch2,
6227  Register scratch3,
6228  Register scratch4) {
6229  Label result_not_equal, compare_lengths;
6230  // Find minimum length and length difference.
6231  __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
6232  __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
6233  __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
6234  Register length_delta = scratch3;
6235  __ mov(scratch1, scratch2, LeaveCC, gt);
6236  Register min_length = scratch1;
6237  STATIC_ASSERT(kSmiTag == 0);
6238  __ cmp(min_length, Operand(0));
6239  __ b(eq, &compare_lengths);
6240 
6241  // Compare loop.
6242  GenerateAsciiCharsCompareLoop(masm,
6243  left, right, min_length, scratch2, scratch4,
6244  &result_not_equal);
6245 
6246  // Compare lengths - strings up to min-length are equal.
6247  __ bind(&compare_lengths);
6248  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
6249  // Use length_delta as result if it's zero.
6250  __ mov(r0, Operand(length_delta), SetCC);
6251  __ bind(&result_not_equal);
6252  // Conditionally update the result based either on length_delta or
6253  // the last comparison performed in the loop above.
6254  __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
6255  __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
6256  __ Ret();
6257 }
6258 
6259 
6260 void StringCompareStub::GenerateAsciiCharsCompareLoop(
6261  MacroAssembler* masm,
6262  Register left,
6263  Register right,
6264  Register length,
6265  Register scratch1,
6266  Register scratch2,
6267  Label* chars_not_equal) {
6268  // Change index to run from -length to -1 by adding length to string
6269  // start. This means that the loop ends when the index reaches zero, which
6270  // doesn't need an additional compare.
6271  __ SmiUntag(length);
6272  __ add(scratch1, length,
6274  __ add(left, left, Operand(scratch1));
6275  __ add(right, right, Operand(scratch1));
6276  __ rsb(length, length, Operand::Zero());
6277  Register index = length; // index = -length;
6278 
6279  // Compare loop.
6280  Label loop;
6281  __ bind(&loop);
6282  __ ldrb(scratch1, MemOperand(left, index));
6283  __ ldrb(scratch2, MemOperand(right, index));
6284  __ cmp(scratch1, scratch2);
6285  __ b(ne, chars_not_equal);
6286  __ add(index, index, Operand(1), SetCC);
6287  __ b(ne, &loop);
6288 }
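 // Equivalent C sketch of the comparison loop above (illustrative only):
 //   left += length;  right += length;        // point just past the last char
 //   for (int i = -length; i != 0; i++) {
 //     if (left[i] != right[i]) goto chars_not_equal;
 //   }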
6289 
6290 
6291 void StringCompareStub::Generate(MacroAssembler* masm) {
6292  Label runtime;
6293 
6294  Counters* counters = masm->isolate()->counters();
6295 
6296  // Stack frame on entry.
6297  // sp[0]: right string
6298  // sp[4]: left string
6299  __ Ldrd(r0 , r1, MemOperand(sp)); // Load right in r0, left in r1.
6300 
6301  Label not_same;
6302  __ cmp(r0, r1);
6303  __ b(ne, &not_same);
6304  STATIC_ASSERT(EQUAL == 0);
6305  STATIC_ASSERT(kSmiTag == 0);
6306  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
6307  __ IncrementCounter(counters->string_compare_native(), 1, r1, r2);
6308  __ add(sp, sp, Operand(2 * kPointerSize));
6309  __ Ret();
6310 
6311  __ bind(&not_same);
6312 
6313  // Check that both objects are sequential ASCII strings.
6314  __ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime);
6315 
6316  // Compare flat ASCII strings natively. Remove arguments from stack first.
6317  __ IncrementCounter(counters->string_compare_native(), 1, r2, r3);
6318  __ add(sp, sp, Operand(2 * kPointerSize));
6320 
6321  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
6322  // tagged as a small integer.
6323  __ bind(&runtime);
6324  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
6325 }
6326 
6327 
6328 void StringAddStub::Generate(MacroAssembler* masm) {
6329  Label call_runtime, call_builtin;
6330  Builtins::JavaScript builtin_id = Builtins::ADD;
6331 
6332  Counters* counters = masm->isolate()->counters();
6333 
6334  // Stack on entry:
6335  // sp[0]: second argument (right).
6336  // sp[4]: first argument (left).
6337 
6338  // Load the two arguments.
6339  __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // First argument.
6340  __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
6341 
6342  // Make sure that both arguments are strings if not known in advance.
6343  if (flags_ == NO_STRING_ADD_FLAGS) {
6344  __ JumpIfEitherSmi(r0, r1, &call_runtime);
6345  // Load instance types.
6350  STATIC_ASSERT(kStringTag == 0);
6351  // If either is not a string, go to runtime.
6352  __ tst(r4, Operand(kIsNotStringMask));
6353  __ tst(r5, Operand(kIsNotStringMask), eq);
6354  __ b(ne, &call_runtime);
6355  } else {
6356  // Here at least one of the arguments is definitely a string.
6357  // We convert the one that is not known to be a string.
6358  if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
6359  ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
6360  GenerateConvertArgument(
6361  masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin);
6362  builtin_id = Builtins::STRING_ADD_RIGHT;
6363  } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
6364  ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
6365  GenerateConvertArgument(
6366  masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin);
6367  builtin_id = Builtins::STRING_ADD_LEFT;
6368  }
6369  }
6370 
6371  // Both arguments are strings.
6372  // r0: first string
6373  // r1: second string
6374  // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6375  // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6376  {
6377  Label strings_not_empty;
6378  // Check if either of the strings are empty. In that case return the other.
6381  STATIC_ASSERT(kSmiTag == 0);
6382  __ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty.
6383  __ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second.
6384  STATIC_ASSERT(kSmiTag == 0);
6385  // Else test if second string is empty.
6386  __ cmp(r3, Operand(Smi::FromInt(0)), ne);
6387  __ b(ne, &strings_not_empty); // If either string was empty, return r0.
6388 
6389  __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
6390  __ add(sp, sp, Operand(2 * kPointerSize));
6391  __ Ret();
6392 
6393  __ bind(&strings_not_empty);
6394  }
6395 
6396  __ mov(r2, Operand(r2, ASR, kSmiTagSize));
6397  __ mov(r3, Operand(r3, ASR, kSmiTagSize));
6398  // Both strings are non-empty.
6399  // r0: first string
6400  // r1: second string
6401  // r2: length of first string
6402  // r3: length of second string
6403  // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6404  // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6405  // Look at the length of the result of adding the two strings.
6406  Label string_add_flat_result, longer_than_two;
6407  // Adding two lengths can't overflow.
6409  __ add(r6, r2, Operand(r3));
6410  // Use the symbol table when adding two one-character strings, as it
6411  // helps later optimizations to return a symbol here.
6412  __ cmp(r6, Operand(2));
6413  __ b(ne, &longer_than_two);
6414 
6415  // Check that both strings are non-external ASCII strings.
6416  if (flags_ != NO_STRING_ADD_FLAGS) {
6421  }
6422  __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
6423  &call_runtime);
6424 
6425  // Get the two characters forming the sub string.
6428 
6429  // Try to look up the two-character string in the symbol table. If it is
6430  // not found, just allocate a new one.
6431  Label make_two_character_string;
6433  masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string);
6434  __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
6435  __ add(sp, sp, Operand(2 * kPointerSize));
6436  __ Ret();
6437 
6438  __ bind(&make_two_character_string);
6439  // The resulting string has length 2, and the first characters of the two
6440  // strings are combined into a single halfword in the r2 register.
6441  // So we can fill the resulting string without two loops, using a single
6442  // halfword store instruction (which assumes that the processor is
6443  // in little-endian mode).
6444  __ mov(r6, Operand(2));
6445  __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
6447  __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
6448  __ add(sp, sp, Operand(2 * kPointerSize));
6449  __ Ret();
6450 
6451  __ bind(&longer_than_two);
6452  // Check if resulting string will be flat.
6453  __ cmp(r6, Operand(ConsString::kMinLength));
6454  __ b(lt, &string_add_flat_result);
6455  // Handle exceptionally long strings in the runtime system.
6456  STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
6458  // kMaxLength + 1 is representable as a shifted literal, kMaxLength is not.
6459  __ cmp(r6, Operand(String::kMaxLength + 1));
6460  __ b(hs, &call_runtime);
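 // Summary of the length-based decision above (illustrative only):
 //   if (length < ConsString::kMinLength)  build a flat result (below);
 //   else if (length > String::kMaxLength) bail out to the runtime;
 //   else                                  allocate a ConsString of the parts.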
6461 
6462  // If result is not supposed to be flat, allocate a cons string object.
6463  // If both strings are ASCII the result is an ASCII cons string.
6464  if (flags_ != NO_STRING_ADD_FLAGS) {
6469  }
6470  Label non_ascii, allocated, ascii_data;
6472  __ tst(r4, Operand(kStringEncodingMask));
6473  __ tst(r5, Operand(kStringEncodingMask), ne);
6474  __ b(eq, &non_ascii);
6475 
6476  // Allocate an ASCII cons string.
6477  __ bind(&ascii_data);
6478  __ AllocateAsciiConsString(r7, r6, r4, r5, &call_runtime);
6479  __ bind(&allocated);
6480  // Fill the fields of the cons string.
6483  __ mov(r0, Operand(r7));
6484  __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
6485  __ add(sp, sp, Operand(2 * kPointerSize));
6486  __ Ret();
6487 
6488  __ bind(&non_ascii);
6489  // At least one of the strings is two-byte. Check whether it happens
6490  // to contain only ASCII characters.
6491  // r4: first instance type.
6492  // r5: second instance type.
6493  __ tst(r4, Operand(kAsciiDataHintMask));
6494  __ tst(r5, Operand(kAsciiDataHintMask), ne);
6495  __ b(ne, &ascii_data);
6496  __ eor(r4, r4, Operand(r5));
6498  __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
6499  __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
6500  __ b(eq, &ascii_data);
6501 
6502  // Allocate a two byte cons string.
6503  __ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime);
6504  __ jmp(&allocated);
6505 
6506  // We cannot encounter sliced strings or cons strings here since:
6508  // Handle creating a flat result from either external or sequential strings.
6509  // Locate the first characters' locations.
6510  // r0: first string
6511  // r1: second string
6512  // r2: length of first string
6513  // r3: length of second string
6514  // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6515  // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6516  // r6: sum of lengths.
6517  Label first_prepared, second_prepared;
6518  __ bind(&string_add_flat_result);
6519  if (flags_ != NO_STRING_ADD_FLAGS) {
6524  }
6525 
6526  // Check whether both strings have same encoding
6527  __ eor(r7, r4, Operand(r5));
6528  __ tst(r7, Operand(kStringEncodingMask));
6529  __ b(ne, &call_runtime);
6530 
6532  __ tst(r4, Operand(kStringRepresentationMask));
6534  __ add(r7,
6535  r0,
6537  LeaveCC,
6538  eq);
6539  __ b(eq, &first_prepared);
6540  // External string: rule out short external string and load string resource.
6542  __ tst(r4, Operand(kShortExternalStringMask));
6543  __ b(ne, &call_runtime);
6545  __ bind(&first_prepared);
6546 
6548  __ tst(r5, Operand(kStringRepresentationMask));
6550  __ add(r1,
6551  r1,
6553  LeaveCC,
6554  eq);
6555  __ b(eq, &second_prepared);
6556  // External string: rule out short external string and load string resource.
6558  __ tst(r5, Operand(kShortExternalStringMask));
6559  __ b(ne, &call_runtime);
6561  __ bind(&second_prepared);
6562 
6563  Label non_ascii_string_add_flat_result;
6564  // r7: first character of first string
6565  // r1: first character of second string
6566  // r2: length of first string.
6567  // r3: length of second string.
6568  // r6: sum of lengths.
6569  // Both strings have the same encoding.
6571  __ tst(r5, Operand(kStringEncodingMask));
6572  __ b(eq, &non_ascii_string_add_flat_result);
6573 
6574  __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
6576  // r0: result string.
6577  // r7: first character of first string.
6578  // r1: first character of second string.
6579  // r2: length of first string.
6580  // r3: length of second string.
6581  // r6: first character of result.
6583  // r6: next character of result.
6585  __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
6586  __ add(sp, sp, Operand(2 * kPointerSize));
6587  __ Ret();
6588 
6589  __ bind(&non_ascii_string_add_flat_result);
6590  __ AllocateTwoByteString(r0, r6, r4, r5, r9, &call_runtime);
6592  // r0: result string.
6593  // r7: first character of first string.
6594  // r1: first character of second string.
6595  // r2: length of first string.
6596  // r3: length of second string.
6597  // r6: first character of result.
6598  StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, false);
6599  // r6: next character of result.
6600  StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
6601  __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
6602  __ add(sp, sp, Operand(2 * kPointerSize));
6603  __ Ret();
6604 
6605  // Just jump to runtime to add the two strings.
6606  __ bind(&call_runtime);
6607  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
6608 
6609  if (call_builtin.is_linked()) {
6610  __ bind(&call_builtin);
6611  __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
6612  }
6613 }
6614 
6615 
6616 void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
6617  int stack_offset,
6618  Register arg,
6619  Register scratch1,
6620  Register scratch2,
6621  Register scratch3,
6622  Register scratch4,
6623  Label* slow) {
6624  // First check if the argument is already a string.
6625  Label not_string, done;
6626  __ JumpIfSmi(arg, &not_string);
6627  __ CompareObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE);
6628  __ b(lt, &done);
6629 
6630  // Check the number to string cache.
6631  Label not_cached;
6632  __ bind(&not_string);
6633  // Puts the cached result into scratch1.
6635  arg,
6636  scratch1,
6637  scratch2,
6638  scratch3,
6639  scratch4,
6640  false,
6641  &not_cached);
6642  __ mov(arg, scratch1);
6643  __ str(arg, MemOperand(sp, stack_offset));
6644  __ jmp(&done);
6645 
6646  // Check if the argument is a safe string wrapper.
6647  __ bind(&not_cached);
6648  __ JumpIfSmi(arg, slow);
6649  __ CompareObjectType(
6650  arg, scratch1, scratch2, JS_VALUE_TYPE); // map -> scratch1.
6651  __ b(ne, slow);
6652  __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
6653  __ and_(scratch2,
6654  scratch2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
6655  __ cmp(scratch2,
6657  __ b(ne, slow);
6658  __ ldr(arg, FieldMemOperand(arg, JSValue::kValueOffset));
6659  __ str(arg, MemOperand(sp, stack_offset));
6660 
6661  __ bind(&done);
6662 }
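
GenerateConvertArgument tries three fast paths before giving up: the argument is already a string, it is a number with a cached string representation, or it is a JSValue string wrapper that is safe to unwrap. A simplified, self-contained sketch of that decision (the variant, the cache, and the "safe wrapper" flag are illustrative stand-ins, not V8 structures):

#include <cassert>
#include <optional>
#include <string>
#include <unordered_map>
#include <variant>

struct StringWrapper { std::string value; bool safe_for_default_value_of; };
using Arg = std::variant<std::string, double, StringWrapper>;

std::unordered_map<double, std::string> number_string_cache;  // toy cache

// Returns the string to use, or nullopt when the slow path must run.
std::optional<std::string> ConvertArgument(const Arg& arg) {
  if (const std::string* s = std::get_if<std::string>(&arg)) return *s;
  if (const double* n = std::get_if<double>(&arg)) {
    auto it = number_string_cache.find(*n);
    if (it != number_string_cache.end()) return it->second;  // cache hit
    return std::nullopt;                                     // slow path
  }
  const StringWrapper& w = std::get<StringWrapper>(arg);
  if (w.safe_for_default_value_of) return w.value;           // unwrap the wrapper
  return std::nullopt;                                       // slow path
}

int main() {
  number_string_cache[42.0] = "42";
  assert(*ConvertArgument(Arg{42.0}) == "42");
  assert(!ConvertArgument(Arg{3.5}).has_value());
  assert(*ConvertArgument(Arg{StringWrapper{"wrapped", true}}) == "wrapped");
  return 0;
}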
6663 
6664 
6665 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
6666  ASSERT(state_ == CompareIC::SMIS);
6667  Label miss;
6668  __ orr(r2, r1, r0);
6669  __ JumpIfNotSmi(r2, &miss);
6670 
6671  if (GetCondition() == eq) {
6672  // For equality we do not care about the sign of the result.
6673  __ sub(r0, r0, r1, SetCC);
6674  } else {
6675  // Untag before subtracting to avoid handling overflow.
6676  __ SmiUntag(r1);
6677  __ sub(r0, r1, SmiUntagOperand(r0));
6678  }
6679  __ Ret();
6680 
6681  __ bind(&miss);
6682  GenerateMiss(masm);
6683 }
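
The smi path relies on two properties of the tagged representation: the difference of two equal tagged values is zero, and untagging before subtracting keeps an ordered comparison from overflowing. A small stand-alone sketch (assumes 32-bit smis tagged with a zero low bit; not V8 code):

#include <cassert>
#include <cstdint>

// Tagged smi: the value lives in the upper 31 bits, the low tag bit is zero.
using Smi = int32_t;
inline Smi SmiTag(int32_t v) {
  return static_cast<Smi>(static_cast<uint32_t>(v) << 1);
}
inline int32_t SmiUntag(Smi s) { return s >> 1; }  // arithmetic shift, as on ARM

// Equality: the difference of the tagged values is zero exactly when they
// match (computed in unsigned arithmetic so wrap-around is well defined).
inline bool SmiEqual(Smi a, Smi b) {
  return static_cast<uint32_t>(a) - static_cast<uint32_t>(b) == 0;
}

// Ordering: untag first so the subtraction cannot overflow 32 bits.
inline int32_t SmiCompare(Smi lhs, Smi rhs) {
  return SmiUntag(lhs) - SmiUntag(rhs);
}

int main() {
  assert(SmiEqual(SmiTag(7), SmiTag(7)));
  assert(SmiCompare(SmiTag(-3), SmiTag(5)) < 0);
  assert(SmiCompare(SmiTag(5), SmiTag(-3)) > 0);
  return 0;
}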
6684 
6685 
6686 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
6687  ASSERT(state_ == CompareIC::HEAP_NUMBERS);
6688 
6689  Label generic_stub;
6690  Label unordered, maybe_undefined1, maybe_undefined2;
6691  Label miss;
6692  __ and_(r2, r1, Operand(r0));
6693  __ JumpIfSmi(r2, &generic_stub);
6694 
6695  __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
6696  __ b(ne, &maybe_undefined1);
6697  __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
6698  __ b(ne, &maybe_undefined2);
6699 
6700  // Inlining the double comparison and falling back to the general compare
6701  // stub if NaN is involved or VFP2 is unsupported.
6703  CpuFeatures::Scope scope(VFP2);
6704 
6705  // Load left and right operand
6706  __ sub(r2, r1, Operand(kHeapObjectTag));
6707  __ vldr(d0, r2, HeapNumber::kValueOffset);
6708  __ sub(r2, r0, Operand(kHeapObjectTag));
6709  __ vldr(d1, r2, HeapNumber::kValueOffset);
6710 
6711  // Compare operands
6712  __ VFPCompareAndSetFlags(d0, d1);
6713 
6714  // Don't base result on status bits when a NaN is involved.
6715  __ b(vs, &unordered);
6716 
6717  // Return a result of -1, 0, or 1, based on status bits.
6718  __ mov(r0, Operand(EQUAL), LeaveCC, eq);
6719  __ mov(r0, Operand(LESS), LeaveCC, lt);
6720  __ mov(r0, Operand(GREATER), LeaveCC, gt);
6721  __ Ret();
6722  }
6723 
6724  __ bind(&unordered);
6725  CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
6726  __ bind(&generic_stub);
6727  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
6728 
6729  __ bind(&maybe_undefined1);
6731  __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
6732  __ b(ne, &miss);
6733  __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
6734  __ b(ne, &maybe_undefined2);
6735  __ jmp(&unordered);
6736  }
6737 
6738  __ bind(&maybe_undefined2);
6740  __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
6741  __ b(eq, &unordered);
6742  }
6743 
6744  __ bind(&miss);
6745  GenerateMiss(masm);
6746 }
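
The inlined heap-number comparison produces a result using the same -1/0/1 convention as the LESS/EQUAL/GREATER constants above, unless either operand is NaN, in which case the "unordered" case falls through to the generic compare stub. A portable sketch of that contract (std::optional models the fall-through; not V8 code):

#include <cassert>
#include <cmath>
#include <optional>

enum Result { LESS = -1, EQUAL = 0, GREATER = 1 };

// Three-way comparison; NaN operands are "unordered" and yield no result,
// mirroring the branch to the generic stub above.
std::optional<Result> CompareHeapNumbers(double lhs, double rhs) {
  if (std::isnan(lhs) || std::isnan(rhs)) return std::nullopt;
  if (lhs < rhs) return LESS;
  if (lhs > rhs) return GREATER;
  return EQUAL;
}

int main() {
  assert(*CompareHeapNumbers(1.0, 2.0) == LESS);
  assert(*CompareHeapNumbers(2.0, 2.0) == EQUAL);
  assert(!CompareHeapNumbers(NAN, 2.0).has_value());
  return 0;
}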
6747 
6748 
6749 void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
6750  ASSERT(state_ == CompareIC::SYMBOLS);
6751  Label miss;
6752 
6753  // Registers containing left and right operands respectively.
6754  Register left = r1;
6755  Register right = r0;
6756  Register tmp1 = r2;
6757  Register tmp2 = r3;
6758 
6759  // Check that both operands are heap objects.
6760  __ JumpIfEitherSmi(left, right, &miss);
6761 
6762  // Check that both operands are symbols.
6763  __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
6764  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
6765  __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
6766  __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
6767  STATIC_ASSERT(kSymbolTag != 0);
6768  __ and_(tmp1, tmp1, Operand(tmp2));
6769  __ tst(tmp1, Operand(kIsSymbolMask));
6770  __ b(eq, &miss);
6771 
6772  // Symbols are compared by identity.
6773  __ cmp(left, right);
6774  // Make sure r0 is non-zero. At this point input operands are
6775  // guaranteed to be non-zero.
6776  ASSERT(right.is(r0));
6777  STATIC_ASSERT(EQUAL == 0);
6778  STATIC_ASSERT(kSmiTag == 0);
6779  __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
6780  __ Ret();
6781 
6782  __ bind(&miss);
6783  GenerateMiss(masm);
6784 }
6785 
6786 
6787 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
6788  ASSERT(state_ == CompareIC::STRINGS);
6789  Label miss;
6790 
6791  bool equality = Token::IsEqualityOp(op_);
6792 
6793  // Registers containing left and right operands respectively.
6794  Register left = r1;
6795  Register right = r0;
6796  Register tmp1 = r2;
6797  Register tmp2 = r3;
6798  Register tmp3 = r4;
6799  Register tmp4 = r5;
6800 
6801  // Check that both operands are heap objects.
6802  __ JumpIfEitherSmi(left, right, &miss);
6803 
6804  // Check that both operands are strings. This leaves the instance
6805  // types loaded in tmp1 and tmp2.
6806  __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
6807  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
6808  __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
6809  __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
6811  __ orr(tmp3, tmp1, tmp2);
6812  __ tst(tmp3, Operand(kIsNotStringMask));
6813  __ b(ne, &miss);
6814 
6815  // Fast check for identical strings.
6816  __ cmp(left, right);
6817  STATIC_ASSERT(EQUAL == 0);
6818  STATIC_ASSERT(kSmiTag == 0);
6819  __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
6820  __ Ret(eq);
6821 
6822  // Handle not identical strings.
6823 
6824  // Check that both strings are symbols. If they are, we're done
6825  // because we already know they are not identical.
6826  if (equality) {
6827  ASSERT(GetCondition() == eq);
6828  STATIC_ASSERT(kSymbolTag != 0);
6829  __ and_(tmp3, tmp1, Operand(tmp2));
6830  __ tst(tmp3, Operand(kIsSymbolMask));
6831  // Make sure r0 is non-zero. At this point input operands are
6832  // guaranteed to be non-zero.
6833  ASSERT(right.is(r0));
6834  __ Ret(ne);
6835  }
6836 
6837  // Check that both strings are sequential ASCII.
6838  Label runtime;
6839  __ JumpIfBothInstanceTypesAreNotSequentialAscii(
6840  tmp1, tmp2, tmp3, tmp4, &runtime);
6841 
6842  // Compare flat ASCII strings. Returns when done.
6843  if (equality) {
6845  masm, left, right, tmp1, tmp2, tmp3);
6846  } else {
6848  masm, left, right, tmp1, tmp2, tmp3, tmp4);
6849  }
6850 
6851  // Handle more complex cases in runtime.
6852  __ bind(&runtime);
6853  __ Push(left, right);
6854  if (equality) {
6855  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
6856  } else {
6857  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
6858  }
6859 
6860  __ bind(&miss);
6861  GenerateMiss(masm);
6862 }
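
The string compare IC above answers equality without touching characters in two cases: the operands are the same object, or both are symbols and are therefore already known to be distinct. A toy sketch of those fast paths (an interned std::string wrapper stands in for V8 strings; not V8 code):

#include <cassert>
#include <optional>
#include <string>

struct JSStringLike { std::string chars; bool is_symbol; };

// Equality-only fast paths: identical objects are equal, and two distinct
// symbols are known to be unequal without comparing characters.
std::optional<bool> FastStringEquals(const JSStringLike* left,
                                     const JSStringLike* right) {
  if (left == right) return true;
  if (left->is_symbol && right->is_symbol) return false;
  return std::nullopt;  // flat ASCII compare or the runtime decides
}

int main() {
  JSStringLike a{"foo", true}, b{"foo", false};
  assert(*FastStringEquals(&a, &a));
  assert(!FastStringEquals(&a, &b).has_value());  // must compare characters
  return 0;
}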
6863 
6864 
6865 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
6866  ASSERT(state_ == CompareIC::OBJECTS);
6867  Label miss;
6868  __ and_(r2, r1, Operand(r0));
6869  __ JumpIfSmi(r2, &miss);
6870 
6871  __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
6872  __ b(ne, &miss);
6873  __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE);
6874  __ b(ne, &miss);
6875 
6876  ASSERT(GetCondition() == eq);
6877  __ sub(r0, r0, Operand(r1));
6878  __ Ret();
6879 
6880  __ bind(&miss);
6881  GenerateMiss(masm);
6882 }
6883 
6884 
6885 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
6886  Label miss;
6887  __ and_(r2, r1, Operand(r0));
6888  __ JumpIfSmi(r2, &miss);
6891  __ cmp(r2, Operand(known_map_));
6892  __ b(ne, &miss);
6893  __ cmp(r3, Operand(known_map_));
6894  __ b(ne, &miss);
6895 
6896  __ sub(r0, r0, Operand(r1));
6897  __ Ret();
6898 
6899  __ bind(&miss);
6900  GenerateMiss(masm);
6901 }
6902 
6903 
6904 
6905 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
6906  {
6907  // Call the runtime system in a fresh internal frame.
6908  ExternalReference miss =
6909  ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
6910 
6911  FrameScope scope(masm, StackFrame::INTERNAL);
6912  __ Push(r1, r0);
6913  __ push(lr);
6914  __ Push(r1, r0);
6915  __ mov(ip, Operand(Smi::FromInt(op_)));
6916  __ push(ip);
6917  __ CallExternalReference(miss, 3);
6918  // Compute the entry point of the rewritten stub.
6919  __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
6920  // Restore registers.
6921  __ pop(lr);
6922  __ pop(r0);
6923  __ pop(r1);
6924  }
6925 
6926  __ Jump(r2);
6927 }
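
The miss handler follows a fixed protocol: preserve the operands, call the runtime with (left, right, op), receive a freshly selected stub, and tail-jump to it with the original operands restored. A loose, high-level analogy in plain C++ (function pointers stand in for code objects; names are illustrative, not V8 API):

#include <cassert>

using CompareFn = int (*)(int, int);

int GenericCompare(int a, int b) { return (a > b) - (a < b); }

// Stand-in for the runtime miss handler: inspects the operands and returns
// the code to use from now on (here, always the generic routine).
CompareFn CompareMiss(int /*left*/, int /*right*/, int /*op*/) {
  return &GenericCompare;
}

int CompareWithMiss(int left, int right, int op) {
  CompareFn rewritten = CompareMiss(left, right, op);  // runtime call
  return rewritten(left, right);                       // jump to the new stub
}

int main() {
  assert(CompareWithMiss(3, 5, 0) < 0);
  assert(CompareWithMiss(5, 5, 0) == 0);
  return 0;
}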
6928 
6929 
6930 void DirectCEntryStub::Generate(MacroAssembler* masm) {
6931  __ ldr(pc, MemOperand(sp, 0));
6932 }
6933 
6934 
6935 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
6936  ExternalReference function) {
6937  __ mov(r2, Operand(function));
6938  GenerateCall(masm, r2);
6939 }
6940 
6941 
6942 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
6943  Register target) {
6944  __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
6945  RelocInfo::CODE_TARGET));
6946 
6947  // Prevent literal pool emission during calculation of return address.
6948  Assembler::BlockConstPoolScope block_const_pool(masm);
6949 
6950  // Push return address (accessible to GC through exit frame pc).
6951  // Note that using pc with str is deprecated.
6952  Label start;
6953  __ bind(&start);
6954  __ add(ip, pc, Operand(Assembler::kInstrSize));
6955  __ str(ip, MemOperand(sp, 0));
6956  __ Jump(target); // Call the C++ function.
6958  masm->SizeOfCodeGeneratedSince(&start));
6959 }
6960 
6961 
6962 void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
6963  Label* miss,
6964  Label* done,
6965  Register receiver,
6966  Register properties,
6967  Handle<String> name,
6968  Register scratch0) {
6969  // If the slots probed for the hash value (probes 1 through kProbes - 1)
6970  // do not hold the name, and the kProbes-th slot is unused (its name is the
6971  // undefined value), then the hash table is guaranteed not to contain the
6972  // property. This holds even if some slots represent deleted properties
6973  // (their names are the hole value).
6974  for (int i = 0; i < kInlinedProbes; i++) {
6975  // scratch0 points to properties hash.
6976  // Compute the masked index: (hash + i + i * i) & mask.
6977  Register index = scratch0;
6978  // Capacity is smi 2^n.
6979  __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
6980  __ sub(index, index, Operand(1));
6981  __ and_(index, index, Operand(
6982  Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
6983 
6984  // Scale the index by multiplying by the entry size.
6986  __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
6987 
6988  Register entity_name = scratch0;
6989  // Having undefined at this place means the name is not contained.
6990  ASSERT_EQ(kSmiTagSize, 1);
6991  Register tmp = properties;
6992  __ add(tmp, properties, Operand(index, LSL, 1));
6993  __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
6994 
6995  ASSERT(!tmp.is(entity_name));
6996  __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
6997  __ cmp(entity_name, tmp);
6998  __ b(eq, done);
6999 
7000  if (i != kInlinedProbes - 1) {
7001  // Load the hole ready for use below:
7002  __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
7003 
7004  // Stop if found the property.
7005  __ cmp(entity_name, Operand(Handle<String>(name)));
7006  __ b(eq, miss);
7007 
7008  Label the_hole;
7009  __ cmp(entity_name, tmp);
7010  __ b(eq, &the_hole);
7011 
7012  // Check if the entry name is not a symbol.
7013  __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
7014  __ ldrb(entity_name,
7016  __ tst(entity_name, Operand(kIsSymbolMask));
7017  __ b(eq, miss);
7018 
7019  __ bind(&the_hole);
7020 
7021  // Restore the properties.
7022  __ ldr(properties,
7024  }
7025  }
7026 
7027  const int spill_mask =
7028  (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() |
7029  r2.bit() | r1.bit() | r0.bit());
7030 
7031  __ stm(db_w, sp, spill_mask);
7033  __ mov(r1, Operand(Handle<String>(name)));
7035  __ CallStub(&stub);
7036  __ cmp(r0, Operand(0));
7037  __ ldm(ia_w, sp, spill_mask);
7038 
7039  __ b(eq, done);
7040  __ b(ne, miss);
7041 }
7042 
7043 
7044 // Probe the string dictionary in the |elements| register. Jump to the
7045 // |done| label if a property with the given name is found. Jump to
7046 // the |miss| label otherwise.
7047 // If lookup was successful |scratch2| will be equal to elements + 4 * index.
7048 void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
7049  Label* miss,
7050  Label* done,
7051  Register elements,
7052  Register name,
7053  Register scratch1,
7054  Register scratch2) {
7055  ASSERT(!elements.is(scratch1));
7056  ASSERT(!elements.is(scratch2));
7057  ASSERT(!name.is(scratch1));
7058  ASSERT(!name.is(scratch2));
7059 
7060  __ AssertString(name);
7061 
7062  // Compute the capacity mask.
7063  __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
7064  __ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize)); // convert smi to int
7065  __ sub(scratch1, scratch1, Operand(1));
7066 
7067  // Generate an unrolled loop that performs a few probes before
7068  // giving up. Measurements done on Gmail indicate that 2 probes
7069  // cover ~93% of loads from dictionaries.
7070  for (int i = 0; i < kInlinedProbes; i++) {
7071  // Compute the masked index: (hash + i + i * i) & mask.
7072  __ ldr(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
7073  if (i > 0) {
7074  // Add the probe offset (i + i * i), left shifted, to avoid right shifting
7075  // the hash in a separate instruction. The value hash + i + i * i is right
7076  // shifted in the following 'and' instruction.
7077  ASSERT(StringDictionary::GetProbeOffset(i) <
7078  1 << (32 - String::kHashFieldOffset));
7079  __ add(scratch2, scratch2, Operand(
7080  StringDictionary::GetProbeOffset(i) << String::kHashShift));
7081  }
7082  __ and_(scratch2, scratch1, Operand(scratch2, LSR, String::kHashShift));
7083 
7084  // Scale the index by multiplying by the element size.
7086  // scratch2 = scratch2 * 3.
7087  __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
7088 
7089  // Check if the key is identical to the name.
7090  __ add(scratch2, elements, Operand(scratch2, LSL, 2));
7091  __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
7092  __ cmp(name, Operand(ip));
7093  __ b(eq, done);
7094  }
7095 
7096  const int spill_mask =
7097  (lr.bit() | r6.bit() | r5.bit() | r4.bit() |
7098  r3.bit() | r2.bit() | r1.bit() | r0.bit()) &
7099  ~(scratch1.bit() | scratch2.bit());
7100 
7101  __ stm(db_w, sp, spill_mask);
7102  if (name.is(r0)) {
7103  ASSERT(!elements.is(r1));
7104  __ Move(r1, name);
7105  __ Move(r0, elements);
7106  } else {
7107  __ Move(r0, elements);
7108  __ Move(r1, name);
7109  }
7111  __ CallStub(&stub);
7112  __ cmp(r0, Operand(0));
7113  __ mov(scratch2, Operand(r2));
7114  __ ldm(ia_w, sp, spill_mask);
7115 
7116  __ b(ne, done);
7117  __ b(eq, miss);
7118 }
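
Both dictionary lookups use the same probe sequence over a power-of-two table, adding a quadratic offset to the hash on each retry; GetProbeOffset(i) is assumed here to expand to the usual triangular offset (i + i*i) / 2, so every slot is eventually visited. A self-contained sketch with a toy table (not V8 code):

#include <cassert>
#include <cstdint>
#include <optional>
#include <string>
#include <vector>

struct Entry { std::string key; int value; };

// Toy open-addressed table with power-of-two capacity; empty slots model the
// "undefined" sentinel the stub compares against.
std::optional<int> Probe(const std::vector<std::optional<Entry>>& table,
                         uint32_t hash, const std::string& key) {
  const uint32_t mask = static_cast<uint32_t>(table.size()) - 1;
  for (uint32_t i = 0; i < table.size(); ++i) {
    uint32_t index = (hash + (i + i * i) / 2) & mask;  // triangular probe offset
    const std::optional<Entry>& slot = table[index];
    if (!slot.has_value()) return std::nullopt;  // undefined: key cannot be present
    if (slot->key == key) return slot->value;    // hit
  }
  return std::nullopt;
}

int main() {
  std::vector<std::optional<Entry>> table(8);
  table[(5u + 0u) & 7u] = Entry{"other", 1};  // occupies the first probe slot
  table[(5u + 1u) & 7u] = Entry{"name", 2};   // second probe, offset (1 + 1) / 2 = 1
  assert(*Probe(table, 5u, "name") == 2);
  assert(!Probe(table, 5u, "missing").has_value());
  return 0;
}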
7119 
7120 
7121 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
7122  // This stub overrides SometimesSetsUpAFrame() to return false. That means
7123  // we cannot call anything that could cause a GC from this stub.
7124  // Registers:
7125  // result: StringDictionary to probe
7126  // r1: key
7127  // dictionary_: StringDictionary to probe.
7128  // index_: will hold an index of the entry if the lookup is successful;
7129  //         might alias with result_.
7130  // Returns:
7131  // result_ is zero if lookup failed, non zero otherwise.
7132 
7133  Register result = r0;
7134  Register dictionary = r0;
7135  Register key = r1;
7136  Register index = r2;
7137  Register mask = r3;
7138  Register hash = r4;
7139  Register undefined = r5;
7140  Register entry_key = r6;
7141 
7142  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
7143 
7144  __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset));
7145  __ mov(mask, Operand(mask, ASR, kSmiTagSize));
7146  __ sub(mask, mask, Operand(1));
7147 
7148  __ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset));
7149 
7150  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
7151 
7152  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
7153  // Compute the masked index: (hash + i + i * i) & mask.
7154  // Capacity is smi 2^n.
7155  if (i > 0) {
7156  // Add the probe offset (i + i * i), left shifted, to avoid right shifting
7157  // the hash in a separate instruction. The value hash + i + i * i is right
7158  // shifted in the following 'and' instruction.
7159  ASSERT(StringDictionary::GetProbeOffset(i) <
7160  1 << (32 - String::kHashFieldOffset));
7161  __ add(index, hash, Operand(
7162  StringDictionary::GetProbeOffset(i) << String::kHashShift));
7163  } else {
7164  __ mov(index, Operand(hash));
7165  }
7166  __ and_(index, mask, Operand(index, LSR, String::kHashShift));
7167 
7168  // Scale the index by multiplying by the entry size.
7170  __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
7171 
7172  ASSERT_EQ(kSmiTagSize, 1);
7173  __ add(index, dictionary, Operand(index, LSL, 2));
7174  __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
7175 
7176  // Having undefined at this place means the name is not contained.
7177  __ cmp(entry_key, Operand(undefined));
7178  __ b(eq, &not_in_dictionary);
7179 
7180  // Stop if found the property.
7181  __ cmp(entry_key, Operand(key));
7182  __ b(eq, &in_dictionary);
7183 
7184  if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
7185  // Check if the entry name is not a symbol.
7186  __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
7187  __ ldrb(entry_key,
7189  __ tst(entry_key, Operand(kIsSymbolMask));
7190  __ b(eq, &maybe_in_dictionary);
7191  }
7192  }
7193 
7194  __ bind(&maybe_in_dictionary);
7195  // If we are doing negative lookup then probing failure should be
7196  // treated as a lookup success. For positive lookup probing failure
7197  // should be treated as lookup failure.
7198  if (mode_ == POSITIVE_LOOKUP) {
7199  __ mov(result, Operand::Zero());
7200  __ Ret();
7201  }
7202 
7203  __ bind(&in_dictionary);
7204  __ mov(result, Operand(1));
7205  __ Ret();
7206 
7207  __ bind(&not_in_dictionary);
7208  __ mov(result, Operand::Zero());
7209  __ Ret();
7210 }
7211 
7212 
7213 struct AheadOfTimeWriteBarrierStubList {
7214  Register object, value, address;
7215  RememberedSetAction action;
7216 };
7217 
7218 #define REG(Name) { kRegister_ ## Name ## _Code }
7219 
7220 static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
7221  // Used in RegExpExecStub.
7222  { REG(r6), REG(r4), REG(r7), EMIT_REMEMBERED_SET },
7223  { REG(r6), REG(r2), REG(r7), EMIT_REMEMBERED_SET },
7224  // Used in CompileArrayPushCall.
7225  // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
7226  // Also used in KeyedStoreIC::GenerateGeneric.
7227  { REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET },
7228  // Used in CompileStoreGlobal.
7229  { REG(r4), REG(r1), REG(r2), OMIT_REMEMBERED_SET },
7230  // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
7231  { REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET },
7232  { REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET },
7233  // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
7234  { REG(r2), REG(r1), REG(r3), EMIT_REMEMBERED_SET },
7235  { REG(r3), REG(r1), REG(r2), EMIT_REMEMBERED_SET },
7236  // KeyedStoreStubCompiler::GenerateStoreFastElement.
7237  { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET },
7238  { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET },
7239  // ElementsTransitionGenerator::GenerateMapChangeElementTransition
7240  // and ElementsTransitionGenerator::GenerateSmiToDouble
7241  // and ElementsTransitionGenerator::GenerateDoubleToObject
7242  { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET },
7243  { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET },
7244  // ElementsTransitionGenerator::GenerateDoubleToObject
7245  { REG(r6), REG(r2), REG(r0), EMIT_REMEMBERED_SET },
7246  { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET },
7247  // StoreArrayLiteralElementStub::Generate
7248  { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
7249  // FastNewClosureStub::Generate
7250  { REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET },
7251  // Null termination.
7252  { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
7253 };
7254 
7255 #undef REG
7256 
7257 
7259  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7260  !entry->object.is(no_reg);
7261  entry++) {
7262  if (object_.is(entry->object) &&
7263  value_.is(entry->value) &&
7264  address_.is(entry->address) &&
7265  remembered_set_action_ == entry->action &&
7266  save_fp_regs_mode_ == kDontSaveFPRegs) {
7267  return true;
7268  }
7269  }
7270  return false;
7271 }
7272 
7273 
7275  return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
7276 }
7277 
7278 
7281  stub1.GetCode()->set_is_pregenerated(true);
7282 }
7283 
7284 
7286  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7287  !entry->object.is(no_reg);
7288  entry++) {
7289  RecordWriteStub stub(entry->object,
7290  entry->value,
7291  entry->address,
7292  entry->action,
7293  kDontSaveFPRegs);
7294  stub.GetCode()->set_is_pregenerated(true);
7295  }
7296 }
7297 
7298 
7299 bool CodeStub::CanUseFPRegisters() {
7301 }
7302 
7303 
7304 // Takes the input in 3 registers: address_ value_ and object_. A pointer to
7305 // the value has just been written into the object, now this stub makes sure
7306 // we keep the GC informed. The word in the object where the value has been
7307 // written is in the address register.
7308 void RecordWriteStub::Generate(MacroAssembler* masm) {
7309  Label skip_to_incremental_noncompacting;
7310  Label skip_to_incremental_compacting;
7311 
7312  // The first two instructions are generated with labels so as to get the
7313  // offset fixed up correctly by the bind(Label*) call. We patch them back
7314  // and forth between a compare instruction (a nop in this position) and the
7315  // real branch when we start and stop incremental heap marking.
7316  // See RecordWriteStub::Patch for details.
7317  {
7318  // Block literal pool emission, as the position of these two instructions
7319  // is assumed by the patching code.
7320  Assembler::BlockConstPoolScope block_const_pool(masm);
7321  __ b(&skip_to_incremental_noncompacting);
7322  __ b(&skip_to_incremental_compacting);
7323  }
7324 
7325  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
7326  __ RememberedSetHelper(object_,
7327  address_,
7328  value_,
7329  save_fp_regs_mode_,
7331  }
7332  __ Ret();
7333 
7334  __ bind(&skip_to_incremental_noncompacting);
7335  GenerateIncremental(masm, INCREMENTAL);
7336 
7337  __ bind(&skip_to_incremental_compacting);
7338  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
7339 
7340  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
7341  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
7342  ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
7343  ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
7344  PatchBranchIntoNop(masm, 0);
7346 }
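
When only the store buffer is active, the stub's job reduces to remembered-set bookkeeping: record slots in old-space objects that now hold new-space pointers, so the scavenger can treat them as roots. A toy model of that filter (simplified; not V8 code):

#include <cassert>
#include <set>

// Toy store buffer: remembers slots in old-space objects that were written
// with new-space values.
struct RememberedSet {
  std::set<void**> slots;

  void RecordWrite(void** slot, bool object_in_new_space,
                   bool value_in_new_space) {
    // Only old -> new pointers need to be remembered; everything else is
    // either scanned anyway or uninteresting to the scavenger.
    if (value_in_new_space && !object_in_new_space) slots.insert(slot);
  }
};

int main() {
  RememberedSet rs;
  void* old_object_field = nullptr;
  void* new_object_field = nullptr;
  rs.RecordWrite(&old_object_field, /*object_in_new_space=*/false,
                 /*value_in_new_space=*/true);   // old -> new: recorded
  rs.RecordWrite(&new_object_field, /*object_in_new_space=*/true,
                 /*value_in_new_space=*/true);   // new -> new: not recorded
  assert(rs.slots.size() == 1);
  return 0;
}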
7347 
7348 
7349 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
7350  regs_.Save(masm);
7351 
7352  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
7353  Label dont_need_remembered_set;
7354 
7355  __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
7356  __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
7357  regs_.scratch0(),
7358  &dont_need_remembered_set);
7359 
7360  __ CheckPageFlag(regs_.object(),
7361  regs_.scratch0(),
7363  ne,
7364  &dont_need_remembered_set);
7365 
7366  // First notify the incremental marker if necessary, then update the
7367  // remembered set.
7368  CheckNeedsToInformIncrementalMarker(
7369  masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
7370  InformIncrementalMarker(masm, mode);
7371  regs_.Restore(masm);
7372  __ RememberedSetHelper(object_,
7373  address_,
7374  value_,
7375  save_fp_regs_mode_,
7377 
7378  __ bind(&dont_need_remembered_set);
7379  }
7380 
7381  CheckNeedsToInformIncrementalMarker(
7382  masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
7383  InformIncrementalMarker(masm, mode);
7384  regs_.Restore(masm);
7385  __ Ret();
7386 }
7387 
7388 
7389 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
7390  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
7391  int argument_count = 3;
7392  __ PrepareCallCFunction(argument_count, regs_.scratch0());
7393  Register address =
7394  r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
7395  ASSERT(!address.is(regs_.object()));
7396  ASSERT(!address.is(r0));
7397  __ Move(address, regs_.address());
7398  __ Move(r0, regs_.object());
7399  if (mode == INCREMENTAL_COMPACTION) {
7400  __ Move(r1, address);
7401  } else {
7402  ASSERT(mode == INCREMENTAL);
7403  __ ldr(r1, MemOperand(address, 0));
7404  }
7405  __ mov(r2, Operand(ExternalReference::isolate_address()));
7406 
7407  AllowExternalCallThatCantCauseGC scope(masm);
7408  if (mode == INCREMENTAL_COMPACTION) {
7409  __ CallCFunction(
7410  ExternalReference::incremental_evacuation_record_write_function(
7411  masm->isolate()),
7412  argument_count);
7413  } else {
7414  ASSERT(mode == INCREMENTAL);
7415  __ CallCFunction(
7416  ExternalReference::incremental_marking_record_write_function(
7417  masm->isolate()),
7418  argument_count);
7419  }
7420  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
7421 }
7422 
7423 
7424 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
7425  MacroAssembler* masm,
7426  OnNoNeedToInformIncrementalMarker on_no_need,
7427  Mode mode) {
7428  Label on_black;
7429  Label need_incremental;
7430  Label need_incremental_pop_scratch;
7431 
7432  __ and_(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
7433  __ ldr(regs_.scratch1(),
7434  MemOperand(regs_.scratch0(),
7436  __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1), SetCC);
7437  __ str(regs_.scratch1(),
7438  MemOperand(regs_.scratch0(),
7440  __ b(mi, &need_incremental);
7441 
7442  // Let's look at the color of the object: If it is not black we don't have
7443  // to inform the incremental marker.
7444  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
7445 
7446  regs_.Restore(masm);
7447  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
7448  __ RememberedSetHelper(object_,
7449  address_,
7450  value_,
7451  save_fp_regs_mode_,
7453  } else {
7454  __ Ret();
7455  }
7456 
7457  __ bind(&on_black);
7458 
7459  // Get the value from the slot.
7460  __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
7461 
7462  if (mode == INCREMENTAL_COMPACTION) {
7463  Label ensure_not_white;
7464 
7465  __ CheckPageFlag(regs_.scratch0(), // Contains value.
7466  regs_.scratch1(), // Scratch.
7468  eq,
7469  &ensure_not_white);
7470 
7471  __ CheckPageFlag(regs_.object(),
7472  regs_.scratch1(), // Scratch.
7474  eq,
7475  &need_incremental);
7476 
7477  __ bind(&ensure_not_white);
7478  }
7479 
7480  // We need extra registers for this, so we push the object and the address
7481  // register temporarily.
7482  __ Push(regs_.object(), regs_.address());
7483  __ EnsureNotWhite(regs_.scratch0(), // The value.
7484  regs_.scratch1(), // Scratch.
7485  regs_.object(), // Scratch.
7486  regs_.address(), // Scratch.
7487  &need_incremental_pop_scratch);
7488  __ Pop(regs_.object(), regs_.address());
7489 
7490  regs_.Restore(masm);
7491  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
7492  __ RememberedSetHelper(object_,
7493  address_,
7494  value_,
7495  save_fp_regs_mode_,
7497  } else {
7498  __ Ret();
7499  }
7500 
7501  __ bind(&need_incremental_pop_scratch);
7502  __ Pop(regs_.object(), regs_.address());
7503 
7504  __ bind(&need_incremental);
7505 
7506  // Fall through when we need to inform the incremental marker.
7507 }
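
The check above follows the tri-color invariant: the incremental marker only needs to hear about a store when an already-black object receives a pointer to a still-white value; every other case can return or simply update the remembered set. A self-contained model of that predicate (toy color map; not V8 code):

#include <cassert>
#include <unordered_map>

enum class Color { kWhite, kGrey, kBlack };

// Toy mark state keyed by object address.
struct MarkState {
  std::unordered_map<const void*, Color> color;

  // A store is only interesting to the marker when a black object receives a
  // pointer to a white value (a simplified view of the checks above).
  bool NeedsToInformMarker(const void* object, const void* value) {
    if (color[object] != Color::kBlack) return false;  // object not scanned yet
    if (color[value] != Color::kWhite) return false;   // value already visible
    return true;
  }
};

int main() {
  MarkState s;
  int a = 0, b = 0;
  s.color[&a] = Color::kBlack;
  s.color[&b] = Color::kWhite;
  assert(s.NeedsToInformMarker(&a, &b));
  s.color[&b] = Color::kGrey;
  assert(!s.NeedsToInformMarker(&a, &b));
  return 0;
}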
7508 
7509 
7510 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
7511  // ----------- S t a t e -------------
7512  // -- r0 : element value to store
7513  // -- r1 : array literal
7514  // -- r2 : map of array literal
7515  // -- r3 : element index as smi
7516  // -- r4 : array literal index in function as smi
7517  // -----------------------------------
7518 
7519  Label element_done;
7520  Label double_elements;
7521  Label smi_element;
7522  Label slow_elements;
7523  Label fast_elements;
7524 
7525  __ CheckFastElements(r2, r5, &double_elements);
7526  // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
7527  __ JumpIfSmi(r0, &smi_element);
7528  __ CheckFastSmiElements(r2, r5, &fast_elements);
7529 
7530  // Storing into the array literal requires an elements transition. Call into
7531  // the runtime.
7532  __ bind(&slow_elements);
7533  // call.
7534  __ Push(r1, r3, r0);
7537  __ Push(r5, r4);
7538  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
7539 
7540  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
7541  __ bind(&fast_elements);
7543  __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
7544  __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
7545  __ str(r0, MemOperand(r6, 0));
7546  // Update the write barrier for the array store.
7547  __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs,
7549  __ Ret();
7550 
7551  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
7552  // and value is Smi.
7553  __ bind(&smi_element);
7555  __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
7557  __ Ret();
7558 
7559  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
7560  __ bind(&double_elements);
7562  __ StoreNumberToDoubleElements(r0, r3, r1,
7563  // Overwrites all regs after this.
7564  r5, r6, r7, r9, r2,
7565  &slow_elements);
7566  __ Ret();
7567 }
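
The stub dispatches on the literal's ElementsKind: smi values are stored without a write barrier, object values into object-backed elements need RecordWrite, doubles are stored unboxed, and anything requiring a transition goes to the runtime. A simplified decision table in plain C++ (kinds reduced to three; not V8 code):

#include <cassert>

enum class ElementsKind { kFastSmiOnly, kFastObject, kFastDouble };
enum class StorePath {
  kSmiStoreNoBarrier,       // smi_element: plain store, no write barrier
  kObjectStoreWithBarrier,  // fast_elements: store followed by RecordWrite
  kUnboxedDoubleStore,      // double_elements: StoreNumberToDoubleElements
  kRuntime                  // slow_elements: needs a transition
};

StorePath PickStorePath(ElementsKind kind, bool value_is_smi) {
  if (kind == ElementsKind::kFastDouble) return StorePath::kUnboxedDoubleStore;
  if (value_is_smi) return StorePath::kSmiStoreNoBarrier;
  if (kind == ElementsKind::kFastObject) return StorePath::kObjectStoreWithBarrier;
  return StorePath::kRuntime;  // non-smi value into smi-only elements
}

int main() {
  assert(PickStorePath(ElementsKind::kFastSmiOnly, true) == StorePath::kSmiStoreNoBarrier);
  assert(PickStorePath(ElementsKind::kFastSmiOnly, false) == StorePath::kRuntime);
  assert(PickStorePath(ElementsKind::kFastObject, false) == StorePath::kObjectStoreWithBarrier);
  assert(PickStorePath(ElementsKind::kFastDouble, false) == StorePath::kUnboxedDoubleStore);
  return 0;
}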
7568 
7569 
7570 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
7571  if (entry_hook_ != NULL) {
7572  PredictableCodeSizeScope predictable(masm);
7573  ProfileEntryHookStub stub;
7574  __ push(lr);
7575  __ CallStub(&stub);
7576  __ pop(lr);
7577  }
7578 }
7579 
7580 
7581 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
7582  // The entry hook is a "push lr" instruction, followed by a call.
7583  const int32_t kReturnAddressDistanceFromFunctionStart =
7585 
7586  // Save live volatile registers.
7587  __ Push(lr, r5, r1);
7588  const int32_t kNumSavedRegs = 3;
7589 
7590  // Compute the function's address for the first argument.
7591  __ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart));
7592 
7593  // The caller's return address is above the saved temporaries.
7594  // Grab that for the second argument to the hook.
7595  __ add(r1, sp, Operand(kNumSavedRegs * kPointerSize));
7596 
7597  // Align the stack if necessary.
7598  int frame_alignment = masm->ActivationFrameAlignment();
7599  if (frame_alignment > kPointerSize) {
7600  __ mov(r5, sp);
7601  ASSERT(IsPowerOf2(frame_alignment));
7602  __ and_(sp, sp, Operand(-frame_alignment));
7603  }
7604 
7605 #if defined(V8_HOST_ARCH_ARM)
7606  __ mov(ip, Operand(reinterpret_cast<int32_t>(&entry_hook_)));
7607  __ ldr(ip, MemOperand(ip));
7608 #else
7609  // Under the simulator we need to indirect the entry hook through a
7610  // trampoline function at a known address.
7611  Address trampoline_address = reinterpret_cast<Address>(
7612  reinterpret_cast<intptr_t>(EntryHookTrampoline));
7613  ApiFunction dispatcher(trampoline_address);
7614  __ mov(ip, Operand(ExternalReference(&dispatcher,
7615  ExternalReference::BUILTIN_CALL,
7616  masm->isolate())));
7617 #endif
7618  __ Call(ip);
7619 
7620  // Restore the stack pointer if needed.
7621  if (frame_alignment > kPointerSize) {
7622  __ mov(sp, r5);
7623  }
7624 
7625  __ Pop(lr, r5, r1);
7626  __ Ret();
7627 }
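
The generated code hands the hook two values: the address of the function being entered and the stack location holding the caller's return address, matching the two-argument shape of v8::FunctionEntryHook. A host-side sketch of such a hook (illustrative values only; not tied to the stub above):

#include <cstdint>
#include <cstdio>

// Same shape as the entry hook the stub invokes: entered code address plus
// the location of the caller's return address.
typedef void (*EntryHookFn)(uintptr_t function, uintptr_t return_addr_location);

static void LogEntry(uintptr_t function, uintptr_t return_addr_location) {
  std::printf("entered code at %p, return address slot at %p\n",
              reinterpret_cast<void*>(function),
              reinterpret_cast<void*>(return_addr_location));
}

int main() {
  EntryHookFn hook = &LogEntry;
  hook(0x1000u, 0x2000u);  // illustrative values only
  return 0;
}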
7628 
7629 #undef __
7630 
7631 } } // namespace v8::internal
7632 
7633 #endif // V8_TARGET_ARCH_ARM