v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine.
code-stubs-mips.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if defined(V8_TARGET_ARCH_MIPS)
31 
32 #include "bootstrapper.h"
33 #include "code-stubs.h"
34 #include "codegen.h"
35 #include "regexp-macro-assembler.h"
36 
37 namespace v8 {
38 namespace internal {
39 
40 
41 #define __ ACCESS_MASM(masm)
42 
43 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
44  Label* slow,
45  Condition cc,
46  bool never_nan_nan);
47 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
48  Register lhs,
49  Register rhs,
50  Label* rhs_not_nan,
51  Label* slow,
52  bool strict);
53 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
54 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
55  Register lhs,
56  Register rhs);
57 
58 
59 // Check if the operand is a heap number.
60 static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
61  Register scratch1, Register scratch2,
62  Label* not_a_heap_number) {
63  __ lw(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
64  __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
65  __ Branch(not_a_heap_number, ne, scratch1, Operand(scratch2));
66 }
67 
68 
69 void ToNumberStub::Generate(MacroAssembler* masm) {
70  // The ToNumber stub takes one argument in a0.
71  Label check_heap_number, call_builtin;
72  __ JumpIfNotSmi(a0, &check_heap_number);
73  __ Ret(USE_DELAY_SLOT);
74  __ mov(v0, a0);
75 
76  __ bind(&check_heap_number);
77  EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
78  __ Ret(USE_DELAY_SLOT);
79  __ mov(v0, a0);
80 
81  __ bind(&call_builtin);
82  __ push(a0);
83  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
84 }
85 
86 
87 void FastNewClosureStub::Generate(MacroAssembler* masm) {
88  // Create a new closure from the given function info in new
89  // space. Set the context to the current context in cp.
90  Counters* counters = masm->isolate()->counters();
91 
92  Label gc;
93 
94  // Pop the function info from the stack.
95  __ pop(a3);
96 
97  // Attempt to allocate new JSFunction in new space.
98  __ AllocateInNewSpace(JSFunction::kSize,
99  v0,
100  a1,
101  a2,
102  &gc,
103  TAG_OBJECT);
104 
105  __ IncrementCounter(counters->fast_new_closure_total(), 1, t2, t3);
106 
107  int map_index = (language_mode_ == CLASSIC_MODE)
110 
111  // Compute the function map in the current native context and set that
112  // as the map of the allocated object.
115  __ lw(t1, MemOperand(a2, Context::SlotOffset(map_index)));
117 
118  // Initialize the rest of the function. We don't have to update the
119  // write barrier because the allocated object is in new space.
120  __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
121  __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
128 
129  // Initialize the code pointer in the function to be the one
130  // found in the shared function info object.
131  // But first check if there is an optimized version for our context.
132  Label check_optimized;
133  Label install_unoptimized;
134  if (FLAG_cache_optimized_code) {
135  __ lw(a1,
137  __ And(at, a1, a1);
138  __ Branch(&check_optimized, ne, at, Operand(zero_reg));
139  }
140  __ bind(&install_unoptimized);
141  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
144  __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
145 
146  // Return result. The argument function info has been popped already.
148  __ Ret();
149 
150  __ bind(&check_optimized);
151 
152  __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, t2, t3);
153 
154  // a2 holds native context, a1 points to fixed array of 3-element entries
155  // (native context, optimized code, literals).
156  // The optimized code map must never be empty, so check the first elements.
157  Label install_optimized;
158  // Speculatively move code object into t0.
161  __ Branch(&install_optimized, eq, a2, Operand(t1));
162 
163  // Iterate through the rest of map backwards. t0 holds an index as a Smi.
164  Label loop;
166  __ bind(&loop);
167  // Do not double check first entry.
168 
169  __ Branch(&install_unoptimized, eq, t0,
171  __ Subu(t0, t0, Operand(
172  Smi::FromInt(SharedFunctionInfo::kEntryLength))); // Skip an entry.
173  __ Addu(t1, a1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
174  __ sll(at, t0, kPointerSizeLog2 - kSmiTagSize);
175  __ Addu(t1, t1, Operand(at));
176  __ lw(t1, MemOperand(t1));
177  __ Branch(&loop, ne, a2, Operand(t1));
178  // Hit: fetch the optimized code.
179  __ Addu(t1, a1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
180  __ sll(at, t0, kPointerSizeLog2 - kSmiTagSize);
181  __ Addu(t1, t1, Operand(at));
182  __ Addu(t1, t1, Operand(kPointerSize));
183  __ lw(t0, MemOperand(t1));
184 
185  __ bind(&install_optimized);
186  __ IncrementCounter(counters->fast_new_closure_install_optimized(),
187  1, t2, t3);
188 
189  // TODO(fschneider): Idea: store proper code pointers in the map and either
190  // unmangle them on marking or do nothing as the whole map is discarded on
191  // major GC anyway.
192  __ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
194 
195  // Now link a function into a list of optimized functions.
197 
199  // No need for write barrier as JSFunction (eax) is in the new space.
200 
202  // Store JSFunction (eax) into edx before issuing write barrier as
203  // it clobbers all the registers passed.
204  __ mov(t0, v0);
205  __ RecordWriteContextSlot(
206  a2,
208  t0,
209  a1,
212 
213  // Return result. The argument function info has been popped already.
214  __ Ret();
215 
216  // Create a new closure through the slower runtime call.
217  __ bind(&gc);
218  __ LoadRoot(t0, Heap::kFalseValueRootIndex);
219  __ Push(cp, a3, t0);
220  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
221 }
222 
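The loop above searches the shared function info's optimized code map backwards for an entry whose native context matches a2. Below is a minimal C++ sketch of that search under the entry layout described in the comments (native context, optimized code, literals); the container and accessors are illustrative, not V8's own types.

#include <vector>

struct Entry { void* native_context; void* code; void* literals; };

// Returns the cached optimized code for 'context', or nullptr when the map
// holds no entry for it (the stub then installs the unoptimized code).
void* LookupOptimizedCode(const std::vector<Entry>& map, void* context) {
  // Walk backwards, mirroring the assembly loop that decrements a Smi index
  // by kEntryLength until it falls below the first entry.
  for (int i = static_cast<int>(map.size()) - 1; i >= 0; --i) {
    if (map[i].native_context == context) return map[i].code;
  }
  return nullptr;  // install_unoptimized path.
}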
223 
224 void FastNewContextStub::Generate(MacroAssembler* masm) {
225  // Try to allocate the context in new space.
226  Label gc;
227  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
228 
229  // Attempt to allocate the context in new space.
230  __ AllocateInNewSpace(FixedArray::SizeFor(length),
231  v0,
232  a1,
233  a2,
234  &gc,
235  TAG_OBJECT);
236 
237  // Load the function from the stack.
238  __ lw(a3, MemOperand(sp, 0));
239 
240  // Set up the object header.
241  __ LoadRoot(a1, Heap::kFunctionContextMapRootIndex);
242  __ li(a2, Operand(Smi::FromInt(length)));
245 
246  // Set up the fixed slots, copy the global object from the previous context.
248  __ li(a1, Operand(Smi::FromInt(0)));
253 
254  // Initialize the rest of the slots to undefined.
255  __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
256  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
257  __ sw(a1, MemOperand(v0, Context::SlotOffset(i)));
258  }
259 
260  // Remove the on-stack argument and return.
261  __ mov(cp, v0);
262  __ DropAndRet(1);
263 
264  // Need to collect. Call into runtime system.
265  __ bind(&gc);
266  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
267 }
268 
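The allocation above requests FixedArray::SizeFor(slots_ + Context::MIN_CONTEXT_SLOTS) bytes. The following is a rough model of that arithmetic on 32-bit MIPS; the header size and the value of MIN_CONTEXT_SLOTS are assumptions made for illustration, not constants read from this file.

#include <cstdio>

const int kPointerSize = 4;                          // 32-bit MIPS.
const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length (assumed).
const int kMinContextSlots = 4;                      // assumed for this version.

int ContextAllocationSize(int extra_slots) {
  int length = extra_slots + kMinContextSlots;
  return kFixedArrayHeaderSize + length * kPointerSize;  // FixedArray::SizeFor.
}

int main() {
  // A function context with two extra slots: (2 + 4) * 4 + 8 = 32 bytes.
  std::printf("%d\n", ContextAllocationSize(2));
  return 0;
}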
269 
270 void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
271  // Stack layout on entry:
272  //
273  // [sp]: function.
274  // [sp + kPointerSize]: serialized scope info
275 
276  // Try to allocate the context in new space.
277  Label gc;
278  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
279  __ AllocateInNewSpace(FixedArray::SizeFor(length),
280  v0, a1, a2, &gc, TAG_OBJECT);
281 
282  // Load the function from the stack.
283  __ lw(a3, MemOperand(sp, 0));
284 
285  // Load the serialized scope info from the stack.
286  __ lw(a1, MemOperand(sp, 1 * kPointerSize));
287 
288  // Set up the object header.
289  __ LoadRoot(a2, Heap::kBlockContextMapRootIndex);
291  __ li(a2, Operand(Smi::FromInt(length)));
293 
294  // If this block context is nested in the native context we get a smi
295  // sentinel instead of a function. The block context should get the
296  // canonical empty function of the native context as its closure which
297  // we still have to look up.
298  Label after_sentinel;
299  __ JumpIfNotSmi(a3, &after_sentinel);
300  if (FLAG_debug_code) {
301  const char* message = "Expected 0 as a Smi sentinel";
302  __ Assert(eq, message, a3, Operand(zero_reg));
303  }
304  __ lw(a3, GlobalObjectOperand());
307  __ bind(&after_sentinel);
308 
309  // Set up the fixed slots, copy the global object from the previous context.
315 
316  // Initialize the rest of the slots to the hole value.
317  __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
318  for (int i = 0; i < slots_; i++) {
319  __ sw(a1, ContextOperand(v0, i + Context::MIN_CONTEXT_SLOTS));
320  }
321 
322  // Remove the on-stack argument and return.
323  __ mov(cp, v0);
324  __ DropAndRet(2);
325 
326  // Need to collect. Call into runtime system.
327  __ bind(&gc);
328  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
329 }
330 
331 
332 static void GenerateFastCloneShallowArrayCommon(
333  MacroAssembler* masm,
334  int length,
336  Label* fail) {
337  // Registers on entry:
338  // a3: boilerplate literal array.
340 
341  // All sizes here are multiples of kPointerSize.
342  int elements_size = 0;
343  if (length > 0) {
345  ? FixedDoubleArray::SizeFor(length)
346  : FixedArray::SizeFor(length);
347  }
348  int size = JSArray::kSize + elements_size;
349 
350  // Allocate both the JS array and the elements array in one big
351  // allocation. This avoids multiple limit checks.
352  __ AllocateInNewSpace(size,
353  v0,
354  a1,
355  a2,
356  fail,
357  TAG_OBJECT);
358 
359  // Copy the JS array part.
360  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
361  if ((i != JSArray::kElementsOffset) || (length == 0)) {
362  __ lw(a1, FieldMemOperand(a3, i));
363  __ sw(a1, FieldMemOperand(v0, i));
364  }
365  }
366 
367  if (length > 0) {
368  // Get hold of the elements array of the boilerplate and set up the
369  // elements pointer in the resulting object.
371  __ Addu(a2, v0, Operand(JSArray::kSize));
373 
374  // Copy the elements array.
375  ASSERT((elements_size % kPointerSize) == 0);
376  __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
377  }
378 }
379 
380 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
381  // Stack layout on entry:
382  //
383  // [sp]: constant elements.
384  // [sp + kPointerSize]: literal index.
385  // [sp + (2 * kPointerSize)]: literals array.
386 
387  // Load boilerplate object into a3 and check if we need to create a
388  // boilerplate.
389  Label slow_case;
390  __ lw(a3, MemOperand(sp, 2 * kPointerSize));
391  __ lw(a0, MemOperand(sp, 1 * kPointerSize));
392  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
393  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
394  __ Addu(t0, a3, t0);
395  __ lw(a3, MemOperand(t0));
396  __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
397  __ Branch(&slow_case, eq, a3, Operand(t1));
398 
399  FastCloneShallowArrayStub::Mode mode = mode_;
400  if (mode == CLONE_ANY_ELEMENTS) {
401  Label double_elements, check_fast_elements;
404  __ LoadRoot(t1, Heap::kFixedCOWArrayMapRootIndex);
405  __ Branch(&check_fast_elements, ne, v0, Operand(t1));
406  GenerateFastCloneShallowArrayCommon(masm, 0,
407  COPY_ON_WRITE_ELEMENTS, &slow_case);
408  // Return and remove the on-stack parameters.
409  __ DropAndRet(3);
410 
411  __ bind(&check_fast_elements);
412  __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
413  __ Branch(&double_elements, ne, v0, Operand(t1));
414  GenerateFastCloneShallowArrayCommon(masm, length_,
415  CLONE_ELEMENTS, &slow_case);
416  // Return and remove the on-stack parameters.
417  __ DropAndRet(3);
418 
419  __ bind(&double_elements);
420  mode = CLONE_DOUBLE_ELEMENTS;
421  // Fall through to generate the code to handle double elements.
422  }
423 
424  if (FLAG_debug_code) {
425  const char* message;
426  Heap::RootListIndex expected_map_index;
427  if (mode == CLONE_ELEMENTS) {
428  message = "Expected (writable) fixed array";
429  expected_map_index = Heap::kFixedArrayMapRootIndex;
430  } else if (mode == CLONE_DOUBLE_ELEMENTS) {
431  message = "Expected (writable) fixed double array";
432  expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
433  } else {
435  message = "Expected copy-on-write fixed array";
436  expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
437  }
438  __ push(a3);
441  __ LoadRoot(at, expected_map_index);
442  __ Assert(eq, message, a3, Operand(at));
443  __ pop(a3);
444  }
445 
446  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
447 
448  // Return and remove the on-stack parameters.
449  __ DropAndRet(3);
450 
451  __ bind(&slow_case);
452  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
453 }
454 
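Both clone stubs index the literals array with a Smi-tagged literal index, which is why the code shifts by kPointerSizeLog2 - kSmiTagSize instead of untagging and then scaling. A small sketch of that arithmetic, assuming the usual 32-bit Smi tagging (value shifted left by one); the helper name is illustrative.

#include <cassert>
#include <cstdint>

const int kSmiTagSize = 1;       // 32-bit V8: a Smi stores the value shifted left by one.
const int kPointerSizeLog2 = 2;  // 4-byte pointers.

// Byte offset of element 'index' in a FixedArray body, given the Smi-tagged index.
uint32_t ElementOffsetFromSmi(uint32_t tagged_index) {
  // Equivalent to untagging (>> 1) and then scaling by the pointer size (<< 2).
  return tagged_index << (kPointerSizeLog2 - kSmiTagSize);
}

int main() {
  uint32_t smi_three = 3u << kSmiTagSize;  // Smi-encoded literal index 3.
  assert(ElementOffsetFromSmi(smi_three) == 3 * 4);
  return 0;
}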
455 
456 void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
457  // Stack layout on entry:
458  //
459  // [sp]: object literal flags.
460  // [sp + kPointerSize]: constant properties.
461  // [sp + (2 * kPointerSize)]: literal index.
462  // [sp + (3 * kPointerSize)]: literals array.
463 
464  // Load boilerplate object into a3 and check if we need to create a
465  // boilerplate.
466  Label slow_case;
467  __ lw(a3, MemOperand(sp, 3 * kPointerSize));
468  __ lw(a0, MemOperand(sp, 2 * kPointerSize));
469  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
470  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
471  __ Addu(a3, t0, a3);
472  __ lw(a3, MemOperand(a3));
473  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
474  __ Branch(&slow_case, eq, a3, Operand(t0));
475 
476  // Check that the boilerplate contains only fast properties and we can
477  // statically determine the instance size.
478  int size = JSObject::kHeaderSize + length_ * kPointerSize;
481  __ Branch(&slow_case, ne, a0, Operand(size >> kPointerSizeLog2));
482 
483  // Allocate the JS object and copy header together with all in-object
484  // properties from the boilerplate.
485  __ AllocateInNewSpace(size, v0, a1, a2, &slow_case, TAG_OBJECT);
486  for (int i = 0; i < size; i += kPointerSize) {
487  __ lw(a1, FieldMemOperand(a3, i));
488  __ sw(a1, FieldMemOperand(v0, i));
489  }
490 
491  // Return and remove the on-stack parameters.
492  __ DropAndRet(4);
493 
494  __ bind(&slow_case);
495  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
496 }
497 
498 
499 // Takes a Smi and converts to an IEEE 64 bit floating point value in two
500 // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
501 // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
502 // scratch register. Destroys the source register. No GC occurs during this
503 // stub so you don't have to set up the frame.
504 class ConvertToDoubleStub : public CodeStub {
505  public:
506  ConvertToDoubleStub(Register result_reg_1,
507  Register result_reg_2,
508  Register source_reg,
509  Register scratch_reg)
510  : result1_(result_reg_1),
511  result2_(result_reg_2),
512  source_(source_reg),
513  zeros_(scratch_reg) { }
514 
515  private:
516  Register result1_;
517  Register result2_;
518  Register source_;
519  Register zeros_;
520 
521  // Minor key encoding in 16 bits.
522  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
523  class OpBits: public BitField<Token::Value, 2, 14> {};
524 
525  Major MajorKey() { return ConvertToDouble; }
526  int MinorKey() {
527  // Encode the parameters in a unique 16 bit value.
528  return result1_.code() +
529  (result2_.code() << 4) +
530  (source_.code() << 8) +
531  (zeros_.code() << 12);
532  }
533 
534  void Generate(MacroAssembler* masm);
535 };
536 
537 
538 void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
539 #ifndef BIG_ENDIAN_FLOATING_POINT
540  Register exponent = result1_;
541  Register mantissa = result2_;
542 #else
543  Register exponent = result2_;
544  Register mantissa = result1_;
545 #endif
546  Label not_special;
547  // Convert from Smi to integer.
548  __ sra(source_, source_, kSmiTagSize);
549  // Move sign bit from source to destination. This works because the sign bit
550  // in the exponent word of the double has the same position and polarity as
551  // the 2's complement sign bit in a Smi.
552  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
553  __ And(exponent, source_, Operand(HeapNumber::kSignMask));
554  // Subtract from 0 if source was negative.
555  __ subu(at, zero_reg, source_);
556  __ Movn(source_, at, exponent);
557 
558  // We have -1, 0 or 1, which we treat specially. Register source_ contains
559  // absolute value: it is either equal to 1 (special case of -1 and 1),
560  // greater than 1 (not a special case) or less than 1 (special case of 0).
561  __ Branch(&not_special, gt, source_, Operand(1));
562 
563  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
564  const uint32_t exponent_word_for_1 =
566  // Safe to use 'at' as dest reg here.
567  __ Or(at, exponent, Operand(exponent_word_for_1));
568  __ Movn(exponent, at, source_); // Write exp when source not 0.
569  // 1, 0 and -1 all have 0 for the second word.
570  __ Ret(USE_DELAY_SLOT);
571  __ mov(mantissa, zero_reg);
572 
573  __ bind(&not_special);
574  // Count leading zeros.
575  // Gets the wrong answer for 0, but we already checked for that case above.
576  __ Clz(zeros_, source_);
577  // Compute exponent and or it into the exponent register.
578  // We use mantissa as a scratch register here.
579  __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
580  __ subu(mantissa, mantissa, zeros_);
581  __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
582  __ Or(exponent, exponent, mantissa);
583 
584  // Shift up the source chopping the top bit off.
585  __ Addu(zeros_, zeros_, Operand(1));
586  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
587  __ sllv(source_, source_, zeros_);
588  // Compute lower part of fraction (last 12 bits).
589  __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
590  // And the top (top 20 bits).
591  __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
592 
593  __ Ret(USE_DELAY_SLOT);
594  __ or_(exponent, exponent, source_);
595 }
596 
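For the general path above (|value| > 1), the exponent is 31 - clz(|value|), biased by 1023, and the bits that remain after the implicit leading 1 are split 20/32 across the two result words. A host-side sketch of the same computation, checked against the compiler's own conversion; the helper below is illustrative and not part of V8.

#include <cassert>
#include <cstdint>
#include <cstring>

// Model of the "not_special" path above (|n| > 1); -1, 0 and 1 take the
// special-case path in the stub and are not handled here.
void IntToDoubleWords(int32_t n, uint32_t* hi, uint32_t* lo) {
  uint32_t sign = static_cast<uint32_t>(n) & 0x80000000u;
  uint32_t mag = sign ? static_cast<uint32_t>(-n) : static_cast<uint32_t>(n);
  int zeros = __builtin_clz(mag);             // Clz in the stub (GCC/Clang builtin).
  uint32_t exponent = sign | ((31 - zeros + 1023u) << 20);
  mag <<= (zeros + 1);                        // Chop off the implicit leading 1.
  *lo = mag << 20;                            // Low 32 mantissa bits.
  *hi = exponent | (mag >> 12);               // Sign, exponent, top 20 mantissa bits.
}

int main() {
  uint32_t hi, lo;
  IntToDoubleWords(5, &hi, &lo);              // 5.0 is 0x4014000000000000.
  double d = 5.0;
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  assert(hi == static_cast<uint32_t>(bits >> 32));
  assert(lo == static_cast<uint32_t>(bits));
  return 0;
}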
597 
598 void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
600  Register scratch1,
601  Register scratch2) {
603  CpuFeatures::Scope scope(FPU);
604  __ sra(scratch1, a0, kSmiTagSize);
605  __ mtc1(scratch1, f14);
606  __ cvt_d_w(f14, f14);
607  __ sra(scratch1, a1, kSmiTagSize);
608  __ mtc1(scratch1, f12);
609  __ cvt_d_w(f12, f12);
610  if (destination == kCoreRegisters) {
611  __ Move(a2, a3, f14);
612  __ Move(a0, a1, f12);
613  }
614  } else {
615  ASSERT(destination == kCoreRegisters);
616  // Write Smi from a0 to a3 and a2 in double format.
617  __ mov(scratch1, a0);
618  ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
619  __ push(ra);
620  __ Call(stub1.GetCode());
621  // Write Smi from a1 to a1 and a0 in double format.
622  __ mov(scratch1, a1);
623  ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2);
624  __ Call(stub2.GetCode());
625  __ pop(ra);
626  }
627 }
628 
629 
631  MacroAssembler* masm,
633  Register heap_number_map,
634  Register scratch1,
635  Register scratch2,
636  Label* slow) {
637 
638  // Load right operand (a0) to f12 or a2/a3.
639  LoadNumber(masm, destination,
640  a0, f14, a2, a3, heap_number_map, scratch1, scratch2, slow);
641 
642  // Load left operand (a1) to f14 or a0/a1.
643  LoadNumber(masm, destination,
644  a1, f12, a0, a1, heap_number_map, scratch1, scratch2, slow);
645 }
646 
647 
648 void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
649  Destination destination,
650  Register object,
651  FPURegister dst,
652  Register dst1,
653  Register dst2,
654  Register heap_number_map,
655  Register scratch1,
656  Register scratch2,
657  Label* not_number) {
658  __ AssertRootValue(heap_number_map,
659  Heap::kHeapNumberMapRootIndex,
660  "HeapNumberMap register clobbered.");
661 
662  Label is_smi, done;
663 
664  // Smi-check
665  __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
666  // Heap number check
667  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
668 
669  // Handle loading a double from a heap number.
671  destination == kFPURegisters) {
672  CpuFeatures::Scope scope(FPU);
673  // Load the double from tagged HeapNumber to double register.
674 
675  // ARM uses a workaround here because of the unaligned HeapNumber
676  // kValueOffset. On MIPS this workaround is built into ldc1 so there's no
677  // point in generating even more instructions.
678  __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
679  } else {
680  ASSERT(destination == kCoreRegisters);
681  // Load the double from heap number to dst1 and dst2 in double format.
682  __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset));
683  __ lw(dst2, FieldMemOperand(object,
684  HeapNumber::kValueOffset + kPointerSize));
685  }
686  __ Branch(&done);
687 
688  // Handle loading a double from a smi.
689  __ bind(&is_smi);
691  CpuFeatures::Scope scope(FPU);
692  // Convert smi to double using FPU instructions.
693  __ mtc1(scratch1, dst);
694  __ cvt_d_w(dst, dst);
695  if (destination == kCoreRegisters) {
696  // Load the converted smi to dst1 and dst2 in double format.
697  __ Move(dst1, dst2, dst);
698  }
699  } else {
700  ASSERT(destination == kCoreRegisters);
701  // Write smi to dst1 and dst2 double format.
702  __ mov(scratch1, object);
703  ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
704  __ push(ra);
705  __ Call(stub.GetCode());
706  __ pop(ra);
707  }
708 
709  __ bind(&done);
710 }
711 
712 
713 void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
714  Register object,
715  Register dst,
716  Register heap_number_map,
717  Register scratch1,
718  Register scratch2,
719  Register scratch3,
720  FPURegister double_scratch,
721  Label* not_number) {
722  __ AssertRootValue(heap_number_map,
723  Heap::kHeapNumberMapRootIndex,
724  "HeapNumberMap register clobbered.");
725  Label done;
726  Label not_in_int32_range;
727 
728  __ UntagAndJumpIfSmi(dst, object, &done);
729  __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
730  __ Branch(not_number, ne, scratch1, Operand(heap_number_map));
731  __ ConvertToInt32(object,
732  dst,
733  scratch1,
734  scratch2,
735  double_scratch,
736  &not_in_int32_range);
737  __ jmp(&done);
738 
739  __ bind(&not_in_int32_range);
740  __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
741  __ lw(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
742 
743  __ EmitOutOfInt32RangeTruncate(dst,
744  scratch1,
745  scratch2,
746  scratch3);
747 
748  __ bind(&done);
749 }
750 
751 
752 void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
753  Register int_scratch,
754  Destination destination,
755  FPURegister double_dst,
756  Register dst1,
757  Register dst2,
758  Register scratch2,
759  FPURegister single_scratch) {
760  ASSERT(!int_scratch.is(scratch2));
761  ASSERT(!int_scratch.is(dst1));
762  ASSERT(!int_scratch.is(dst2));
763 
764  Label done;
765 
767  CpuFeatures::Scope scope(FPU);
768  __ mtc1(int_scratch, single_scratch);
769  __ cvt_d_w(double_dst, single_scratch);
770  if (destination == kCoreRegisters) {
771  __ Move(dst1, dst2, double_dst);
772  }
773  } else {
774  Label fewer_than_20_useful_bits;
775  // Expected output:
776  // | dst2 | dst1 |
777  // | s | exp | mantissa |
778 
779  // Check for zero.
780  __ mov(dst2, int_scratch);
781  __ mov(dst1, int_scratch);
782  __ Branch(&done, eq, int_scratch, Operand(zero_reg));
783 
784  // Preload the sign of the value.
785  __ And(dst2, int_scratch, Operand(HeapNumber::kSignMask));
786  // Get the absolute value of the object (as an unsigned integer).
787  Label skip_sub;
788  __ Branch(&skip_sub, ge, dst2, Operand(zero_reg));
789  __ Subu(int_scratch, zero_reg, int_scratch);
790  __ bind(&skip_sub);
791 
792  // Get mantissa[51:20].
793 
794  // Get the position of the first set bit.
795  __ Clz(dst1, int_scratch);
796  __ li(scratch2, 31);
797  __ Subu(dst1, scratch2, dst1);
798 
799  // Set the exponent.
800  __ Addu(scratch2, dst1, Operand(HeapNumber::kExponentBias));
801  __ Ins(dst2, scratch2,
802  HeapNumber::kExponentShift, HeapNumber::kExponentBits);
803 
804  // Clear the first non null bit.
805  __ li(scratch2, Operand(1));
806  __ sllv(scratch2, scratch2, dst1);
807  __ li(at, -1);
808  __ Xor(scratch2, scratch2, at);
809  __ And(int_scratch, int_scratch, scratch2);
810 
811  // Get the number of bits to set in the lower part of the mantissa.
812  __ Subu(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
813  __ Branch(&fewer_than_20_useful_bits, lt, scratch2, Operand(zero_reg));
814  // Set the higher 20 bits of the mantissa.
815  __ srlv(at, int_scratch, scratch2);
816  __ or_(dst2, dst2, at);
817  __ li(at, 32);
818  __ subu(scratch2, at, scratch2);
819  __ sllv(dst1, int_scratch, scratch2);
820  __ Branch(&done);
821 
822  __ bind(&fewer_than_20_useful_bits);
824  __ subu(scratch2, at, dst1);
825  __ sllv(scratch2, int_scratch, scratch2);
826  __ Or(dst2, dst2, scratch2);
827  // Set dst1 to 0.
828  __ mov(dst1, zero_reg);
829  }
830  __ bind(&done);
831 }
832 
833 
834 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
835  Register object,
836  Destination destination,
837  DoubleRegister double_dst,
838  Register dst1,
839  Register dst2,
840  Register heap_number_map,
841  Register scratch1,
842  Register scratch2,
843  FPURegister single_scratch,
844  Label* not_int32) {
845  ASSERT(!scratch1.is(object) && !scratch2.is(object));
846  ASSERT(!scratch1.is(scratch2));
847  ASSERT(!heap_number_map.is(object) &&
848  !heap_number_map.is(scratch1) &&
849  !heap_number_map.is(scratch2));
850 
851  Label done, obj_is_not_smi;
852 
853  __ JumpIfNotSmi(object, &obj_is_not_smi);
854  __ SmiUntag(scratch1, object);
855  ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
856  scratch2, single_scratch);
857  __ Branch(&done);
858 
859  __ bind(&obj_is_not_smi);
860  __ AssertRootValue(heap_number_map,
861  Heap::kHeapNumberMapRootIndex,
862  "HeapNumberMap register clobbered.");
863  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
864 
865  // Load the number.
867  CpuFeatures::Scope scope(FPU);
868  // Load the double value.
869  __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
870 
871  Register except_flag = scratch2;
872  __ EmitFPUTruncate(kRoundToZero,
873  single_scratch,
874  double_dst,
875  scratch1,
876  except_flag,
878 
879  // Jump to not_int32 if the operation did not succeed.
880  __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
881 
882  if (destination == kCoreRegisters) {
883  __ Move(dst1, dst2, double_dst);
884  }
885 
886  } else {
887  ASSERT(!scratch1.is(object) && !scratch2.is(object));
888  // Load the double value in the destination registers.
891 
892  // Check for 0 and -0.
893  __ And(scratch1, dst1, Operand(~HeapNumber::kSignMask));
894  __ Or(scratch1, scratch1, Operand(dst2));
895  __ Branch(&done, eq, scratch1, Operand(zero_reg));
896 
897  // Check that the value can be exactly represented by a 32-bit integer.
898  // Jump to not_int32 if that's not the case.
899  DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
900 
901  // dst1 and dst2 were trashed. Reload the double value.
904  }
905 
906  __ bind(&done);
907 }
908 
909 
910 void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
911  Register object,
912  Register dst,
913  Register heap_number_map,
914  Register scratch1,
915  Register scratch2,
916  Register scratch3,
917  DoubleRegister double_scratch,
918  Label* not_int32) {
919  ASSERT(!dst.is(object));
920  ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
921  ASSERT(!scratch1.is(scratch2) &&
922  !scratch1.is(scratch3) &&
923  !scratch2.is(scratch3));
924 
925  Label done;
926 
927  __ UntagAndJumpIfSmi(dst, object, &done);
928 
929  __ AssertRootValue(heap_number_map,
930  Heap::kHeapNumberMapRootIndex,
931  "HeapNumberMap register clobbered.");
932  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
933 
934  // Object is a heap number.
935  // Convert the floating point value to a 32-bit integer.
937  CpuFeatures::Scope scope(FPU);
938  // Load the double value.
939  __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
940 
941  FPURegister single_scratch = double_scratch.low();
942  Register except_flag = scratch2;
943  __ EmitFPUTruncate(kRoundToZero,
944  single_scratch,
945  double_scratch,
946  scratch1,
947  except_flag,
949 
950  // Jump to not_int32 if the operation did not succeed.
951  __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
952  // Get the result in the destination register.
953  __ mfc1(dst, single_scratch);
954 
955  } else {
956  // Load the double value in the destination registers.
957  __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
958  __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
959 
960  // Check for 0 and -0.
961  __ And(dst, scratch1, Operand(~HeapNumber::kSignMask));
962  __ Or(dst, scratch2, Operand(dst));
963  __ Branch(&done, eq, dst, Operand(zero_reg));
964 
965  DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
966 
967  // Registers state after DoubleIs32BitInteger.
968  // dst: mantissa[51:20].
969  // scratch2: 1
970 
971  // Shift back the higher bits of the mantissa.
972  __ srlv(dst, dst, scratch3);
973  // Set the implicit first bit.
974  __ li(at, 32);
975  __ subu(scratch3, at, scratch3);
976  __ sllv(scratch2, scratch2, scratch3);
977  __ Or(dst, dst, scratch2);
978  // Set the sign.
979  __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
980  __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
981  Label skip_sub;
982  __ Branch(&skip_sub, ge, scratch1, Operand(zero_reg));
983  __ Subu(dst, zero_reg, dst);
984  __ bind(&skip_sub);
985  }
986 
987  __ bind(&done);
988 }
989 
990 
991 void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
992  Register src1,
993  Register src2,
994  Register dst,
995  Register scratch,
996  Label* not_int32) {
997  // Get exponent alone in scratch.
998  __ Ext(scratch,
999  src1,
1000  HeapNumber::kExponentShift,
1002 
1003  // Subtract the bias from the exponent.
1004  __ Subu(scratch, scratch, Operand(HeapNumber::kExponentBias));
1005 
1006  // src1: higher (exponent) part of the double value.
1007  // src2: lower (mantissa) part of the double value.
1008  // scratch: unbiased exponent.
1009 
1010  // Fast cases. Check for obvious non 32-bit integer values.
1011  // Negative exponent cannot yield 32-bit integers.
1012  __ Branch(not_int32, lt, scratch, Operand(zero_reg));
1013  // Exponent greater than 31 cannot yield 32-bit integers.
1014  // Also, a positive value with an exponent equal to 31 is outside of the
1015  // signed 32-bit integer range.
1016  // Another way to put it is that if (exponent - signbit) > 30 then the
1017  // number cannot be represented as an int32.
1018  Register tmp = dst;
1019  __ srl(at, src1, 31);
1020  __ subu(tmp, scratch, at);
1021  __ Branch(not_int32, gt, tmp, Operand(30));
1022  // - Bits [21:0] in the mantissa are not null.
1023  __ And(tmp, src2, 0x3fffff);
1024  __ Branch(not_int32, ne, tmp, Operand(zero_reg));
1025 
1026  // Otherwise the exponent needs to be big enough to shift left all the
1027  // non zero bits left. So we need the (30 - exponent) last bits of the
1028  // 31 higher bits of the mantissa to be null.
1029  // Because bits [21:0] are null, we can check instead that the
1030  // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
1031 
1032  // Get the 32 higher bits of the mantissa in dst.
1033  __ Ext(dst,
1034  src2,
1038  __ or_(dst, dst, at);
1039 
1040  // Create the mask and test the lower bits (of the higher bits).
1041  __ li(at, 32);
1042  __ subu(scratch, at, scratch);
1043  __ li(src2, 1);
1044  __ sllv(src1, src2, scratch);
1045  __ Subu(src1, src1, Operand(1));
1046  __ And(src1, dst, src1);
1047  __ Branch(not_int32, ne, src1, Operand(zero_reg));
1048 }
1049 
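The two fast rejections above can be restated on the host: a nonzero double whose unbiased exponent is negative, or whose exponent minus its sign bit exceeds 30, cannot be a 32-bit integer. Zero and -0 are filtered out by the callers before this check, and a pass here still requires the mantissa tests performed above. This is an illustrative sketch, not V8 code.

#include <cstdint>
#include <cstring>

// Fast rejection only: callers handle 0 and -0 before reaching this test.
bool ObviouslyNotInt32(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  int sign = static_cast<int>(bits >> 63);
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;  // Unbiased.
  // Magnitudes below 1 (negative exponent) and values of 2^31 or more
  // (exponent - sign > 30) cannot be represented as an int32.
  return exponent < 0 || (exponent - sign) > 30;
}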
1050 
1052  MacroAssembler* masm,
1053  Token::Value op,
1054  Register heap_number_result,
1055  Register scratch) {
1056  // Using core registers:
1057  // a0: Left value (least significant part of mantissa).
1058  // a1: Left value (sign, exponent, top of mantissa).
1059  // a2: Right value (least significant part of mantissa).
1060  // a3: Right value (sign, exponent, top of mantissa).
1061 
1062  // Assert that heap_number_result is saved.
1063  // We currently always use s0 to pass it.
1064  ASSERT(heap_number_result.is(s0));
1065 
1066  // Push the current return address before the C call.
1067  __ push(ra);
1068  __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
1069  if (!IsMipsSoftFloatABI) {
1070  CpuFeatures::Scope scope(FPU);
1071  // We are not using MIPS FPU instructions, and the parameters for the runtime
1072  // function call are prepared in a0-a3 registers, but the function we are
1073  // calling is compiled with the hard-float flag and expects the hard-float
1074  // ABI (parameters in f12/f14 registers). We need to copy the parameters
1075  // from a0-a3 registers to f12/f14 register pairs.
1076  __ Move(f12, a0, a1);
1077  __ Move(f14, a2, a3);
1078  }
1079  {
1080  AllowExternalCallThatCantCauseGC scope(masm);
1081  __ CallCFunction(
1082  ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
1083  }
1084  // Store answer in the overwritable heap number.
1085  if (!IsMipsSoftFloatABI) {
1086  CpuFeatures::Scope scope(FPU);
1087  // Double returned in register f0.
1088  __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
1089  } else {
1090  // Double returned in registers v0 and v1.
1091  __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
1092  __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
1093  }
1094  // Place heap_number_result in v0 and return to the pushed return address.
1095  __ pop(ra);
1096  __ Ret(USE_DELAY_SLOT);
1097  __ mov(v0, heap_number_result);
1098 }
1099 
1100 
1102  // These variants are compiled ahead of time. See next method.
1103  if (the_int_.is(a1) &&
1104  the_heap_number_.is(v0) &&
1105  scratch_.is(a2) &&
1106  sign_.is(a3)) {
1107  return true;
1108  }
1109  if (the_int_.is(a2) &&
1110  the_heap_number_.is(v0) &&
1111  scratch_.is(a3) &&
1112  sign_.is(a0)) {
1113  return true;
1114  }
1115  // Other register combinations are generated as and when they are needed,
1116  // so it is unsafe to call them from stubs (we can't generate a stub while
1117  // we are generating a stub).
1118  return false;
1119 }
1120 
1121 
1123  WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
1124  WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
1125  stub1.GetCode()->set_is_pregenerated(true);
1126  stub2.GetCode()->set_is_pregenerated(true);
1127 }
1128 
1129 
1130 // See comment for class, this does NOT work for int32's that are in Smi range.
1131 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
1132  Label max_negative_int;
1133  // the_int_ has the answer which is a signed int32 but not a Smi.
1134  // We test for the special value that has a different exponent.
1135  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
1136  // Test sign, and save for later conditionals.
1137  __ And(sign_, the_int_, Operand(0x80000000u));
1138  __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));
1139 
1140  // Set up the correct exponent in scratch_. All non-Smi int32s have the same exponent.
1141  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
1142  uint32_t non_smi_exponent =
1143  (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
1144  __ li(scratch_, Operand(non_smi_exponent));
1145  // Set the sign bit in scratch_ if the value was negative.
1146  __ or_(scratch_, scratch_, sign_);
1147  // Subtract from 0 if the value was negative.
1148  __ subu(at, zero_reg, the_int_);
1149  __ Movn(the_int_, at, sign_);
1150  // We should be masking the implicit first digit of the mantissa away here,
1151  // but it just ends up combining harmlessly with the last digit of the
1152  // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
1153  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
1154  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
1155  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
1156  __ srl(at, the_int_, shift_distance);
1157  __ or_(scratch_, scratch_, at);
1158  __ sw(scratch_, FieldMemOperand(the_heap_number_,
1160  __ sll(scratch_, the_int_, 32 - shift_distance);
1161  __ sw(scratch_, FieldMemOperand(the_heap_number_,
1163  __ Ret();
1164 
1165  __ bind(&max_negative_int);
1166  // The max negative int32 is stored as a positive number in the mantissa of
1167  // a double because it uses a sign bit instead of using two's complement.
1168  // The actual mantissa bits stored are all 0 because the implicit most
1169  // significant 1 bit is not stored.
1170  non_smi_exponent += 1 << HeapNumber::kExponentShift;
1171  __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
1172  __ sw(scratch_,
1173  FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
1174  __ mov(scratch_, zero_reg);
1175  __ sw(scratch_,
1176  FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
1177  __ Ret();
1178 }
1179 
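As a concrete check of the exponents used above: every int32 outside Smi range has magnitude in [2^30, 2^31], so the common path stores a biased exponent of 1023 + 30 = 1053 (0x41D), while the kMinInt case stores 1023 + 31 = 1054 (0x41E) with an all-zero stored mantissa. The bit patterns can be verified with a bit cast, as in this small illustration.

#include <cassert>
#include <cstdint>
#include <cstring>

static uint64_t DoubleBits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return bits;
}

int main() {
  // 2^30, the smallest positive int32 outside Smi range: exponent field 0x41D.
  assert(DoubleBits(1073741824.0) == 0x41D0000000000000ull);
  // kMinInt (-2^31): sign bit set, exponent field 0x41E, stored mantissa zero.
  assert(DoubleBits(-2147483648.0) == 0xC1E0000000000000ull);
  return 0;
}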
1180 
1181 // Handle the case where the lhs and rhs are the same object.
1182 // Equality is almost reflexive (everything but NaN), so this is a test
1183 // for "identity and not NaN".
1184 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
1185  Label* slow,
1186  Condition cc,
1187  bool never_nan_nan) {
1188  Label not_identical;
1189  Label heap_number, return_equal;
1190  Register exp_mask_reg = t5;
1191 
1192  __ Branch(&not_identical, ne, a0, Operand(a1));
1193 
1194  // The two objects are identical. If we know that one of them isn't NaN then
1195  // we now know they test equal.
1196  if (cc != eq || !never_nan_nan) {
1197  __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
1198 
1199  // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
1200  // so we do the second best thing - test it ourselves.
1201  // They are both equal and they are not both Smis, so since they are identical
1202  // neither of them is a Smi. If it's not a heap number, then return equal.
1203  if (cc == less || cc == greater) {
1204  __ GetObjectType(a0, t4, t4);
1205  __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
1206  } else {
1207  __ GetObjectType(a0, t4, t4);
1208  __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
1209  // Comparing JS objects with <=, >= is complicated.
1210  if (cc != eq) {
1211  __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
1212  // Normally here we fall through to return_equal, but undefined is
1213  // special: (undefined == undefined) == true, but
1214  // (undefined <= undefined) == false! See ECMAScript 11.8.5.
1215  if (cc == less_equal || cc == greater_equal) {
1216  __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
1217  __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
1218  __ Branch(&return_equal, ne, a0, Operand(t2));
1219  if (cc == le) {
1220  // undefined <= undefined should fail.
1221  __ li(v0, Operand(GREATER));
1222  } else {
1223  // undefined >= undefined should fail.
1224  __ li(v0, Operand(LESS));
1225  }
1226  __ Ret();
1227  }
1228  }
1229  }
1230  }
1231 
1232  __ bind(&return_equal);
1233 
1234  if (cc == less) {
1235  __ li(v0, Operand(GREATER)); // Things aren't less than themselves.
1236  } else if (cc == greater) {
1237  __ li(v0, Operand(LESS)); // Things aren't greater than themselves.
1238  } else {
1239  __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves.
1240  }
1241  __ Ret();
1242 
1243  if (cc != eq || !never_nan_nan) {
1244  // For less and greater we don't have to check for NaN since the result of
1245  // x < x is false regardless. For the others here is some code to check
1246  // for NaN.
1247  if (cc != lt && cc != gt) {
1248  __ bind(&heap_number);
1249  // It is a heap number, so return non-equal if it's NaN and equal if it's
1250  // not NaN.
1251 
1252  // The representation of NaN values has all exponent bits (52..62) set,
1253  // and not all mantissa bits (0..51) clear.
1254  // Read top bits of double representation (second word of value).
1256  // Test that exponent bits are all set.
1257  __ And(t3, t2, Operand(exp_mask_reg));
1258  // If all bits not set (ne cond), then not a NaN, objects are equal.
1259  __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
1260 
1261  // Shift out flag and all exponent bits, retaining only mantissa.
1263  // Or with all low-bits of mantissa.
1265  __ Or(v0, t3, Operand(t2));
1266  // For equal we already have the right value in v0: Return zero (equal)
1267  // if all bits in mantissa are zero (it's an Infinity) and non-zero if
1268  // not (it's a NaN). For <= and >= we need to load v0 with the failing
1269  // value if it's a NaN.
1270  if (cc != eq) {
1271  // All-zero means Infinity means equal.
1272  __ Ret(eq, v0, Operand(zero_reg));
1273  if (cc == le) {
1274  __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
1275  } else {
1276  __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
1277  }
1278  }
1279  __ Ret();
1280  }
1281  // No fall through here.
1282  }
1283 
1284  __ bind(&not_identical);
1285 }
1286 
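The NaN test above relies on the IEEE 754 encoding: all eleven exponent bits set and a non-zero mantissa (an all-ones exponent with a zero mantissa is an infinity, not a NaN). A compact host-side version of the same predicate, for illustration only:

#include <cstdint>
#include <cstring>

bool IsNaNBits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  uint64_t exponent = (bits >> 52) & 0x7FF;        // Bits 62..52.
  uint64_t mantissa = bits & 0xFFFFFFFFFFFFFull;   // Bits 51..0.
  // All exponent bits set and a non-zero mantissa means NaN.
  return exponent == 0x7FF && mantissa != 0;
}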
1287 
1288 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
1289  Register lhs,
1290  Register rhs,
1291  Label* both_loaded_as_doubles,
1292  Label* slow,
1293  bool strict) {
1294  ASSERT((lhs.is(a0) && rhs.is(a1)) ||
1295  (lhs.is(a1) && rhs.is(a0)));
1296 
1297  Label lhs_is_smi;
1298  __ JumpIfSmi(lhs, &lhs_is_smi);
1299  // Rhs is a Smi.
1300  // Check whether the non-smi is a heap number.
1301  __ GetObjectType(lhs, t4, t4);
1302  if (strict) {
1303  // If lhs was not a number and rhs was a Smi then strict equality cannot
1304  // succeed. Return non-equal (lhs is already not zero).
1305  __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
1306  __ mov(v0, lhs);
1307  } else {
1308  // Smi compared non-strictly with a non-Smi non-heap-number. Call
1309  // the runtime.
1310  __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
1311  }
1312 
1313  // Rhs is a smi, lhs is a number.
1314  // Convert smi rhs to double.
1316  CpuFeatures::Scope scope(FPU);
1317  __ sra(at, rhs, kSmiTagSize);
1318  __ mtc1(at, f14);
1319  __ cvt_d_w(f14, f14);
1321  } else {
1322  // Load lhs to a double in a2, a3.
1323  __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
1325 
1326  // Write Smi from rhs to a1 and a0 in double format. t5 is scratch.
1327  __ mov(t6, rhs);
1328  ConvertToDoubleStub stub1(a1, a0, t6, t5);
1329  __ push(ra);
1330  __ Call(stub1.GetCode());
1331 
1332  __ pop(ra);
1333  }
1334 
1335  // We now have both loaded as doubles.
1336  __ jmp(both_loaded_as_doubles);
1337 
1338  __ bind(&lhs_is_smi);
1339  // Lhs is a Smi. Check whether the non-smi is a heap number.
1340  __ GetObjectType(rhs, t4, t4);
1341  if (strict) {
1342  // If lhs was not a number and rhs was a Smi then strict equality cannot
1343  // succeed. Return non-equal.
1344  __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
1345  __ li(v0, Operand(1));
1346  } else {
1347  // Smi compared non-strictly with a non-Smi non-heap-number. Call
1348  // the runtime.
1349  __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
1350  }
1351 
1352  // Lhs is a smi, rhs is a number.
1353  // Convert smi lhs to double.
1355  CpuFeatures::Scope scope(FPU);
1356  __ sra(at, lhs, kSmiTagSize);
1357  __ mtc1(at, f12);
1358  __ cvt_d_w(f12, f12);
1360  } else {
1361  // Convert lhs to a double format. t5 is scratch.
1362  __ mov(t6, lhs);
1363  ConvertToDoubleStub stub2(a3, a2, t6, t5);
1364  __ push(ra);
1365  __ Call(stub2.GetCode());
1366  __ pop(ra);
1367  // Load rhs to a double in a1, a0.
1368  if (rhs.is(a0)) {
1369  __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1371  } else {
1373  __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1374  }
1375  }
1376  // Fall through to both_loaded_as_doubles.
1377 }
1378 
1379 
1380 void EmitNanCheck(MacroAssembler* masm, Condition cc) {
1383  CpuFeatures::Scope scope(FPU);
1384  // Lhs and rhs are already loaded to f12 and f14 register pairs.
1385  __ Move(t0, t1, f14);
1386  __ Move(t2, t3, f12);
1387  } else {
1388  // Lhs and rhs are already loaded to GP registers.
1389  __ mov(t0, a0); // a0 has LS 32 bits of rhs.
1390  __ mov(t1, a1); // a1 has MS 32 bits of rhs.
1391  __ mov(t2, a2); // a2 has LS 32 bits of lhs.
1392  __ mov(t3, a3); // a3 has MS 32 bits of lhs.
1393  }
1394  Register rhs_exponent = exp_first ? t0 : t1;
1395  Register lhs_exponent = exp_first ? t2 : t3;
1396  Register rhs_mantissa = exp_first ? t1 : t0;
1397  Register lhs_mantissa = exp_first ? t3 : t2;
1398  Label one_is_nan, neither_is_nan;
1399  Label lhs_not_nan_exp_mask_is_loaded;
1400 
1401  Register exp_mask_reg = t4;
1402  __ li(exp_mask_reg, HeapNumber::kExponentMask);
1403  __ and_(t5, lhs_exponent, exp_mask_reg);
1404  __ Branch(&lhs_not_nan_exp_mask_is_loaded, ne, t5, Operand(exp_mask_reg));
1405 
1406  __ sll(t5, lhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
1407  __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
1408 
1409  __ Branch(&one_is_nan, ne, lhs_mantissa, Operand(zero_reg));
1410 
1411  __ li(exp_mask_reg, HeapNumber::kExponentMask);
1412  __ bind(&lhs_not_nan_exp_mask_is_loaded);
1413  __ and_(t5, rhs_exponent, exp_mask_reg);
1414 
1415  __ Branch(&neither_is_nan, ne, t5, Operand(exp_mask_reg));
1416 
1417  __ sll(t5, rhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
1418  __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
1419 
1420  __ Branch(&neither_is_nan, eq, rhs_mantissa, Operand(zero_reg));
1421 
1422  __ bind(&one_is_nan);
1423  // NaN comparisons always fail.
1424  // Load whatever we need in v0 to make the comparison fail.
1425 
1426  if (cc == lt || cc == le) {
1427  __ li(v0, Operand(GREATER));
1428  } else {
1429  __ li(v0, Operand(LESS));
1430  }
1431  __ Ret();
1432 
1433  __ bind(&neither_is_nan);
1434 }
1435 
1436 
1437 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
1438  // f12 and f14 have the two doubles. Neither is a NaN.
1439  // Call a native function to do a comparison between two non-NaNs.
1440  // Call C routine that may not cause GC or other trouble.
1441  // We call and return manually because we need the argument slots to
1442  // be freed.
1443 
1444  Label return_result_not_equal, return_result_equal;
1445  if (cc == eq) {
1446  // Doubles are not equal unless they have the same bit pattern.
1447  // Exception: 0 and -0.
1450  CpuFeatures::Scope scope(FPU);
1451  // Lhs and rhs are already loaded to f12 and f14 register pairs.
1452  __ Move(t0, t1, f14);
1453  __ Move(t2, t3, f12);
1454  } else {
1455  // Lhs and rhs are already loaded to GP registers.
1456  __ mov(t0, a0); // a0 has LS 32 bits of rhs.
1457  __ mov(t1, a1); // a1 has MS 32 bits of rhs.
1458  __ mov(t2, a2); // a2 has LS 32 bits of lhs.
1459  __ mov(t3, a3); // a3 has MS 32 bits of lhs.
1460  }
1461  Register rhs_exponent = exp_first ? t0 : t1;
1462  Register lhs_exponent = exp_first ? t2 : t3;
1463  Register rhs_mantissa = exp_first ? t1 : t0;
1464  Register lhs_mantissa = exp_first ? t3 : t2;
1465 
1466  __ xor_(v0, rhs_mantissa, lhs_mantissa);
1467  __ Branch(&return_result_not_equal, ne, v0, Operand(zero_reg));
1468 
1469  __ subu(v0, rhs_exponent, lhs_exponent);
1470  __ Branch(&return_result_equal, eq, v0, Operand(zero_reg));
1471  // 0, -0 case.
1472  __ sll(rhs_exponent, rhs_exponent, kSmiTagSize);
1473  __ sll(lhs_exponent, lhs_exponent, kSmiTagSize);
1474  __ or_(t4, rhs_exponent, lhs_exponent);
1475  __ or_(t4, t4, rhs_mantissa);
1476 
1477  __ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg));
1478 
1479  __ bind(&return_result_equal);
1480 
1481  __ li(v0, Operand(EQUAL));
1482  __ Ret();
1483  }
1484 
1485  __ bind(&return_result_not_equal);
1486 
1487  if (!CpuFeatures::IsSupported(FPU)) {
1488  __ push(ra);
1489  __ PrepareCallCFunction(0, 2, t4);
1490  if (!IsMipsSoftFloatABI) {
1491  // We are not using MIPS FPU instructions, and the parameters for the runtime
1492  // function call are prepared in a0-a3 registers, but the function we are
1493  // calling is compiled with the hard-float flag and expects the hard-float
1494  // ABI (parameters in f12/f14 registers). We need to copy the parameters
1495  // from a0-a3 registers to f12/f14 register pairs.
1496  __ Move(f12, a0, a1);
1497  __ Move(f14, a2, a3);
1498  }
1499 
1500  AllowExternalCallThatCantCauseGC scope(masm);
1501  __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
1502  0, 2);
1503  __ pop(ra); // Because this function returns int, result is in v0.
1504  __ Ret();
1505  } else {
1506  CpuFeatures::Scope scope(FPU);
1507  Label equal, less_than;
1508  __ BranchF(&equal, NULL, eq, f12, f14);
1509  __ BranchF(&less_than, NULL, lt, f12, f14);
1510 
1511  // Not equal, not less, not NaN, must be greater.
1512 
1513  __ li(v0, Operand(GREATER));
1514  __ Ret();
1515 
1516  __ bind(&equal);
1517  __ li(v0, Operand(EQUAL));
1518  __ Ret();
1519 
1520  __ bind(&less_than);
1521  __ li(v0, Operand(LESS));
1522  __ Ret();
1523  }
1524 }
1525 
1526 
1527 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
1528  Register lhs,
1529  Register rhs) {
1530  // If either operand is a JS object or an oddball value, then they are
1531  // not equal since their pointers are different.
1532  // There is no test for undetectability in strict equality.
1534  Label first_non_object;
1535  // Get the type of the first operand into a2 and compare it with
1536  // FIRST_SPEC_OBJECT_TYPE.
1537  __ GetObjectType(lhs, a2, a2);
1538  __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
1539 
1540  // Return non-zero.
1541  Label return_not_equal;
1542  __ bind(&return_not_equal);
1543  __ Ret(USE_DELAY_SLOT);
1544  __ li(v0, Operand(1));
1545 
1546  __ bind(&first_non_object);
1547  // Check for oddballs: true, false, null, undefined.
1548  __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
1549 
1550  __ GetObjectType(rhs, a3, a3);
1551  __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
1552 
1553  // Check for oddballs: true, false, null, undefined.
1554  __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
1555 
1556  // Now that we have the types we might as well check for symbol-symbol.
1557  // Ensure that no non-strings have the symbol bit set.
1559  STATIC_ASSERT(kSymbolTag != 0);
1560  __ And(t2, a2, Operand(a3));
1561  __ And(t0, t2, Operand(kIsSymbolMask));
1562  __ Branch(&return_not_equal, ne, t0, Operand(zero_reg));
1563 }
1564 
1565 
1566 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
1567  Register lhs,
1568  Register rhs,
1569  Label* both_loaded_as_doubles,
1570  Label* not_heap_numbers,
1571  Label* slow) {
1572  __ GetObjectType(lhs, a3, a2);
1573  __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
1575  // If first was a heap number & second wasn't, go to slow case.
1576  __ Branch(slow, ne, a3, Operand(a2));
1577 
1578  // Both are heap numbers. Load them up then jump to the code we have
1579  // for that.
1581  CpuFeatures::Scope scope(FPU);
1584  } else {
1586  __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
1587  if (rhs.is(a0)) {
1588  __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1590  } else {
1592  __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1593  }
1594  }
1595  __ jmp(both_loaded_as_doubles);
1596 }
1597 
1598 
1599 // Fast negative check for symbol-to-symbol equality.
1600 static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
1601  Register lhs,
1602  Register rhs,
1603  Label* possible_strings,
1604  Label* not_both_strings) {
1605  ASSERT((lhs.is(a0) && rhs.is(a1)) ||
1606  (lhs.is(a1) && rhs.is(a0)));
1607 
1608  // a2 is object type of lhs.
1609  // Ensure that no non-strings have the symbol bit set.
1610  Label object_test;
1611  STATIC_ASSERT(kSymbolTag != 0);
1612  __ And(at, a2, Operand(kIsNotStringMask));
1613  __ Branch(&object_test, ne, at, Operand(zero_reg));
1614  __ And(at, a2, Operand(kIsSymbolMask));
1615  __ Branch(possible_strings, eq, at, Operand(zero_reg));
1616  __ GetObjectType(rhs, a3, a3);
1617  __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
1618  __ And(at, a3, Operand(kIsSymbolMask));
1619  __ Branch(possible_strings, eq, at, Operand(zero_reg));
1620 
1621  // Both are symbols. We already checked they weren't the same pointer
1622  // so they are not equal.
1623  __ Ret(USE_DELAY_SLOT);
1624  __ li(v0, Operand(1)); // Non-zero indicates not equal.
1625 
1626  __ bind(&object_test);
1627  __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
1628  __ GetObjectType(rhs, a2, a3);
1629  __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
1630 
1631  // If both objects are undetectable, they are equal. Otherwise, they
1632  // are not equal, since they are different objects and an object is not
1633  // equal to undefined.
1635  __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
1636  __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
1637  __ and_(a0, a2, a3);
1638  __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
1639  __ Ret(USE_DELAY_SLOT);
1640  __ xori(v0, a0, 1 << Map::kIsUndetectable);
1641 }
1642 
1643 
1644 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
1645  Register object,
1646  Register result,
1647  Register scratch1,
1648  Register scratch2,
1649  Register scratch3,
1650  bool object_is_smi,
1651  Label* not_found) {
1652  // Use of registers. Register result is used as a temporary.
1653  Register number_string_cache = result;
1654  Register mask = scratch3;
1655 
1656  // Load the number string cache.
1657  __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
1658 
1659  // Make the hash mask from the length of the number string cache. It
1660  // contains two elements (number and string) for each cache entry.
1661  __ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
1662  // Divide length by two (length is a smi).
1663  __ sra(mask, mask, kSmiTagSize + 1);
1664  __ Addu(mask, mask, -1); // Make mask.
1665 
1666  // Calculate the entry in the number string cache. The hash value in the
1667  // number string cache for smis is just the smi value, and the hash for
1668  // doubles is the xor of the upper and lower words. See
1669  // Heap::GetNumberStringCache.
1670  Isolate* isolate = masm->isolate();
1671  Label is_smi;
1672  Label load_result_from_cache;
1673  if (!object_is_smi) {
1674  __ JumpIfSmi(object, &is_smi);
1675  if (CpuFeatures::IsSupported(FPU)) {
1676  CpuFeatures::Scope scope(FPU);
1677  __ CheckMap(object,
1678  scratch1,
1679  Heap::kHeapNumberMapRootIndex,
1680  not_found,
1681  DONT_DO_SMI_CHECK);
1682 
1683  STATIC_ASSERT(8 == kDoubleSize);
1684  __ Addu(scratch1,
1685  object,
1686  Operand(HeapNumber::kValueOffset - kHeapObjectTag));
1687  __ lw(scratch2, MemOperand(scratch1, kPointerSize));
1688  __ lw(scratch1, MemOperand(scratch1, 0));
1689  __ Xor(scratch1, scratch1, Operand(scratch2));
1690  __ And(scratch1, scratch1, Operand(mask));
1691 
1692  // Calculate address of entry in string cache: each entry consists
1693  // of two pointer sized fields.
1694  __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
1695  __ Addu(scratch1, number_string_cache, scratch1);
1696 
1697  Register probe = mask;
1698  __ lw(probe,
1699  FieldMemOperand(scratch1, FixedArray::kHeaderSize));
1700  __ JumpIfSmi(probe, not_found);
1701  __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
1702  __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
1703  __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
1704  __ Branch(not_found);
1705  } else {
1706  // Note that there is no cache check for the non-FPU case, even though
1707  // it seems there could be. It might be a tiny optimization for non-FPU
1708  // cores.
1709  __ Branch(not_found);
1710  }
1711  }
1712 
1713  __ bind(&is_smi);
1714  Register scratch = scratch1;
1715  __ sra(scratch, object, 1); // Shift away the tag.
1716  __ And(scratch, mask, Operand(scratch));
1717 
1718  // Calculate address of entry in string cache: each entry consists
1719  // of two pointer sized fields.
1720  __ sll(scratch, scratch, kPointerSizeLog2 + 1);
1721  __ Addu(scratch, number_string_cache, scratch);
1722 
1723  // Check if the entry is the smi we are looking for.
1724  Register probe = mask;
1725  __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
1726  __ Branch(not_found, ne, object, Operand(probe));
1727 
1728  // Get the result from the cache.
1729  __ bind(&load_result_from_cache);
1730  __ lw(result,
1731  FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
1732 
1733  __ IncrementCounter(isolate->counters()->number_to_string_native(),
1734  1,
1735  scratch1,
1736  scratch2);
1737 }
1738 
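A rough C++ sketch (using a plain array model of the cache rather than V8's FixedArray) of the hashing scheme the lookup above implements: smis hash to their untagged value, doubles to the xor of their two 32-bit halves, and the mask is half the cache length minus one because each entry occupies two slots (the number and its string).

#include <stdint.h>
#include <string.h>

// Hash index into a cache with 'length' slots (two slots per entry).
static uint32_t SmiCacheIndex(int32_t untagged_smi, int length) {
  uint32_t mask = static_cast<uint32_t>(length / 2 - 1);
  return static_cast<uint32_t>(untagged_smi) & mask;
}

static uint32_t DoubleCacheIndex(double value, int length) {
  uint32_t mask = static_cast<uint32_t>(length / 2 - 1);
  uint32_t halves[2];
  memcpy(halves, &value, sizeof(halves));  // the two 32-bit words of the double
  return (halves[0] ^ halves[1]) & mask;   // xor of the words, as in the stub
}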
1739 
1740 void NumberToStringStub::Generate(MacroAssembler* masm) {
1741  Label runtime;
1742 
1743  __ lw(a1, MemOperand(sp, 0));
1744 
1745  // Generate code to lookup number in the number string cache.
1746  GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime);
1747  __ DropAndRet(1);
1748 
1749  __ bind(&runtime);
1750  // Handle number to string in the runtime system if not found in the cache.
1751  __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
1752 }
1753 
1754 
1755 // On entry lhs_ (lhs) and rhs_ (rhs) are the things to be compared.
1756 // On exit, v0 is 0, positive, or negative (smi) to indicate the result
1757 // of the comparison.
1758 void CompareStub::Generate(MacroAssembler* masm) {
1759  Label slow; // Call builtin.
1760  Label not_smis, both_loaded_as_doubles;
1761 
1762 
1763  if (include_smi_compare_) {
1764  Label not_two_smis, smi_done;
1765  __ Or(a2, a1, a0);
1766  __ JumpIfNotSmi(a2, &not_two_smis);
1767  __ sra(a1, a1, 1);
1768  __ sra(a0, a0, 1);
1769  __ Ret(USE_DELAY_SLOT);
1770  __ subu(v0, a1, a0);
1771  __ bind(&not_two_smis);
1772  } else if (FLAG_debug_code) {
1773  __ Or(a2, a1, a0);
1774  __ And(a2, a2, kSmiTagMask);
1775  __ Assert(ne, "CompareStub: unexpected smi operands.",
1776  a2, Operand(zero_reg));
1777  }
1778 
1779 
1780  // NOTICE! This code is only reached after a smi-fast-case check, so
1781  // it is certain that at least one operand isn't a smi.
1782 
1783  // Handle the case where the objects are identical. Either returns the answer
1784  // or goes to slow. Only falls through if the objects were not identical.
1785  EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
1786 
1787  // If either is a Smi (we know that not both are), then they can only
1788  // be strictly equal if the other is a HeapNumber.
1789  STATIC_ASSERT(kSmiTag == 0);
1790  ASSERT_EQ(0, Smi::FromInt(0));
1791  __ And(t2, lhs_, Operand(rhs_));
1792  __ JumpIfNotSmi(t2, &not_smis, t0);
1793  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
1794  // 1) Return the answer.
1795  // 2) Go to slow.
1796  // 3) Fall through to both_loaded_as_doubles.
1797  // 4) Jump to rhs_not_nan.
1798  // In cases 3 and 4 we have found out we were dealing with a number-number
1799  // comparison and the numbers have been loaded into f12 and f14 as doubles,
1800  // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
1801  EmitSmiNonsmiComparison(masm, lhs_, rhs_,
1802  &both_loaded_as_doubles, &slow, strict_);
1803 
1804  __ bind(&both_loaded_as_doubles);
1805  // f12, f14 are the double representations of the left hand side
1806  // and the right hand side if we have FPU. Otherwise a2, a3 represent
1807  // left hand side and a0, a1 represent right hand side.
1808 
1809  Isolate* isolate = masm->isolate();
1810  if (CpuFeatures::IsSupported(FPU)) {
1811  CpuFeatures::Scope scope(FPU);
1812  Label nan;
1813  __ li(t0, Operand(LESS));
1814  __ li(t1, Operand(GREATER));
1815  __ li(t2, Operand(EQUAL));
1816 
1817  // Check if either rhs or lhs is NaN.
1818  __ BranchF(NULL, &nan, eq, f12, f14);
1819 
1820  // Check if the LESS condition is satisfied. If true, conditionally move
1821  // the result to v0.
1822  __ c(OLT, D, f12, f14);
1823  __ Movt(v0, t0);
1824  // Use the previous check to conditionally store the opposite condition
1825  // (GREATER) to v0. If rhs is equal to lhs, this will be corrected in the
1826  // next check.
1827  __ Movf(v0, t1);
1828  // Check if the EQUAL condition is satisfied. If true, conditionally move
1829  // the result to v0.
1830  __ c(EQ, D, f12, f14);
1831  __ Movt(v0, t2);
1832 
1833  __ Ret();
1834 
1835  __ bind(&nan);
1836  // NaN comparisons always fail.
1837  // Load whatever we need in v0 to make the comparison fail.
1838  if (cc_ == lt || cc_ == le) {
1839  __ li(v0, Operand(GREATER));
1840  } else {
1841  __ li(v0, Operand(LESS));
1842  }
1843  __ Ret();
1844  } else {
1845  // Checks for NaN in the doubles we have loaded. Can return the answer or
1846  // fall through if neither is a NaN. Also binds rhs_not_nan.
1847  EmitNanCheck(masm, cc_);
1848 
1849  // Compares two doubles that are not NaNs. Returns the answer.
1850  // Never falls through.
1851  EmitTwoNonNanDoubleComparison(masm, cc_);
1852  }
1853 
1854  __ bind(&not_smis);
1855  // At this point we know we are dealing with two different objects,
1856  // and neither of them is a Smi. The objects are in lhs_ and rhs_.
1857  if (strict_) {
1858  // This returns non-equal for some object types, or falls through if it
1859  // was not lucky.
1860  EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
1861  }
1862 
1863  Label check_for_symbols;
1864  Label flat_string_check;
1865  // Check for heap-number-heap-number comparison. Can jump to slow case,
1866  // or load both doubles and jump to the code that handles
1867  // that case. If the inputs are not doubles then jumps to check_for_symbols.
1868  // In this case a2 will contain the type of lhs_.
1869  EmitCheckForTwoHeapNumbers(masm,
1870  lhs_,
1871  rhs_,
1872  &both_loaded_as_doubles,
1873  &check_for_symbols,
1874  &flat_string_check);
1875 
1876  __ bind(&check_for_symbols);
1877  if (cc_ == eq && !strict_) {
1878  // Returns an answer for two symbols or two detectable objects.
1879  // Otherwise jumps to string case or not both strings case.
1880  // Assumes that a2 is the type of lhs_ on entry.
1881  EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
1882  }
1883 
1884  // Check for both being sequential ASCII strings, and inline if that is the
1885  // case.
1886  __ bind(&flat_string_check);
1887 
1888  __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, a2, a3, &slow);
1889 
1890  __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
1891  if (cc_ == eq) {
1892  StringCompareStub::GenerateFlatAsciiStringEquals(masm,
1893  lhs_,
1894  rhs_,
1895  a2,
1896  a3,
1897  t0);
1898  } else {
1899  StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
1900  lhs_,
1901  rhs_,
1902  a2,
1903  a3,
1904  t0,
1905  t1);
1906  }
1907  // Never falls through to here.
1908 
1909  __ bind(&slow);
1910  // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
1911  // a1 (rhs) second.
1912  __ Push(lhs_, rhs_);
1913  // Figure out which native to call and setup the arguments.
1914  Builtins::JavaScript native;
1915  if (cc_ == eq) {
1916  native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
1917  } else {
1918  native = Builtins::COMPARE;
1919  int ncr; // NaN compare result.
1920  if (cc_ == lt || cc_ == le) {
1921  ncr = GREATER;
1922  } else {
1923  ASSERT(cc_ == gt || cc_ == ge); // Remaining cases.
1924  ncr = LESS;
1925  }
1926  __ li(a0, Operand(Smi::FromInt(ncr)));
1927  __ push(a0);
1928  }
1929 
1930  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1931  // tagged as a small integer.
1932  __ InvokeBuiltin(native, JUMP_FUNCTION);
1933 }
1934 
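Two details of CompareStub::Generate are easy to restate in plain C++ (an illustrative sketch, assuming lhs arrives in a1 and rhs in a0 as the register comments in this file suggest): the two-smi fast path returns the difference of the untagged values, so its sign encodes the answer, and a NaN operand must make every relational test fail, which is why < and <= load GREATER while > and >= load LESS.

#include <stdint.h>

// Smi-smi fast path: sign of (lhs >> 1) - (rhs >> 1) is the comparison result.
static int32_t SmiCompare(int32_t tagged_lhs, int32_t tagged_rhs) {
  return (tagged_lhs >> 1) - (tagged_rhs >> 1);
}

// Value loaded when one operand is NaN (kLess/kGreater are stand-in constants).
enum { kLess = -1, kEqual = 0, kGreater = 1 };
static int NaNResult(bool condition_is_lt_or_le) {
  // Whatever the condition being tested, the chosen value makes it false.
  return condition_is_lt_or_le ? kGreater : kLess;
}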
1935 
1936 // The stub expects its argument in the tos_ register and returns its result in
1937 // it, too: zero for false, and a non-zero value for true.
1938 void ToBooleanStub::Generate(MacroAssembler* masm) {
1939  // This stub uses FPU instructions.
1940  CpuFeatures::Scope scope(FPU);
1941 
1942  Label patch;
1943  const Register map = t5.is(tos_) ? t3 : t5;
1944 
1945  // undefined -> false.
1946  CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
1947 
1948  // Boolean -> its value.
1949  CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
1950  CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
1951 
1952  // 'null' -> false.
1953  CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
1954 
1955  if (types_.Contains(SMI)) {
1956  // Smis: 0 -> false, all other -> true
1957  __ And(at, tos_, kSmiTagMask);
1958  // tos_ contains the correct return value already
1959  __ Ret(eq, at, Operand(zero_reg));
1960  } else if (types_.NeedsMap()) {
1961  // If we need a map later and have a Smi -> patch.
1962  __ JumpIfSmi(tos_, &patch);
1963  }
1964 
1965  if (types_.NeedsMap()) {
1966  __ lw(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
1967 
1968  if (types_.CanBeUndetectable()) {
1969  __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
1970  __ And(at, at, Operand(1 << Map::kIsUndetectable));
1971  // Undetectable -> false.
1972  __ Movn(tos_, zero_reg, at);
1973  __ Ret(ne, at, Operand(zero_reg));
1974  }
1975  }
1976 
1977  if (types_.Contains(SPEC_OBJECT)) {
1978  // Spec object -> true.
1979  __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
1980  // tos_ contains the correct non-zero return value already.
1981  __ Ret(ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
1982  }
1983 
1984  if (types_.Contains(STRING)) {
1985  // String value -> false iff empty.
1986  __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
1987  Label skip;
1988  __ Branch(&skip, ge, at, Operand(FIRST_NONSTRING_TYPE));
1989  __ Ret(USE_DELAY_SLOT); // the string length is OK as the return value
1990  __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
1991  __ bind(&skip);
1992  }
1993 
1994  if (types_.Contains(HEAP_NUMBER)) {
1995  // Heap number -> false iff +0, -0, or NaN.
1996  Label not_heap_number;
1997  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
1998  __ Branch(&not_heap_number, ne, map, Operand(at));
1999  Label zero_or_nan, number;
2000  __ ldc1(f2, FieldMemOperand(tos_, HeapNumber::kValueOffset));
2001  __ BranchF(&number, &zero_or_nan, ne, f2, kDoubleRegZero);
2002  // "tos_" is a register and contains a non-zero value by default.
2003  // Hence we only need to overwrite "tos_" with zero to return false for
2004  // the FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
2005  __ bind(&zero_or_nan);
2006  __ mov(tos_, zero_reg);
2007  __ bind(&number);
2008  __ Ret();
2009  __ bind(&not_heap_number);
2010  }
2011 
2012  __ bind(&patch);
2013  GenerateTypeTransition(masm);
2014 }
2015 
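For reference, a compact C++ sketch (not a V8 API; the struct is a hypothetical tagged-value model) of the JavaScript ToBoolean rules the stub above special-cases: undefined, null, false, the empty string, ±0 and NaN are falsy, undetectable objects are treated as falsy, and everything else, including all spec objects, is truthy.

#include <cmath>
#include <string>

struct ToBooleanInput {
  bool is_undefined, is_null, is_false, is_true;
  bool is_string, is_number, is_undetectable;
  std::string string_value;
  double number_value;
};

static bool ToBoolean(const ToBooleanInput& v) {
  if (v.is_undefined || v.is_null || v.is_false) return false;
  if (v.is_true) return true;
  if (v.is_undetectable) return false;
  if (v.is_string) return !v.string_value.empty();
  if (v.is_number)
    return v.number_value != 0 && !std::isnan(v.number_value);  // -0 is falsy
  return true;  // spec objects and everything else
}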
2016 
2017 void ToBooleanStub::CheckOddball(MacroAssembler* masm,
2018  Type type,
2019  Heap::RootListIndex value,
2020  bool result) {
2021  if (types_.Contains(type)) {
2022  // If we see an expected oddball, return its ToBoolean value in tos_.
2023  __ LoadRoot(at, value);
2024  __ Subu(at, at, tos_); // This is a check for equality for the movz below.
2025  // The value of a root is never NULL, so we can avoid loading a non-null
2026  // value into tos_ when we want to return 'true'.
2027  if (!result) {
2028  __ Movz(tos_, zero_reg, at);
2029  }
2030  __ Ret(eq, at, Operand(zero_reg));
2031  }
2032 }
2033 
2034 
2035 void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
2036  __ Move(a3, tos_);
2037  __ li(a2, Operand(Smi::FromInt(tos_.code())));
2038  __ li(a1, Operand(Smi::FromInt(types_.ToByte())));
2039  __ Push(a3, a2, a1);
2040  // Patch the caller to an appropriate specialized stub and return the
2041  // operation result to the caller of the stub.
2042  __ TailCallExternalReference(
2043  ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
2044  3,
2045  1);
2046 }
2047 
2048 
2049 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
2050  // We don't allow a GC during a store buffer overflow so there is no need to
2051  // store the registers in any particular way, but we do have to store and
2052  // restore them.
2053  __ MultiPush(kJSCallerSaved | ra.bit());
2054  if (save_doubles_ == kSaveFPRegs) {
2055  CpuFeatures::Scope scope(FPU);
2056  __ MultiPushFPU(kCallerSavedFPU);
2057  }
2058  const int argument_count = 1;
2059  const int fp_argument_count = 0;
2060  const Register scratch = a1;
2061 
2062  AllowExternalCallThatCantCauseGC scope(masm);
2063  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
2064  __ li(a0, Operand(ExternalReference::isolate_address()));
2065  __ CallCFunction(
2066  ExternalReference::store_buffer_overflow_function(masm->isolate()),
2067  argument_count);
2068  if (save_doubles_ == kSaveFPRegs) {
2069  CpuFeatures::Scope scope(FPU);
2070  __ MultiPopFPU(kCallerSavedFPU);
2071  }
2072 
2073  __ MultiPop(kJSCallerSaved | ra.bit());
2074  __ Ret();
2075 }
2076 
2077 
2078 void UnaryOpStub::PrintName(StringStream* stream) {
2079  const char* op_name = Token::Name(op_);
2080  const char* overwrite_name = NULL; // Make g++ happy.
2081  switch (mode_) {
2082  case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
2083  case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
2084  }
2085  stream->Add("UnaryOpStub_%s_%s_%s",
2086  op_name,
2087  overwrite_name,
2088  UnaryOpIC::GetName(operand_type_));
2089 }
2090 
2091 
2092 // TODO(svenpanne): Use virtual functions instead of switch.
2093 void UnaryOpStub::Generate(MacroAssembler* masm) {
2094  switch (operand_type_) {
2095  case UnaryOpIC::UNINITIALIZED:
2096  GenerateTypeTransition(masm);
2097  break;
2098  case UnaryOpIC::SMI:
2099  GenerateSmiStub(masm);
2100  break;
2101  case UnaryOpIC::HEAP_NUMBER:
2102  GenerateHeapNumberStub(masm);
2103  break;
2104  case UnaryOpIC::GENERIC:
2105  GenerateGenericStub(masm);
2106  break;
2107  }
2108 }
2109 
2110 
2111 void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2112  // Argument is in a0 and v0 at this point, so we can overwrite a0.
2113  __ li(a2, Operand(Smi::FromInt(op_)));
2114  __ li(a1, Operand(Smi::FromInt(mode_)));
2115  __ li(a0, Operand(Smi::FromInt(operand_type_)));
2116  __ Push(v0, a2, a1, a0);
2117 
2118  __ TailCallExternalReference(
2119  ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
2120 }
2121 
2122 
2123 // TODO(svenpanne): Use virtual functions instead of switch.
2124 void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2125  switch (op_) {
2126  case Token::SUB:
2127  GenerateSmiStubSub(masm);
2128  break;
2129  case Token::BIT_NOT:
2130  GenerateSmiStubBitNot(masm);
2131  break;
2132  default:
2133  UNREACHABLE();
2134  }
2135 }
2136 
2137 
2138 void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
2139  Label non_smi, slow;
2140  GenerateSmiCodeSub(masm, &non_smi, &slow);
2141  __ bind(&non_smi);
2142  __ bind(&slow);
2143  GenerateTypeTransition(masm);
2144 }
2145 
2146 
2147 void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
2148  Label non_smi;
2149  GenerateSmiCodeBitNot(masm, &non_smi);
2150  __ bind(&non_smi);
2151  GenerateTypeTransition(masm);
2152 }
2153 
2154 
2155 void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
2156  Label* non_smi,
2157  Label* slow) {
2158  __ JumpIfNotSmi(a0, non_smi);
2159 
2160  // The result of negating zero or the smallest negative smi is not a smi.
2161  __ And(t0, a0, ~0x80000000);
2162  __ Branch(slow, eq, t0, Operand(zero_reg));
2163 
2164  // Return '0 - value'.
2165  __ Ret(USE_DELAY_SLOT);
2166  __ subu(v0, zero_reg, a0);
2167 }
2168 
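The guard in GenerateSmiCodeSub can be read as the following C++ check (a sketch, hard-coded to the 32-bit layout used here, where a tagged smi is value << 1): negation stays a smi unless the value is 0 (the result would be -0) or -2^30 (whose negation overflows the smi range). Those two inputs, and only those, have all bits clear outside the sign bit, which is exactly what the And with ~0x80000000 tests.

#include <stdint.h>

// True when '0 - value' can be computed in tagged smi arithmetic.
static bool CanNegateAsSmi(int32_t tagged_smi) {
  // Tagged 0 is 0x00000000 and tagged -2^30 is 0x80000000; masking out the
  // sign bit leaves zero for exactly those two problem inputs.
  return (static_cast<uint32_t>(tagged_smi) & 0x7fffffffu) != 0;
}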
2169 
2170 void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
2171  Label* non_smi) {
2172  __ JumpIfNotSmi(a0, non_smi);
2173 
2174  // Flip bits and revert inverted smi-tag.
2175  __ Neg(v0, a0);
2176  __ And(v0, v0, ~kSmiTagMask);
2177  __ Ret();
2178 }
2179 
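The smi BIT_NOT sequence relies on a small identity that is easier to see in C++ (sketch only): with a smi tagged as value << 1, the bitwise complement of the untagged value, re-tagged, equals the complement of the tagged word with the tag bit cleared, so no untag/retag round trip is needed.

#include <stdint.h>

static int32_t SmiTag(int32_t value) { return value * 2; }

static int32_t SmiBitNot(int32_t tagged_smi) {
  const int32_t kSmiTagMask = 1;
  return ~tagged_smi & ~kSmiTagMask;  // equals SmiTag(~untagged_value)
}

// e.g. SmiBitNot(SmiTag(5)) == SmiTag(~5), i.e. -12 == -12.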
2180 
2181 // TODO(svenpanne): Use virtual functions instead of switch.
2182 void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
2183  switch (op_) {
2184  case Token::SUB:
2185  GenerateHeapNumberStubSub(masm);
2186  break;
2187  case Token::BIT_NOT:
2188  GenerateHeapNumberStubBitNot(masm);
2189  break;
2190  default:
2191  UNREACHABLE();
2192  }
2193 }
2194 
2195 
2196 void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
2197  Label non_smi, slow, call_builtin;
2198  GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
2199  __ bind(&non_smi);
2200  GenerateHeapNumberCodeSub(masm, &slow);
2201  __ bind(&slow);
2202  GenerateTypeTransition(masm);
2203  __ bind(&call_builtin);
2204  GenerateGenericCodeFallback(masm);
2205 }
2206 
2207 
2208 void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
2209  Label non_smi, slow;
2210  GenerateSmiCodeBitNot(masm, &non_smi);
2211  __ bind(&non_smi);
2212  GenerateHeapNumberCodeBitNot(masm, &slow);
2213  __ bind(&slow);
2214  GenerateTypeTransition(masm);
2215 }
2216 
2217 
2218 void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
2219  Label* slow) {
2220  EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
2221  // a0 is a heap number. Get a new heap number in a1.
2222  if (mode_ == UNARY_OVERWRITE) {
2223  __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
2224  __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
2225  __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
2226  } else {
2227  Label slow_allocate_heapnumber, heapnumber_allocated;
2228  __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber);
2229  __ jmp(&heapnumber_allocated);
2230 
2231  __ bind(&slow_allocate_heapnumber);
2232  {
2233  FrameScope scope(masm, StackFrame::INTERNAL);
2234  __ push(a0);
2235  __ CallRuntime(Runtime::kNumberAlloc, 0);
2236  __ mov(a1, v0);
2237  __ pop(a0);
2238  }
2239 
2240  __ bind(&heapnumber_allocated);
2241  __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
2242  __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
2243  __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
2244  __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
2245  __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset));
2246  __ mov(v0, a1);
2247  }
2248  __ Ret();
2249 }
2250 
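What GenerateHeapNumberCodeSub does to the heap number payload is just an IEEE-754 sign-bit flip on the upper word. A C++ sketch of the same operation, working on a raw double rather than a HeapNumber:

#include <stdint.h>
#include <string.h>

static double NegateBySignBit(double value) {
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));
  bits ^= 0x8000000000000000ull;   // the sign bit lives in the high word
  memcpy(&value, &bits, sizeof(value));
  return value;                    // also correct for 0.0, infinities and NaNs
}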
2251 
2252 void UnaryOpStub::GenerateHeapNumberCodeBitNot(
2253  MacroAssembler* masm,
2254  Label* slow) {
2255  Label impossible;
2256 
2257  EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
2258  // Convert the heap number in a0 to an untagged integer in a1.
2259  __ ConvertToInt32(a0, a1, a2, a3, f0, slow);
2260 
2261  // Do the bitwise operation and check if the result fits in a smi.
2262  Label try_float;
2263  __ Neg(a1, a1);
2264  __ Addu(a2, a1, Operand(0x40000000));
2265  __ Branch(&try_float, lt, a2, Operand(zero_reg));
2266 
2267  // Tag the result as a smi and we're done.
2268  __ SmiTag(v0, a1);
2269  __ Ret();
2270 
2271  // Try to store the result in a heap number.
2272  __ bind(&try_float);
2273  if (mode_ == UNARY_NO_OVERWRITE) {
2274  Label slow_allocate_heapnumber, heapnumber_allocated;
2275  // Allocate a new heap number without zapping v0, which we need if it fails.
2276  __ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber);
2277  __ jmp(&heapnumber_allocated);
2278 
2279  __ bind(&slow_allocate_heapnumber);
2280  {
2281  FrameScope scope(masm, StackFrame::INTERNAL);
2282  __ push(v0); // Push the heap number, not the untagged int32.
2283  __ CallRuntime(Runtime::kNumberAlloc, 0);
2284  __ mov(a2, v0); // Move the new heap number into a2.
2285  // Get the heap number into v0, now that the new heap number is in a2.
2286  __ pop(v0);
2287  }
2288 
2289  // Convert the heap number in v0 to an untagged integer in a1.
2290  // This can't go slow-case because it's the same number we already
2291  // converted once before.
2292  __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible);
2293  // Negate the result.
2294  __ Xor(a1, a1, -1);
2295 
2296  __ bind(&heapnumber_allocated);
2297  __ mov(v0, a2); // Move newly allocated heap number to v0.
2298  }
2299 
2300  if (CpuFeatures::IsSupported(FPU)) {
2301  // Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
2302  CpuFeatures::Scope scope(FPU);
2303  __ mtc1(a1, f0);
2304  __ cvt_d_w(f0, f0);
2305  __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
2306  __ Ret();
2307  } else {
2308  // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
2309  // have to set up a frame.
2310  WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3);
2311  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2312  }
2313 
2314  __ bind(&impossible);
2315  if (FLAG_debug_code) {
2316  __ stop("Incorrect assumption in bit-not stub");
2317  }
2318 }
2319 
2320 
2321 // TODO(svenpanne): Use virtual functions instead of switch.
2322 void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
2323  switch (op_) {
2324  case Token::SUB:
2325  GenerateGenericStubSub(masm);
2326  break;
2327  case Token::BIT_NOT:
2328  GenerateGenericStubBitNot(masm);
2329  break;
2330  default:
2331  UNREACHABLE();
2332  }
2333 }
2334 
2335 
2336 void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
2337  Label non_smi, slow;
2338  GenerateSmiCodeSub(masm, &non_smi, &slow);
2339  __ bind(&non_smi);
2340  GenerateHeapNumberCodeSub(masm, &slow);
2341  __ bind(&slow);
2342  GenerateGenericCodeFallback(masm);
2343 }
2344 
2345 
2346 void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
2347  Label non_smi, slow;
2348  GenerateSmiCodeBitNot(masm, &non_smi);
2349  __ bind(&non_smi);
2350  GenerateHeapNumberCodeBitNot(masm, &slow);
2351  __ bind(&slow);
2352  GenerateGenericCodeFallback(masm);
2353 }
2354 
2355 
2356 void UnaryOpStub::GenerateGenericCodeFallback(
2357  MacroAssembler* masm) {
2358  // Handle the slow case by jumping to the JavaScript builtin.
2359  __ push(a0);
2360  switch (op_) {
2361  case Token::SUB:
2362  __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
2363  break;
2364  case Token::BIT_NOT:
2365  __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
2366  break;
2367  default:
2368  UNREACHABLE();
2369  }
2370 }
2371 
2372 
2373 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2374  Label get_result;
2375 
2376  __ Push(a1, a0);
2377 
2378  __ li(a2, Operand(Smi::FromInt(MinorKey())));
2379  __ li(a1, Operand(Smi::FromInt(op_)));
2380  __ li(a0, Operand(Smi::FromInt(operands_type_)));
2381  __ Push(a2, a1, a0);
2382 
2383  __ TailCallExternalReference(
2384  ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
2385  masm->isolate()),
2386  5,
2387  1);
2388 }
2389 
2390 
2391 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
2392  MacroAssembler* masm) {
2393  UNIMPLEMENTED();
2394 }
2395 
2396 
2397 void BinaryOpStub::Generate(MacroAssembler* masm) {
2398  // Explicitly allow generation of nested stubs. It is safe here because
2399  // generation code does not use any raw pointers.
2400  AllowStubCallsScope allow_stub_calls(masm, true);
2401  switch (operands_type_) {
2402  case BinaryOpIC::UNINITIALIZED:
2403  GenerateTypeTransition(masm);
2404  break;
2405  case BinaryOpIC::SMI:
2406  GenerateSmiStub(masm);
2407  break;
2408  case BinaryOpIC::INT32:
2409  GenerateInt32Stub(masm);
2410  break;
2411  case BinaryOpIC::HEAP_NUMBER:
2412  GenerateHeapNumberStub(masm);
2413  break;
2414  case BinaryOpIC::ODDBALL:
2415  GenerateOddballStub(masm);
2416  break;
2417  case BinaryOpIC::BOTH_STRING:
2418  GenerateBothStringStub(masm);
2419  break;
2420  case BinaryOpIC::STRING:
2421  GenerateStringStub(masm);
2422  break;
2423  case BinaryOpIC::GENERIC:
2424  GenerateGeneric(masm);
2425  break;
2426  default:
2427  UNREACHABLE();
2428  }
2429 }
2430 
2431 
2432 void BinaryOpStub::PrintName(StringStream* stream) {
2433  const char* op_name = Token::Name(op_);
2434  const char* overwrite_name;
2435  switch (mode_) {
2436  case NO_OVERWRITE: overwrite_name = "Alloc"; break;
2437  case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
2438  case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
2439  default: overwrite_name = "UnknownOverwrite"; break;
2440  }
2441  stream->Add("BinaryOpStub_%s_%s_%s",
2442  op_name,
2443  overwrite_name,
2444  BinaryOpIC::GetName(operands_type_));
2445 }
2446 
2447 
2448 
2449 void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
2450  Register left = a1;
2451  Register right = a0;
2452 
2453  Register scratch1 = t0;
2454  Register scratch2 = t1;
2455 
2456  ASSERT(right.is(a0));
2457  STATIC_ASSERT(kSmiTag == 0);
2458 
2459  Label not_smi_result;
2460  switch (op_) {
2461  case Token::ADD:
2462  __ AdduAndCheckForOverflow(v0, left, right, scratch1);
2463  __ RetOnNoOverflow(scratch1);
2464  // No need to revert anything - right and left are intact.
2465  break;
2466  case Token::SUB:
2467  __ SubuAndCheckForOverflow(v0, left, right, scratch1);
2468  __ RetOnNoOverflow(scratch1);
2469  // No need to revert anything - right and left are intact.
2470  break;
2471  case Token::MUL: {
2472  // Remove the tag from one of the operands. This way the multiplication
2473  // result will be a smi if it fits in the smi range.
2474  __ SmiUntag(scratch1, right);
2475  // Do multiplication.
2476  // lo = lower 32 bits of scratch1 * left.
2477  // hi = higher 32 bits of scratch1 * left.
2478  __ Mult(left, scratch1);
2479  // Check for overflowing the smi range - no overflow if higher 33 bits of
2480  // the result are identical.
2481  __ mflo(scratch1);
2482  __ mfhi(scratch2);
2483  __ sra(scratch1, scratch1, 31);
2484  __ Branch(&not_smi_result, ne, scratch1, Operand(scratch2));
2485  // Go slow on zero result to handle -0.
2486  __ mflo(v0);
2487  __ Ret(ne, v0, Operand(zero_reg));
2488  // We need -0 if we were multiplying a negative number by 0 to get 0.
2489  // We know one of them was zero.
2490  __ Addu(scratch2, right, left);
2491  Label skip;
2492  // ARM uses the 'pl' condition, which is 'ge'.
2493  // Negating it results in 'lt'.
2494  __ Branch(&skip, lt, scratch2, Operand(zero_reg));
2495  ASSERT(Smi::FromInt(0) == 0);
2496  __ Ret(USE_DELAY_SLOT);
2497  __ mov(v0, zero_reg); // Return smi 0 if the non-zero one was positive.
2498  __ bind(&skip);
2499  // We fall through here if we multiplied a negative number by 0, because
2500  // that would mean we should produce -0.
2501  }
2502  break;
2503  case Token::DIV: {
2504  Label done;
2505  __ SmiUntag(scratch2, right);
2506  __ SmiUntag(scratch1, left);
2507  __ Div(scratch1, scratch2);
2508  // A minor optimization: div may be calculated asynchronously, so we check
2509  // for division by zero before getting the result.
2510  __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
2511  // If the result is 0, we need to make sure the divisor (right) is
2512  // positive, otherwise it is a -0 case.
2513  // Quotient is in 'lo', remainder is in 'hi'.
2514  // Check for no remainder first.
2515  __ mfhi(scratch1);
2516  __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
2517  __ mflo(scratch1);
2518  __ Branch(&done, ne, scratch1, Operand(zero_reg));
2519  __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2520  __ bind(&done);
2521  // Check that the signed result fits in a Smi.
2522  __ Addu(scratch2, scratch1, Operand(0x40000000));
2523  __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2524  __ SmiTag(v0, scratch1);
2525  __ Ret();
2526  }
2527  break;
2528  case Token::MOD: {
2529  Label done;
2530  __ SmiUntag(scratch2, right);
2531  __ SmiUntag(scratch1, left);
2532  __ Div(scratch1, scratch2);
2533  // A minor optimization: div may be calculated asynchronously, so we check
2534  // for division by 0 before calling mfhi.
2535  // Check for zero on the right hand side.
2536  __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
2537  // If the result is 0, we need to make sure the dividend (left) is
2538  // positive (or 0), otherwise it is a -0 case.
2539  // Remainder is in 'hi'.
2540  __ mfhi(scratch2);
2541  __ Branch(&done, ne, scratch2, Operand(zero_reg));
2542  __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
2543  __ bind(&done);
2544  // Check that the signed result fits in a Smi.
2545  __ Addu(scratch1, scratch2, Operand(0x40000000));
2546  __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
2547  __ SmiTag(v0, scratch2);
2548  __ Ret();
2549  }
2550  break;
2551  case Token::BIT_OR:
2552  __ Ret(USE_DELAY_SLOT);
2553  __ or_(v0, left, right);
2554  break;
2555  case Token::BIT_AND:
2556  __ Ret(USE_DELAY_SLOT);
2557  __ and_(v0, left, right);
2558  break;
2559  case Token::BIT_XOR:
2560  __ Ret(USE_DELAY_SLOT);
2561  __ xor_(v0, left, right);
2562  break;
2563  case Token::SAR:
2564  // Remove tags from right operand.
2565  __ GetLeastBitsFromSmi(scratch1, right, 5);
2566  __ srav(scratch1, left, scratch1);
2567  // Smi tag result.
2568  __ And(v0, scratch1, ~kSmiTagMask);
2569  __ Ret();
2570  break;
2571  case Token::SHR:
2572  // Remove tags from operands. We can't do this on a 31 bit number
2573  // because then the 0s get shifted into bit 30 instead of bit 31.
2574  __ SmiUntag(scratch1, left);
2575  __ GetLeastBitsFromSmi(scratch2, right, 5);
2576  __ srlv(v0, scratch1, scratch2);
2577  // Unsigned shift is not allowed to produce a negative number, so
2578  // check the sign bit and the sign bit after Smi tagging.
2579  __ And(scratch1, v0, Operand(0xc0000000));
2580  __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
2581  // Smi tag result.
2582  __ SmiTag(v0);
2583  __ Ret();
2584  break;
2585  case Token::SHL:
2586  // Remove tags from operands.
2587  __ SmiUntag(scratch1, left);
2588  __ GetLeastBitsFromSmi(scratch2, right, 5);
2589  __ sllv(scratch1, scratch1, scratch2);
2590  // Check that the signed result fits in a Smi.
2591  __ Addu(scratch2, scratch1, Operand(0x40000000));
2592  __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2593  __ SmiTag(v0, scratch1);
2594  __ Ret();
2595  break;
2596  default:
2597  UNREACHABLE();
2598  }
2599  __ bind(&not_smi_result);
2600 }
2601 
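Several cases above (DIV, MOD, SHR, SHL) decide whether a 32-bit result still fits in a smi by adding 0x40000000 and testing the sign. A C++ sketch of why that works: smis hold 31-bit signed values, i.e. the range [-2^30, 2^30), and adding 2^30 maps exactly that range onto the non-negative 32-bit integers.

#include <stdint.h>

static bool FitsInSmi(int32_t value) {
  // value in [-2^30, 2^30)  <=>  value + 2^30 in [0, 2^31)  <=>  sign bit clear.
  return static_cast<int32_t>(static_cast<uint32_t>(value) + 0x40000000u) >= 0;
}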
2602 
2603 void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2604  bool smi_operands,
2605  Label* not_numbers,
2606  Label* gc_required) {
2607  Register left = a1;
2608  Register right = a0;
2609  Register scratch1 = t3;
2610  Register scratch2 = t5;
2611  Register scratch3 = t0;
2612 
2613  ASSERT(smi_operands || (not_numbers != NULL));
2614  if (smi_operands) {
2615  __ AssertSmi(left);
2616  __ AssertSmi(right);
2617  }
2618 
2619  Register heap_number_map = t2;
2620  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2621 
2622  switch (op_) {
2623  case Token::ADD:
2624  case Token::SUB:
2625  case Token::MUL:
2626  case Token::DIV:
2627  case Token::MOD: {
2628  // Load left and right operands into f12 and f14 or a0/a1 and a2/a3
2629  // depending on whether FPU is available or not.
2630  FloatingPointHelper::Destination destination =
2631  CpuFeatures::IsSupported(FPU) &&
2632  op_ != Token::MOD ?
2633  FloatingPointHelper::kFPURegisters :
2634  FloatingPointHelper::kCoreRegisters;
2635 
2636  // Allocate new heap number for result.
2637  Register result = s0;
2638  GenerateHeapResultAllocation(
2639  masm, result, heap_number_map, scratch1, scratch2, gc_required);
2640 
2641  // Load the operands.
2642  if (smi_operands) {
2643  FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
2644  } else {
2645  FloatingPointHelper::LoadOperands(masm,
2646  destination,
2647  heap_number_map,
2648  scratch1,
2649  scratch2,
2650  not_numbers);
2651  }
2652 
2653  // Calculate the result.
2654  if (destination == FloatingPointHelper::kFPURegisters) {
2655  // Using FPU registers:
2656  // f12: Left value.
2657  // f14: Right value.
2658  CpuFeatures::Scope scope(FPU);
2659  switch (op_) {
2660  case Token::ADD:
2661  __ add_d(f10, f12, f14);
2662  break;
2663  case Token::SUB:
2664  __ sub_d(f10, f12, f14);
2665  break;
2666  case Token::MUL:
2667  __ mul_d(f10, f12, f14);
2668  break;
2669  case Token::DIV:
2670  __ div_d(f10, f12, f14);
2671  break;
2672  default:
2673  UNREACHABLE();
2674  }
2675 
2676  // ARM uses a workaround here because of the unaligned HeapNumber
2677  // kValueOffset. On MIPS this workaround is built into sdc1 so
2678  // there's no point in generating even more instructions.
2679  __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
2680  __ Ret(USE_DELAY_SLOT);
2681  __ mov(v0, result);
2682  } else {
2683  // Call the C function to handle the double operation.
2684  FloatingPointHelper::CallCCodeForDoubleOperation(masm,
2685  op_,
2686  result,
2687  scratch1);
2688  if (FLAG_debug_code) {
2689  __ stop("Unreachable code.");
2690  }
2691  }
2692  break;
2693  }
2694  case Token::BIT_OR:
2695  case Token::BIT_XOR:
2696  case Token::BIT_AND:
2697  case Token::SAR:
2698  case Token::SHR:
2699  case Token::SHL: {
2700  if (smi_operands) {
2701  __ SmiUntag(a3, left);
2702  __ SmiUntag(a2, right);
2703  } else {
2704  // Convert operands to 32-bit integers. Right in a2 and left in a3.
2705  FloatingPointHelper::ConvertNumberToInt32(masm,
2706  left,
2707  a3,
2708  heap_number_map,
2709  scratch1,
2710  scratch2,
2711  scratch3,
2712  f0,
2713  not_numbers);
2714  FloatingPointHelper::ConvertNumberToInt32(masm,
2715  right,
2716  a2,
2717  heap_number_map,
2718  scratch1,
2719  scratch2,
2720  scratch3,
2721  f0,
2722  not_numbers);
2723  }
2724  Label result_not_a_smi;
2725  switch (op_) {
2726  case Token::BIT_OR:
2727  __ Or(a2, a3, Operand(a2));
2728  break;
2729  case Token::BIT_XOR:
2730  __ Xor(a2, a3, Operand(a2));
2731  break;
2732  case Token::BIT_AND:
2733  __ And(a2, a3, Operand(a2));
2734  break;
2735  case Token::SAR:
2736  // Use only the 5 least significant bits of the shift count.
2737  __ GetLeastBitsFromInt32(a2, a2, 5);
2738  __ srav(a2, a3, a2);
2739  break;
2740  case Token::SHR:
2741  // Use only the 5 least significant bits of the shift count.
2742  __ GetLeastBitsFromInt32(a2, a2, 5);
2743  __ srlv(a2, a3, a2);
2744  // SHR is special because it is required to produce a positive answer.
2745  // The code below for writing into heap numbers isn't capable of
2746  // writing the register as an unsigned int so we go to slow case if we
2747  // hit this case.
2748  if (CpuFeatures::IsSupported(FPU)) {
2749  __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
2750  } else {
2751  __ Branch(not_numbers, lt, a2, Operand(zero_reg));
2752  }
2753  break;
2754  case Token::SHL:
2755  // Use only the 5 least significant bits of the shift count.
2756  __ GetLeastBitsFromInt32(a2, a2, 5);
2757  __ sllv(a2, a3, a2);
2758  break;
2759  default:
2760  UNREACHABLE();
2761  }
2762  // Check that the *signed* result fits in a smi.
2763  __ Addu(a3, a2, Operand(0x40000000));
2764  __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
2765  __ SmiTag(v0, a2);
2766  __ Ret();
2767 
2768  // Allocate new heap number for result.
2769  __ bind(&result_not_a_smi);
2770  Register result = t1;
2771  if (smi_operands) {
2772  __ AllocateHeapNumber(
2773  result, scratch1, scratch2, heap_number_map, gc_required);
2774  } else {
2775  GenerateHeapResultAllocation(
2776  masm, result, heap_number_map, scratch1, scratch2, gc_required);
2777  }
2778 
2779  // a2: Answer as signed int32.
2780  // t1: Heap number to write answer into.
2781 
2782  // Nothing can go wrong now, so move the heap number to v0, which is the
2783  // result.
2784  __ mov(v0, t1);
2785 
2786  if (CpuFeatures::IsSupported(FPU)) {
2787  // Convert the int32 in a2 to the heap number in a0. As
2788  // mentioned above SHR needs to always produce a positive result.
2789  CpuFeatures::Scope scope(FPU);
2790  __ mtc1(a2, f0);
2791  if (op_ == Token::SHR) {
2792  __ Cvt_d_uw(f0, f0, f22);
2793  } else {
2794  __ cvt_d_w(f0, f0);
2795  }
2796  // ARM uses a workaround here because of the unaligned HeapNumber
2797  // kValueOffset. On MIPS this workaround is built into sdc1 so
2798  // there's no point in generating even more instructions.
2799  __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
2800  __ Ret();
2801  } else {
2802  // Tail call that writes the int32 in a2 to the heap number in v0, using
2803  // a3 and a0 as scratch. v0 is preserved and returned.
2804  WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
2805  __ TailCallStub(&stub);
2806  }
2807  break;
2808  }
2809  default:
2810  UNREACHABLE();
2811  }
2812 }
2813 
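The bit-op half of GenerateFPOperation follows the ECMA-262 shift rules, which are worth restating in C++ (a sketch; conversion of the inputs to int32 is assumed to have happened already): only the five low bits of the shift count are used, and >>> produces an unsigned 32-bit value, which is why a result with the top bit set cannot be returned as a smi or written back as a signed integer.

#include <stdint.h>

static int32_t JsSar(int32_t lhs, int32_t count) { return lhs >> (count & 0x1f); }

static int32_t JsShl(int32_t lhs, int32_t count) {
  return static_cast<int32_t>(static_cast<uint32_t>(lhs) << (count & 0x1f));
}

static double JsShr(int32_t lhs, int32_t count) {
  uint32_t result = static_cast<uint32_t>(lhs) >> (count & 0x1f);
  // Values >= 2^31 have no int32/smi representation; they must become doubles.
  return static_cast<double>(result);
}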
2814 
2815 // Generate the smi code. If the operation on smis is successful, a return is
2816 // generated. If the result is not a smi and heap number allocation is not
2817 // requested, the code falls through. If number allocation is requested but a
2818 // heap number cannot be allocated, the code jumps to the label gc_required.
2819 void BinaryOpStub::GenerateSmiCode(
2820  MacroAssembler* masm,
2821  Label* use_runtime,
2822  Label* gc_required,
2823  SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
2824  Label not_smis;
2825 
2826  Register left = a1;
2827  Register right = a0;
2828  Register scratch1 = t3;
2829 
2830  // Perform combined smi check on both operands.
2831  __ Or(scratch1, left, Operand(right));
2832  STATIC_ASSERT(kSmiTag == 0);
2833  __ JumpIfNotSmi(scratch1, &not_smis);
2834 
2835  // If the smi-smi operation results in a smi, a return is generated.
2836  GenerateSmiSmiOperation(masm);
2837 
2838  // If heap number results are possible generate the result in an allocated
2839  // heap number.
2840  if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
2841  GenerateFPOperation(masm, true, use_runtime, gc_required);
2842  }
2843  __ bind(&not_smis);
2844 }
2845 
2846 
2847 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2848  Label not_smis, call_runtime;
2849 
2850  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
2851  result_type_ == BinaryOpIC::SMI) {
2852  // Only allow smi results.
2853  GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
2854  } else {
2855  // Allow heap number result and don't make a transition if a heap number
2856  // cannot be allocated.
2857  GenerateSmiCode(masm,
2858  &call_runtime,
2859  &call_runtime,
2860  ALLOW_HEAPNUMBER_RESULTS);
2861  }
2862 
2863  // Code falls through if the result is not returned as either a smi or heap
2864  // number.
2865  GenerateTypeTransition(masm);
2866 
2867  __ bind(&call_runtime);
2868  GenerateCallRuntime(masm);
2869 }
2870 
2871 
2872 void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
2873  ASSERT(operands_type_ == BinaryOpIC::STRING);
2874  // Try to add arguments as strings, otherwise, transition to the generic
2875  // BinaryOpIC type.
2876  GenerateAddStrings(masm);
2877  GenerateTypeTransition(masm);
2878 }
2879 
2880 
2881 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
2882  Label call_runtime;
2883  ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
2884  ASSERT(op_ == Token::ADD);
2885  // If both arguments are strings, call the string add stub.
2886  // Otherwise, do a transition.
2887 
2888  // Registers containing left and right operands respectively.
2889  Register left = a1;
2890  Register right = a0;
2891 
2892  // Test if left operand is a string.
2893  __ JumpIfSmi(left, &call_runtime);
2894  __ GetObjectType(left, a2, a2);
2895  __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
2896 
2897  // Test if right operand is a string.
2898  __ JumpIfSmi(right, &call_runtime);
2899  __ GetObjectType(right, a2, a2);
2900  __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
2901 
2902  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
2903  GenerateRegisterArgsPush(masm);
2904  __ TailCallStub(&string_add_stub);
2905 
2906  __ bind(&call_runtime);
2907  GenerateTypeTransition(masm);
2908 }
2909 
2910 
2911 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2912  ASSERT(operands_type_ == BinaryOpIC::INT32);
2913 
2914  Register left = a1;
2915  Register right = a0;
2916  Register scratch1 = t3;
2917  Register scratch2 = t5;
2918  FPURegister double_scratch = f0;
2919  FPURegister single_scratch = f6;
2920 
2921  Register heap_number_result = no_reg;
2922  Register heap_number_map = t2;
2923  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2924 
2925  Label call_runtime;
2926  // Labels for type transition, used for wrong input or output types.
2927  // Both labels are currently bound to the same position. We use two
2928  // different labels to differentiate the cause leading to the type transition.
2929  Label transition;
2930 
2931  // Smi-smi fast case.
2932  Label skip;
2933  __ Or(scratch1, left, right);
2934  __ JumpIfNotSmi(scratch1, &skip);
2935  GenerateSmiSmiOperation(masm);
2936  // Fall through if the result is not a smi.
2937  __ bind(&skip);
2938 
2939  switch (op_) {
2940  case Token::ADD:
2941  case Token::SUB:
2942  case Token::MUL:
2943  case Token::DIV:
2944  case Token::MOD: {
2945  // Load both operands and check that they are 32-bit integer.
2946  // Jump to type transition if they are not. The registers a0 and a1 (right
2947  // and left) are preserved for the runtime call.
2948  FloatingPointHelper::Destination destination =
2949  (CpuFeatures::IsSupported(FPU) && op_ != Token::MOD)
2950  ? FloatingPointHelper::kFPURegisters
2951  : FloatingPointHelper::kCoreRegisters;
2952 
2953  FloatingPointHelper::LoadNumberAsInt32Double(masm,
2954  right,
2955  destination,
2956  f14,
2957  a2,
2958  a3,
2959  heap_number_map,
2960  scratch1,
2961  scratch2,
2962  f2,
2963  &transition);
2964  FloatingPointHelper::LoadNumberAsInt32Double(masm,
2965  left,
2966  destination,
2967  f12,
2968  t0,
2969  t1,
2970  heap_number_map,
2971  scratch1,
2972  scratch2,
2973  f2,
2974  &transition);
2975 
2976  if (destination == FloatingPointHelper::kFPURegisters) {
2977  CpuFeatures::Scope scope(FPU);
2978  Label return_heap_number;
2979  switch (op_) {
2980  case Token::ADD:
2981  __ add_d(f10, f12, f14);
2982  break;
2983  case Token::SUB:
2984  __ sub_d(f10, f12, f14);
2985  break;
2986  case Token::MUL:
2987  __ mul_d(f10, f12, f14);
2988  break;
2989  case Token::DIV:
2990  __ div_d(f10, f12, f14);
2991  break;
2992  default:
2993  UNREACHABLE();
2994  }
2995 
2996  if (op_ != Token::DIV) {
2997  // These operations produce an integer result.
2998  // Try to return a smi if we can.
2999  // Otherwise return a heap number if allowed, or jump to type
3000  // transition.
3001 
3002  Register except_flag = scratch2;
3003  __ EmitFPUTruncate(kRoundToZero,
3004  single_scratch,
3005  f10,
3006  scratch1,
3007  except_flag);
3008 
3009  if (result_type_ <= BinaryOpIC::INT32) {
3010  // If except_flag != 0, result does not fit in a 32-bit integer.
3011  __ Branch(&transition, ne, except_flag, Operand(zero_reg));
3012  }
3013 
3014  // Check if the result fits in a smi.
3015  __ mfc1(scratch1, single_scratch);
3016  __ Addu(scratch2, scratch1, Operand(0x40000000));
3017  // If not try to return a heap number.
3018  __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
3019  // Check for minus zero. Return heap number for minus zero.
3020  Label not_zero;
3021  __ Branch(&not_zero, ne, scratch1, Operand(zero_reg));
3022  __ mfc1(scratch2, f11);
3023  __ And(scratch2, scratch2, HeapNumber::kSignMask);
3024  __ Branch(&return_heap_number, ne, scratch2, Operand(zero_reg));
3025  __ bind(&not_zero);
3026 
3027  // Tag the result and return.
3028  __ SmiTag(v0, scratch1);
3029  __ Ret();
3030  } else {
3031  // DIV just falls through to allocating a heap number.
3032  }
3033 
3034  __ bind(&return_heap_number);
3035  // Return a heap number, or fall through to type transition or runtime
3036  // call if we can't.
3037  if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
3038  : BinaryOpIC::INT32)) {
3039  // We are using FPU registers so s0 is available.
3040  heap_number_result = s0;
3041  GenerateHeapResultAllocation(masm,
3042  heap_number_result,
3043  heap_number_map,
3044  scratch1,
3045  scratch2,
3046  &call_runtime);
3047  __ sdc1(f10, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
3048  __ mov(v0, heap_number_result);
3049  __ Ret();
3050  }
3051 
3052  // A DIV operation expecting an integer result falls through
3053  // to type transition.
3054 
3055  } else {
3056  // We preserved a0 and a1 to be able to call runtime.
3057  // Save the left value on the stack.
3058  __ Push(t1, t0);
3059 
3060  Label pop_and_call_runtime;
3061 
3062  // Allocate a heap number to store the result.
3063  heap_number_result = s0;
3064  GenerateHeapResultAllocation(masm,
3065  heap_number_result,
3066  heap_number_map,
3067  scratch1,
3068  scratch2,
3069  &pop_and_call_runtime);
3070 
3071  // Load the left value from the value saved on the stack.
3072  __ Pop(a1, a0);
3073 
3074  // Call the C function to handle the double operation.
3075  FloatingPointHelper::CallCCodeForDoubleOperation(
3076  masm, op_, heap_number_result, scratch1);
3077  if (FLAG_debug_code) {
3078  __ stop("Unreachable code.");
3079  }
3080 
3081  __ bind(&pop_and_call_runtime);
3082  __ Drop(2);
3083  __ Branch(&call_runtime);
3084  }
3085 
3086  break;
3087  }
3088 
3089  case Token::BIT_OR:
3090  case Token::BIT_XOR:
3091  case Token::BIT_AND:
3092  case Token::SAR:
3093  case Token::SHR:
3094  case Token::SHL: {
3095  Label return_heap_number;
3096  Register scratch3 = t1;
3097  // Convert operands to 32-bit integers. Right in a2 and left in a3. The
3098  // registers a0 and a1 (right and left) are preserved for the runtime
3099  // call.
3100  FloatingPointHelper::LoadNumberAsInt32(masm,
3101  left,
3102  a3,
3103  heap_number_map,
3104  scratch1,
3105  scratch2,
3106  scratch3,
3107  f0,
3108  &transition);
3109  FloatingPointHelper::LoadNumberAsInt32(masm,
3110  right,
3111  a2,
3112  heap_number_map,
3113  scratch1,
3114  scratch2,
3115  scratch3,
3116  f0,
3117  &transition);
3118 
3119  // The ECMA-262 standard specifies that, for shift operations, only the
3120  // 5 least significant bits of the shift value should be used.
3121  switch (op_) {
3122  case Token::BIT_OR:
3123  __ Or(a2, a3, Operand(a2));
3124  break;
3125  case Token::BIT_XOR:
3126  __ Xor(a2, a3, Operand(a2));
3127  break;
3128  case Token::BIT_AND:
3129  __ And(a2, a3, Operand(a2));
3130  break;
3131  case Token::SAR:
3132  __ And(a2, a2, Operand(0x1f));
3133  __ srav(a2, a3, a2);
3134  break;
3135  case Token::SHR:
3136  __ And(a2, a2, Operand(0x1f));
3137  __ srlv(a2, a3, a2);
3138  // SHR is special because it is required to produce a positive answer.
3139  // We only get a negative result if the shift value (a2) is 0.
3140  // This result cannot be represented as a signed 32-bit integer, so try
3141  // to return a heap number if we can.
3142  // The non-FPU code does not support this special case, so jump to
3143  // runtime if we don't support it.
3144  if (CpuFeatures::IsSupported(FPU)) {
3145  __ Branch((result_type_ <= BinaryOpIC::INT32)
3146  ? &transition
3147  : &return_heap_number,
3148  lt,
3149  a2,
3150  Operand(zero_reg));
3151  } else {
3152  __ Branch((result_type_ <= BinaryOpIC::INT32)
3153  ? &transition
3154  : &call_runtime,
3155  lt,
3156  a2,
3157  Operand(zero_reg));
3158  }
3159  break;
3160  case Token::SHL:
3161  __ And(a2, a2, Operand(0x1f));
3162  __ sllv(a2, a3, a2);
3163  break;
3164  default:
3165  UNREACHABLE();
3166  }
3167 
3168  // Check if the result fits in a smi.
3169  __ Addu(scratch1, a2, Operand(0x40000000));
3170  // If not try to return a heap number. (We know the result is an int32.)
3171  __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
3172  // Tag the result and return.
3173  __ SmiTag(v0, a2);
3174  __ Ret();
3175 
3176  __ bind(&return_heap_number);
3177  heap_number_result = t1;
3178  GenerateHeapResultAllocation(masm,
3179  heap_number_result,
3180  heap_number_map,
3181  scratch1,
3182  scratch2,
3183  &call_runtime);
3184 
3185  if (CpuFeatures::IsSupported(FPU)) {
3186  CpuFeatures::Scope scope(FPU);
3187 
3188  if (op_ != Token::SHR) {
3189  // Convert the result to a floating point value.
3190  __ mtc1(a2, double_scratch);
3191  __ cvt_d_w(double_scratch, double_scratch);
3192  } else {
3193  // The result must be interpreted as an unsigned 32-bit integer.
3194  __ mtc1(a2, double_scratch);
3195  __ Cvt_d_uw(double_scratch, double_scratch, single_scratch);
3196  }
3197 
3198  // Store the result.
3199  __ mov(v0, heap_number_result);
3200  __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset));
3201  __ Ret();
3202  } else {
3203  // Tail call that writes the int32 in a2 to the heap number in v0, using
3204  // a3 and a0 as scratch. v0 is preserved and returned.
3205  __ mov(v0, t1);
3206  WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
3207  __ TailCallStub(&stub);
3208  }
3209 
3210  break;
3211  }
3212 
3213  default:
3214  UNREACHABLE();
3215  }
3216 
3217  // We never expect DIV to yield an integer result, so we always generate
3218  // type transition code for DIV operations expecting an integer result: the
3219  // code will fall through to this type transition.
3220  if (transition.is_linked() ||
3221  ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
3222  __ bind(&transition);
3223  GenerateTypeTransition(masm);
3224  }
3225 
3226  __ bind(&call_runtime);
3227  GenerateCallRuntime(masm);
3228 }
3229 
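The INT32 stub only refuses to return a smi for an integral result in one subtle case: the value is zero but the double's sign bit is set, i.e. -0. A short C++ sketch of that test, reading the sign from the raw bits much as the mfc1 from the high half does above:

#include <stdint.h>
#include <string.h>

static bool IsMinusZero(double value) {
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));
  return bits == 0x8000000000000000ull;  // zero magnitude with the sign bit set
}

// A smi cannot encode -0, so such a result must be boxed as a heap number.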
3230 
3231 void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
3232  Label call_runtime;
3233 
3234  if (op_ == Token::ADD) {
3235  // Handle string addition here, because it is the only operation
3236  // that does not do a ToNumber conversion on the operands.
3237  GenerateAddStrings(masm);
3238  }
3239 
3240  // Convert oddball arguments to numbers.
3241  Label check, done;
3242  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
3243  __ Branch(&check, ne, a1, Operand(t0));
3244  if (Token::IsBitOp(op_)) {
3245  __ li(a1, Operand(Smi::FromInt(0)));
3246  } else {
3247  __ LoadRoot(a1, Heap::kNanValueRootIndex);
3248  }
3249  __ jmp(&done);
3250  __ bind(&check);
3251  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
3252  __ Branch(&done, ne, a0, Operand(t0));
3253  if (Token::IsBitOp(op_)) {
3254  __ li(a0, Operand(Smi::FromInt(0)));
3255  } else {
3256  __ LoadRoot(a0, Heap::kNanValueRootIndex);
3257  }
3258  __ bind(&done);
3259 
3260  GenerateHeapNumberStub(masm);
3261 }
3262 
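GenerateOddballStub patches undefined operands before falling through to the heap-number stub; the replacement value depends on the operator, as this C++ sketch shows (a boolean stands in for "is this a bitwise or shift token"): bit ops see undefined as 0, arithmetic ops see it as NaN.

#include <limits>

static double UndefinedOperandValue(bool is_bit_op) {
  return is_bit_op ? 0.0 : std::numeric_limits<double>::quiet_NaN();
}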
3263 
3264 void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
3265  Label call_runtime;
3266  GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
3267 
3268  __ bind(&call_runtime);
3269  GenerateCallRuntime(masm);
3270 }
3271 
3272 
3273 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
3274  Label call_runtime, call_string_add_or_runtime;
3275 
3276  GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
3277 
3278  GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
3279 
3280  __ bind(&call_string_add_or_runtime);
3281  if (op_ == Token::ADD) {
3282  GenerateAddStrings(masm);
3283  }
3284 
3285  __ bind(&call_runtime);
3286  GenerateCallRuntime(masm);
3287 }
3288 
3289 
3290 void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
3291  ASSERT(op_ == Token::ADD);
3292  Label left_not_string, call_runtime;
3293 
3294  Register left = a1;
3295  Register right = a0;
3296 
3297  // Check if left argument is a string.
3298  __ JumpIfSmi(left, &left_not_string);
3299  __ GetObjectType(left, a2, a2);
3300  __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE));
3301 
3302  StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
3303  GenerateRegisterArgsPush(masm);
3304  __ TailCallStub(&string_add_left_stub);
3305 
3306  // Left operand is not a string, test right.
3307  __ bind(&left_not_string);
3308  __ JumpIfSmi(right, &call_runtime);
3309  __ GetObjectType(right, a2, a2);
3310  __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
3311 
3312  StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
3313  GenerateRegisterArgsPush(masm);
3314  __ TailCallStub(&string_add_right_stub);
3315 
3316  // At least one argument is not a string.
3317  __ bind(&call_runtime);
3318 }
3319 
3320 
3321 void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
3322  GenerateRegisterArgsPush(masm);
3323  switch (op_) {
3324  case Token::ADD:
3325  __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
3326  break;
3327  case Token::SUB:
3328  __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
3329  break;
3330  case Token::MUL:
3331  __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
3332  break;
3333  case Token::DIV:
3334  __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
3335  break;
3336  case Token::MOD:
3337  __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
3338  break;
3339  case Token::BIT_OR:
3340  __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
3341  break;
3342  case Token::BIT_AND:
3343  __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
3344  break;
3345  case Token::BIT_XOR:
3346  __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
3347  break;
3348  case Token::SAR:
3349  __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
3350  break;
3351  case Token::SHR:
3352  __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
3353  break;
3354  case Token::SHL:
3355  __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
3356  break;
3357  default:
3358  UNREACHABLE();
3359  }
3360 }
3361 
3362 
3363 void BinaryOpStub::GenerateHeapResultAllocation(
3364  MacroAssembler* masm,
3365  Register result,
3366  Register heap_number_map,
3367  Register scratch1,
3368  Register scratch2,
3369  Label* gc_required) {
3370 
3371  // Code below will scratch result if allocation fails. To keep both arguments
3372  // intact for the runtime call, result cannot be one of them.
3373  ASSERT(!result.is(a0) && !result.is(a1));
3374 
3375  if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
3376  Label skip_allocation, allocated;
3377  Register overwritable_operand = mode_ == OVERWRITE_LEFT ? a1 : a0;
3378  // If the overwritable operand is already an object, we skip the
3379  // allocation of a heap number.
3380  __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
3381  // Allocate a heap number for the result.
3382  __ AllocateHeapNumber(
3383  result, scratch1, scratch2, heap_number_map, gc_required);
3384  __ Branch(&allocated);
3385  __ bind(&skip_allocation);
3386  // Use object holding the overwritable operand for result.
3387  __ mov(result, overwritable_operand);
3388  __ bind(&allocated);
3389  } else {
3390  ASSERT(mode_ == NO_OVERWRITE);
3391  __ AllocateHeapNumber(
3392  result, scratch1, scratch2, heap_number_map, gc_required);
3393  }
3394 }
3395 
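The allocation policy in GenerateHeapResultAllocation can be summarized in C++ (a sketch; the boolean models the smi check on the overwritable operand): an overwritable operand that is already a heap object is reused as the result, otherwise a fresh heap number has to be allocated.

enum Mode { kNoOverwrite, kOverwriteLeft, kOverwriteRight };  // stand-in enum

// Returns true when the stub may simply reuse the chosen operand's heap object.
static bool CanReuseOperand(Mode mode, bool operand_is_smi) {
  if (mode == kNoOverwrite) return false;  // always allocate a new heap number
  return !operand_is_smi;                  // smis carry no heap storage to reuse
}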
3396 
3397 void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
3398  __ Push(a1, a0);
3399 }
3400 
3401 
3402 
3403 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
3404  // Untagged case: double input in f4, double result goes
3405  // into f4.
3406  // Tagged case: tagged input on top of stack and in a0,
3407  // tagged result (heap number) goes into v0.
3408 
3409  Label input_not_smi;
3410  Label loaded;
3411  Label calculate;
3412  Label invalid_cache;
3413  const Register scratch0 = t5;
3414  const Register scratch1 = t3;
3415  const Register cache_entry = a0;
3416  const bool tagged = (argument_type_ == TAGGED);
3417 
3418  if (CpuFeatures::IsSupported(FPU)) {
3419  CpuFeatures::Scope scope(FPU);
3420 
3421  if (tagged) {
3422  // Argument is a number and is on stack and in a0.
3423  // Load argument and check if it is a smi.
3424  __ JumpIfNotSmi(a0, &input_not_smi);
3425 
3426  // Input is a smi. Convert to double and load the low and high words
3427  // of the double into a2, a3.
3428  __ sra(t0, a0, kSmiTagSize);
3429  __ mtc1(t0, f4);
3430  __ cvt_d_w(f4, f4);
3431  __ Move(a2, a3, f4);
3432  __ Branch(&loaded);
3433 
3434  __ bind(&input_not_smi);
3435  // Check if input is a HeapNumber.
3436  __ CheckMap(a0,
3437  a1,
3438  Heap::kHeapNumberMapRootIndex,
3439  &calculate,
3440  DONT_DO_SMI_CHECK);
3441  // Input is a HeapNumber. Store the
3442  // low and high words into a2, a3.
3443  __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
3444  __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
3445  } else {
3446  // Input is untagged double in f4. Output goes to f4.
3447  __ Move(a2, a3, f4);
3448  }
3449  __ bind(&loaded);
3450  // a2 = low 32 bits of double value.
3451  // a3 = high 32 bits of double value.
3452  // Compute hash (the shifts are arithmetic):
3453  // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
3454  __ Xor(a1, a2, a3);
3455  __ sra(t0, a1, 16);
3456  __ Xor(a1, a1, t0);
3457  __ sra(t0, a1, 8);
3458  __ Xor(a1, a1, t0);
3459  ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
3460  __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
3461 
3462  // a2 = low 32 bits of double value.
3463  // a3 = high 32 bits of double value.
3464  // a1 = TranscendentalCache::hash(double value).
3465  __ li(cache_entry, Operand(
3466  ExternalReference::transcendental_cache_array_address(
3467  masm->isolate())));
3468  // a0 points to cache array.
3469  __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
3470  Isolate::Current()->transcendental_cache()->caches_[0])));
3471  // a0 points to the cache for the type type_.
3472  // If NULL, the cache hasn't been initialized yet, so go through runtime.
3473  __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
3474 
3475 #ifdef DEBUG
3476  // Check that the layout of cache elements matches expectations.
3477  { TranscendentalCache::SubCache::Element test_elem[2];
3478  char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
3479  char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
3480  char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
3481  char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
3482  char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
3483  CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
3484  CHECK_EQ(0, elem_in0 - elem_start);
3485  CHECK_EQ(kIntSize, elem_in1 - elem_start);
3486  CHECK_EQ(2 * kIntSize, elem_out - elem_start);
3487  }
3488 #endif
3489 
3490  // Find the address of the a1'st entry in the cache, i.e., &a0[a1*12].
3491  __ sll(t0, a1, 1);
3492  __ Addu(a1, a1, t0);
3493  __ sll(t0, a1, 2);
3494  __ Addu(cache_entry, cache_entry, t0);
3495 
3496  // Check if cache matches: Double value is stored in uint32_t[2] array.
3497  __ lw(t0, MemOperand(cache_entry, 0));
3498  __ lw(t1, MemOperand(cache_entry, 4));
3499  __ lw(t2, MemOperand(cache_entry, 8));
3500  __ Branch(&calculate, ne, a2, Operand(t0));
3501  __ Branch(&calculate, ne, a3, Operand(t1));
3502  // Cache hit. Load result, cleanup and return.
3503  Counters* counters = masm->isolate()->counters();
3504  __ IncrementCounter(
3505  counters->transcendental_cache_hit(), 1, scratch0, scratch1);
3506  if (tagged) {
3507  // Pop input value from stack and load result into v0.
3508  __ Drop(1);
3509  __ mov(v0, t2);
3510  } else {
3511  // Load result into f4.
3512  __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
3513  }
3514  __ Ret();
3515  } // if (CpuFeatures::IsSupported(FPU))
3516 
3517  __ bind(&calculate);
3518  Counters* counters = masm->isolate()->counters();
3519  __ IncrementCounter(
3520  counters->transcendental_cache_miss(), 1, scratch0, scratch1);
3521  if (tagged) {
3522  __ bind(&invalid_cache);
3523  __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
3524  masm->isolate()),
3525  1,
3526  1);
3527  } else {
3529  CpuFeatures::Scope scope(FPU);
3530 
3531  Label no_update;
3532  Label skip_cache;
3533 
3534  // Call C function to calculate the result and update the cache.
3535  // a0: precalculated cache entry address.
3536  // a2 and a3: parts of the double value.
3537  // Store a0, a2 and a3 on stack for later before calling C function.
3538  __ Push(a3, a2, cache_entry);
3539  GenerateCallCFunction(masm, scratch0);
3540  __ GetCFunctionDoubleResult(f4);
3541 
3542  // Try to update the cache. If we cannot allocate a
3543  // heap number, we return the result without updating.
3544  __ Pop(a3, a2, cache_entry);
3545  __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3546  __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
3547  __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
3548 
3549  __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize));
3550  __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
3551  __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
3552 
3553  __ Ret(USE_DELAY_SLOT);
3554  __ mov(v0, cache_entry);
3555 
3556  __ bind(&invalid_cache);
3557  // The cache is invalid. Call runtime which will recreate the
3558  // cache.
3559  __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3560  __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
3561  __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
3562  {
3563  FrameScope scope(masm, StackFrame::INTERNAL);
3564  __ push(a0);
3565  __ CallRuntime(RuntimeFunction(), 1);
3566  }
3567  __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
3568  __ Ret();
3569 
3570  __ bind(&skip_cache);
3571  // Call C function to calculate the result and answer directly
3572  // without updating the cache.
3573  GenerateCallCFunction(masm, scratch0);
3574  __ GetCFunctionDoubleResult(f4);
3575  __ bind(&no_update);
3576 
3577  // We return the value in f4 without adding it to the cache, but
3578  // we cause a scavenging GC so that future allocations will succeed.
3579  {
3580  FrameScope scope(masm, StackFrame::INTERNAL);
3581 
3582  // Allocate an aligned object larger than a HeapNumber.
3583  ASSERT(4 * kPointerSize >= HeapNumber::kSize);
3584  __ li(scratch0, Operand(4 * kPointerSize));
3585  __ push(scratch0);
3586  __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
3587  }
3588  __ Ret();
3589  }
3590 }
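// The stub above implements a small direct-mapped cache keyed by the raw bits
// of the input double: hash the two 32-bit halves, compare one element, and
// return the cached HeapNumber on a match.  The host-side sketch below mirrors
// the same element layout (two 32-bit input words plus one result slot) and
// the same shift-and-xor hash; the names, the cache size of 512, and the use
// of a plain double as the result slot are illustrative assumptions, not V8's
// actual types.

#include <cstdint>
#include <cstring>

namespace transcendental_cache_sketch {

const uint32_t kCacheSize = 512;  // assumed; must be a power of two (see ASSERT above)

struct Element {
  uint32_t in[2];  // low and high word of the cached input double
  double output;   // stands in for the tagged pointer to the cached HeapNumber
  bool valid;
};

inline uint32_t Hash(uint32_t lo, uint32_t hi) {
  uint32_t h = lo ^ hi;     // h = (low ^ high)
  h ^= h >> 16;             // h ^= h >> 16
  h ^= h >> 8;              // h ^= h >> 8
  return h & (kCacheSize - 1);
}

// Returns true on a cache hit; on a miss the caller computes and stores.
inline bool Lookup(Element (&cache)[kCacheSize], double input, double* result) {
  uint32_t words[2];
  std::memcpy(words, &input, sizeof(words));  // reinterpret the double's bits
  Element& e = cache[Hash(words[0], words[1])];
  if (e.valid && e.in[0] == words[0] && e.in[1] == words[1]) {
    *result = e.output;  // same two-word compare as the Branch pair above
    return true;
  }
  return false;
}

}  // namespace transcendental_cache_sketch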
3591 
3592 
3593 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
3594  Register scratch) {
3595  __ push(ra);
3596  __ PrepareCallCFunction(2, scratch);
3597  if (IsMipsSoftFloatABI) {
3598  __ Move(a0, a1, f4);
3599  } else {
3600  __ mov_d(f12, f4);
3601  }
3602  AllowExternalCallThatCantCauseGC scope(masm);
3603  Isolate* isolate = masm->isolate();
3604  switch (type_) {
3605  case TranscendentalCache::SIN:
3606  __ CallCFunction(
3607  ExternalReference::math_sin_double_function(isolate),
3608  0, 1);
3609  break;
3610  case TranscendentalCache::COS:
3611  __ CallCFunction(
3612  ExternalReference::math_cos_double_function(isolate),
3613  0, 1);
3614  break;
3615  case TranscendentalCache::TAN:
3616  __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
3617  0, 1);
3618  break;
3619  case TranscendentalCache::LOG:
3620  __ CallCFunction(
3621  ExternalReference::math_log_double_function(isolate),
3622  0, 1);
3623  break;
3624  default:
3625  UNIMPLEMENTED();
3626  break;
3627  }
3628  __ pop(ra);
3629 }
3630 
3631 
3632 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
3633  switch (type_) {
3634  // Add more cases when necessary.
3635  case TranscendentalCache::SIN: return Runtime::kMath_sin;
3636  case TranscendentalCache::COS: return Runtime::kMath_cos;
3637  case TranscendentalCache::TAN: return Runtime::kMath_tan;
3638  case TranscendentalCache::LOG: return Runtime::kMath_log;
3639  default:
3640  UNIMPLEMENTED();
3641  return Runtime::kAbort;
3642  }
3643 }
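// On a cache miss the stub either tail-calls the runtime function selected
// above (tagged case) or calls the corresponding C math routine through
// GenerateCallCFunction (untagged case).  A plain C++ sketch of that dispatch,
// using a local enum rather than TranscendentalCache's (illustrative only):

#include <cmath>

enum class TranscendentalOp { kSin, kCos, kTan, kLog };

inline double TranscendentalSketch(TranscendentalOp op, double x) {
  switch (op) {
    case TranscendentalOp::kSin: return std::sin(x);
    case TranscendentalOp::kCos: return std::cos(x);
    case TranscendentalOp::kTan: return std::tan(x);
    case TranscendentalOp::kLog: return std::log(x);
  }
  return x;  // unreachable with a valid op
}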
3644 
3645 
3646 void StackCheckStub::Generate(MacroAssembler* masm) {
3647  __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
3648 }
3649 
3650 
3651 void InterruptStub::Generate(MacroAssembler* masm) {
3652  __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
3653 }
3654 
3655 
3656 void MathPowStub::Generate(MacroAssembler* masm) {
3657  CpuFeatures::Scope fpu_scope(FPU);
3658  const Register base = a1;
3659  const Register exponent = a2;
3660  const Register heapnumbermap = t1;
3661  const Register heapnumber = v0;
3662  const DoubleRegister double_base = f2;
3663  const DoubleRegister double_exponent = f4;
3664  const DoubleRegister double_result = f0;
3665  const DoubleRegister double_scratch = f6;
3666  const FPURegister single_scratch = f8;
3667  const Register scratch = t5;
3668  const Register scratch2 = t3;
3669 
3670  Label call_runtime, done, int_exponent;
3671  if (exponent_type_ == ON_STACK) {
3672  Label base_is_smi, unpack_exponent;
3673  // The exponent and base are supplied as arguments on the stack.
3674  // This can only happen if the stub is called from non-optimized code.
3675  // Load input parameters from stack to double registers.
3676  __ lw(base, MemOperand(sp, 1 * kPointerSize));
3677  __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
3678 
3679  __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
3680 
3681  __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
3682  __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
3683  __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
3684 
3685  __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
3686  __ jmp(&unpack_exponent);
3687 
3688  __ bind(&base_is_smi);
3689  __ mtc1(scratch, single_scratch);
3690  __ cvt_d_w(double_base, single_scratch);
3691  __ bind(&unpack_exponent);
3692 
3693  __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
3694 
3695  __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
3696  __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
3697  __ ldc1(double_exponent,
3698  FieldMemOperand(exponent, HeapNumber::kValueOffset));
3699  } else if (exponent_type_ == TAGGED) {
3700  // Base is already in double_base.
3701  __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
3702 
3703  __ ldc1(double_exponent,
3704  FieldMemOperand(exponent, HeapNumber::kValueOffset));
3705  }
3706 
3707  if (exponent_type_ != INTEGER) {
3708  Label int_exponent_convert;
3709  // Detect integer exponents stored as double.
3710  __ EmitFPUTruncate(kRoundToMinusInf,
3711  single_scratch,
3712  double_exponent,
3713  scratch,
3714  scratch2,
3715  kCheckForInexactConversion);
3716  // scratch2 == 0 means there was no conversion error.
3717  __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
3718 
3719  if (exponent_type_ == ON_STACK) {
3720  // Detect square root case. Crankshaft detects constant +/-0.5 at
3721  // compile time and uses DoMathPowHalf instead. We then skip this check
3722  // for non-constant cases of +/-0.5 as these hardly occur.
3723  Label not_plus_half;
3724 
3725  // Test for 0.5.
3726  __ Move(double_scratch, 0.5);
3727  __ BranchF(USE_DELAY_SLOT,
3728  &not_plus_half,
3729  NULL,
3730  ne,
3731  double_exponent,
3732  double_scratch);
3733  // double_scratch can be overwritten in the delay slot.
3734  // Calculates square root of base. Check for the special case of
3735  // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
3736  __ Move(double_scratch, -V8_INFINITY);
3737  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
3738  __ neg_d(double_result, double_scratch);
3739 
3740  // Add +0 to convert -0 to +0.
3741  __ add_d(double_scratch, double_base, kDoubleRegZero);
3742  __ sqrt_d(double_result, double_scratch);
3743  __ jmp(&done);
3744 
3745  __ bind(&not_plus_half);
3746  __ Move(double_scratch, -0.5);
3747  __ BranchF(USE_DELAY_SLOT,
3748  &call_runtime,
3749  NULL,
3750  ne,
3751  double_exponent,
3752  double_scratch);
3753  // double_scratch can be overwritten in the delay slot.
3754  // Calculates square root of base. Check for the special case of
3755  // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
3756  __ Move(double_scratch, -V8_INFINITY);
3757  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
3758  __ Move(double_result, kDoubleRegZero);
3759 
3760  // Add +0 to convert -0 to +0.
3761  __ add_d(double_scratch, double_base, kDoubleRegZero);
3762  __ Move(double_result, 1);
3763  __ sqrt_d(double_scratch, double_scratch);
3764  __ div_d(double_result, double_result, double_scratch);
3765  __ jmp(&done);
3766  }
3767 
3768  __ push(ra);
3769  {
3770  AllowExternalCallThatCantCauseGC scope(masm);
3771  __ PrepareCallCFunction(0, 2, scratch);
3772  __ SetCallCDoubleArguments(double_base, double_exponent);
3773  __ CallCFunction(
3774  ExternalReference::power_double_double_function(masm->isolate()),
3775  0, 2);
3776  }
3777  __ pop(ra);
3778  __ GetCFunctionDoubleResult(double_result);
3779  __ jmp(&done);
3780 
3781  __ bind(&int_exponent_convert);
3782  __ mfc1(scratch, single_scratch);
3783  }
3784 
3785  // Calculate power with integer exponent.
3786  __ bind(&int_exponent);
3787 
3788  // Get two copies of exponent in the registers scratch and exponent.
3789  if (exponent_type_ == INTEGER) {
3790  __ mov(scratch, exponent);
3791  } else {
3792  // Exponent has previously been stored into scratch as untagged integer.
3793  __ mov(exponent, scratch);
3794  }
3795 
3796  __ mov_d(double_scratch, double_base); // Back up base.
3797  __ Move(double_result, 1.0);
3798 
3799  // Get absolute value of exponent.
3800  Label positive_exponent;
3801  __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
3802  __ Subu(scratch, zero_reg, scratch);
3803  __ bind(&positive_exponent);
3804 
3805  Label while_true, no_carry, loop_end;
3806  __ bind(&while_true);
3807 
3808  __ And(scratch2, scratch, 1);
3809 
3810  __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
3811  __ mul_d(double_result, double_result, double_scratch);
3812  __ bind(&no_carry);
3813 
3814  __ sra(scratch, scratch, 1);
3815 
3816  __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
3817  __ mul_d(double_scratch, double_scratch, double_scratch);
3818 
3819  __ Branch(&while_true);
3820 
3821  __ bind(&loop_end);
3822 
3823  __ Branch(&done, ge, exponent, Operand(zero_reg));
3824  __ Move(double_scratch, 1.0);
3825  __ div_d(double_result, double_scratch, double_result);
3826  // Test whether result is zero. Bail out to check for subnormal result.
3827  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
3828  __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
3829 
3830  // double_exponent may not contain the exponent value if the input was a
3831  // smi. We set it with exponent value before bailing out.
3832  __ mtc1(exponent, single_scratch);
3833  __ cvt_d_w(double_exponent, single_scratch);
3834 
3835  // Returning or bailing out.
3836  Counters* counters = masm->isolate()->counters();
3837  if (exponent_type_ == ON_STACK) {
3838  // The arguments are still on the stack.
3839  __ bind(&call_runtime);
3840  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
3841 
3842  // The stub is called from non-optimized code, which expects the result
3843  // as heap number in exponent.
3844  __ bind(&done);
3845  __ AllocateHeapNumber(
3846  heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
3847  __ sdc1(double_result,
3848  FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
3849  ASSERT(heapnumber.is(v0));
3850  __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
3851  __ DropAndRet(2);
3852  } else {
3853  __ push(ra);
3854  {
3855  AllowExternalCallThatCantCauseGC scope(masm);
3856  __ PrepareCallCFunction(0, 2, scratch);
3857  __ SetCallCDoubleArguments(double_base, double_exponent);
3858  __ CallCFunction(
3859  ExternalReference::power_double_double_function(masm->isolate()),
3860  0, 2);
3861  }
3862  __ pop(ra);
3863  __ GetCFunctionDoubleResult(double_result);
3864 
3865  __ bind(&done);
3866  __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
3867  __ Ret();
3868  }
3869 }
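// The int_exponent path above is binary exponentiation: multiply the result in
// on set exponent bits, square the base between bits, and invert once at the
// end for a negative exponent (bailing out to the C function when the inverted
// result underflows to zero).  Host-side sketch of the same loop, without the
// subnormal bailout:

#include <cstdlib>

inline double PowIntSketch(double base, int exponent) {
  double result = 1.0;
  double running = base;               // double_scratch in the stub
  int bits = std::abs(exponent);       // the stub negates scratch when < 0
  while (bits != 0) {
    if (bits & 1) result *= running;   // mul_d when the low bit is set
    bits >>= 1;
    if (bits != 0) running *= running; // square the base for the next bit
  }
  if (exponent < 0) result = 1.0 / result;
  return result;
}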
3870 
3871 
3872 bool CEntryStub::NeedsImmovableCode() {
3873  return true;
3874 }
3875 
3876 
3877 bool CEntryStub::IsPregenerated() {
3878  return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
3879  result_size_ == 1;
3880 }
3881 
3882 
3883 void CodeStub::GenerateStubsAheadOfTime() {
3884  CEntryStub::GenerateAheadOfTime();
3885  WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
3886  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
3887  RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
3888 }
3889 
3890 
3891 void CodeStub::GenerateFPStubs() {
3892  CEntryStub save_doubles(1, kSaveFPRegs);
3893  Handle<Code> code = save_doubles.GetCode();
3894  code->set_is_pregenerated(true);
3895  StoreBufferOverflowStub stub(kSaveFPRegs);
3896  stub.GetCode()->set_is_pregenerated(true);
3897  code->GetIsolate()->set_fp_stubs_generated(true);
3898 }
3899 
3900 
3901 void CEntryStub::GenerateAheadOfTime() {
3902  CEntryStub stub(1, kDontSaveFPRegs);
3903  Handle<Code> code = stub.GetCode();
3904  code->set_is_pregenerated(true);
3905 }
3906 
3907 
3908 void CEntryStub::GenerateCore(MacroAssembler* masm,
3909  Label* throw_normal_exception,
3910  Label* throw_termination_exception,
3911  Label* throw_out_of_memory_exception,
3912  bool do_gc,
3913  bool always_allocate) {
3914  // v0: result parameter for PerformGC, if any
3915  // s0: number of arguments including receiver (C callee-saved)
3916  // s1: pointer to the first argument (C callee-saved)
3917  // s2: pointer to builtin function (C callee-saved)
3918 
3919  Isolate* isolate = masm->isolate();
3920 
3921  if (do_gc) {
3922  // Move result passed in v0 into a0 to call PerformGC.
3923  __ mov(a0, v0);
3924  __ PrepareCallCFunction(1, 0, a1);
3925  __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1, 0);
3926  }
3927 
3928  ExternalReference scope_depth =
3929  ExternalReference::heap_always_allocate_scope_depth(isolate);
3930  if (always_allocate) {
3931  __ li(a0, Operand(scope_depth));
3932  __ lw(a1, MemOperand(a0));
3933  __ Addu(a1, a1, Operand(1));
3934  __ sw(a1, MemOperand(a0));
3935  }
3936 
3937  // Prepare arguments for C routine.
3938  // a0 = argc
3939  __ mov(a0, s0);
3940  // a1 = argv (set in the delay slot after find_ra below).
3941 
3942  // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
3943  // also need to reserve the 4 argument slots on the stack.
3944 
3945  __ AssertStackIsAligned();
3946 
3947  __ li(a2, Operand(ExternalReference::isolate_address()));
3948 
3949  // To let the GC traverse the return address of the exit frames, we need to
3950  // know where the return address is. The CEntryStub is unmovable, so
3951  // we can store the address on the stack to be able to find it again and
3952  // we never have to restore it, because it will not change.
3953  { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
3954  // This branch-and-link sequence is needed to find the current PC on mips,
3955  // saved to the ra register.
3956  // Use masm-> here instead of the double-underscore macro since extra
3957  // coverage code can interfere with the proper calculation of ra.
3958  Label find_ra;
3959  masm->bal(&find_ra); // bal exposes branch delay slot.
3960  masm->mov(a1, s1);
3961  masm->bind(&find_ra);
3962 
3963  // Adjust the value in ra to point to the correct return location, 2nd
3964  // instruction past the real call into C code (the jalr(t9)), and push it.
3965  // This is the return address of the exit frame.
3966  const int kNumInstructionsToJump = 5;
3967  masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
3968  masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
3969  // Stack space reservation moved to the branch delay slot below.
3970  // Stack is still aligned.
3971 
3972  // Call the C routine.
3973  masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
3974  masm->jalr(t9);
3975  // Set up sp in the delay slot.
3976  masm->addiu(sp, sp, -kCArgsSlotsSize);
3977  // Make sure the stored 'ra' points to this position.
3978  ASSERT_EQ(kNumInstructionsToJump,
3979  masm->InstructionsGeneratedSince(&find_ra));
3980  }
3981 
3982  if (always_allocate) {
3983  // It's okay to clobber a2 and a3 here. v0 & v1 contain result.
3984  __ li(a2, Operand(scope_depth));
3985  __ lw(a3, MemOperand(a2));
3986  __ Subu(a3, a3, Operand(1));
3987  __ sw(a3, MemOperand(a2));
3988  }
3989 
3990  // Check for failure result.
3991  Label failure_returned;
3992  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
3993  __ addiu(a2, v0, 1);
3994  __ andi(t0, a2, kFailureTagMask);
3995  __ Branch(USE_DELAY_SLOT, &failure_returned, eq, t0, Operand(zero_reg));
3996  // Restore stack (remove arg slots) in branch delay slot.
3997  __ addiu(sp, sp, kCArgsSlotsSize);
3998 
3999 
4000  // Exit C frame and return.
4001  // v0:v1: result
4002  // sp: stack pointer
4003  // fp: frame pointer
4004  __ LeaveExitFrame(save_doubles_, s0, true);
4005 
4006  // Check if we should retry or throw exception.
4007  Label retry;
4008  __ bind(&failure_returned);
4009  STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
4010  __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
4011  __ Branch(&retry, eq, t0, Operand(zero_reg));
4012 
4013  // Special handling of out of memory exceptions.
4014  Failure* out_of_memory = Failure::OutOfMemoryException();
4015  __ Branch(USE_DELAY_SLOT,
4016  throw_out_of_memory_exception,
4017  eq,
4018  v0,
4019  Operand(reinterpret_cast<int32_t>(out_of_memory)));
4020  // If we throw the OOM exception, the value of a3 doesn't matter.
4021  // Any instruction can be in the delay slot that's not a jump.
4022 
4023  // Retrieve the pending exception and clear the variable.
4024  __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
4025  __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
4026  isolate)));
4027  __ lw(v0, MemOperand(t0));
4028  __ sw(a3, MemOperand(t0));
4029 
4030  // Special handling of termination exceptions which are uncatchable
4031  // by javascript code.
4032  __ LoadRoot(t0, Heap::kTerminationExceptionRootIndex);
4033  __ Branch(throw_termination_exception, eq, v0, Operand(t0));
4034 
4035  // Handle normal exception.
4036  __ jmp(throw_normal_exception);
4037 
4038  __ bind(&retry);
4039  // Last failure (v0) will be moved to (a0) for parameter when retrying.
4040 }
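// The failure check above relies on failure objects carrying a low-bit tag
// whose increment wraps to zero under the tag mask, so "add 1 and mask" is a
// single test for "is this a failure?".  Sketch with assumed 2-bit tag values
// (tag == 3, mask == 3, which is what the STATIC_ASSERT above requires); the
// real constants are defined elsewhere in V8:

#include <cstdint>

const uint32_t kFailureTagSketch = 3;
const uint32_t kFailureTagMaskSketch = 3;

inline bool IsFailureSketch(uint32_t word) {
  // Mirrors: addiu a2, v0, 1; andi t0, a2, kFailureTagMask; branch if zero.
  return ((word + 1) & kFailureTagMaskSketch) == 0;
}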
4041 
4042 
4043 void CEntryStub::Generate(MacroAssembler* masm) {
4044  // Called from JavaScript; parameters are on stack as if calling JS function
4045  // s0: number of arguments including receiver
4046  // s1: size of arguments excluding receiver
4047  // s2: pointer to builtin function
4048  // fp: frame pointer (restored after C call)
4049  // sp: stack pointer (restored as callee's sp after C call)
4050  // cp: current context (C callee-saved)
4051 
4052  // NOTE: Invocations of builtins may return failure objects
4053  // instead of a proper result. The builtin entry handles
4054  // this by performing a garbage collection and retrying the
4055  // builtin once.
4056 
4057  // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
4058  // The reason for this is that these arguments would need to be saved anyway
4059  // so it's faster to set them up directly.
4060  // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
4061 
4062  // Compute the argv pointer in a callee-saved register.
4063  __ Addu(s1, sp, s1);
4064 
4065  // Enter the exit frame that transitions from JavaScript to C++.
4066  FrameScope scope(masm, StackFrame::MANUAL);
4067  __ EnterExitFrame(save_doubles_);
4068 
4069  // s0: number of arguments (C callee-saved)
4070  // s1: pointer to first argument (C callee-saved)
4071  // s2: pointer to builtin function (C callee-saved)
4072 
4073  Label throw_normal_exception;
4074  Label throw_termination_exception;
4075  Label throw_out_of_memory_exception;
4076 
4077  // Call into the runtime system.
4078  GenerateCore(masm,
4079  &throw_normal_exception,
4080  &throw_termination_exception,
4081  &throw_out_of_memory_exception,
4082  false,
4083  false);
4084 
4085  // Do space-specific GC and retry runtime call.
4086  GenerateCore(masm,
4087  &throw_normal_exception,
4088  &throw_termination_exception,
4089  &throw_out_of_memory_exception,
4090  true,
4091  false);
4092 
4093  // Do full GC and retry runtime call one final time.
4094  Failure* failure = Failure::InternalError();
4095  __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
4096  GenerateCore(masm,
4097  &throw_normal_exception,
4098  &throw_termination_exception,
4099  &throw_out_of_memory_exception,
4100  true,
4101  true);
4102 
4103  __ bind(&throw_out_of_memory_exception);
4104  // Set external caught exception to false.
4105  Isolate* isolate = masm->isolate();
4106  ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
4107  isolate);
4108  __ li(a0, Operand(false, RelocInfo::NONE));
4109  __ li(a2, Operand(external_caught));
4110  __ sw(a0, MemOperand(a2));
4111 
4112  // Set pending exception and v0 to out of memory exception.
4113  Failure* out_of_memory = Failure::OutOfMemoryException();
4114  __ li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
4115  __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
4116  isolate)));
4117  __ sw(v0, MemOperand(a2));
4118  // Fall through to the next label.
4119 
4120  __ bind(&throw_termination_exception);
4121  __ ThrowUncatchable(v0);
4122 
4123  __ bind(&throw_normal_exception);
4124  __ Throw(v0);
4125 }
4126 
4127 
4128 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
4129  Label invoke, handler_entry, exit;
4130  Isolate* isolate = masm->isolate();
4131 
4132  // Registers:
4133  // a0: entry address
4134  // a1: function
4135  // a2: receiver
4136  // a3: argc
4137  //
4138  // Stack:
4139  // 4 args slots
4140  // args
4141 
4142  // Save callee saved registers on the stack.
4143  __ MultiPush(kCalleeSaved | ra.bit());
4144 
4145  if (CpuFeatures::IsSupported(FPU)) {
4146  CpuFeatures::Scope scope(FPU);
4147  // Save callee-saved FPU registers.
4148  __ MultiPushFPU(kCalleeSavedFPU);
4149  // Set up the reserved register for 0.0.
4150  __ Move(kDoubleRegZero, 0.0);
4151  }
4152 
4153 
4154  // Load argv in s0 register.
4155  int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
4156  if (CpuFeatures::IsSupported(FPU)) {
4157  offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
4158  }
4159 
4160  __ InitializeRootRegister();
4161  __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
4162 
4163  // We build an EntryFrame.
4164  __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
4165  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
4166  __ li(t2, Operand(Smi::FromInt(marker)));
4167  __ li(t1, Operand(Smi::FromInt(marker)));
4168  __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
4169  isolate)));
4170  __ lw(t0, MemOperand(t0));
4171  __ Push(t3, t2, t1, t0);
4172  // Set up frame pointer for the frame to be pushed.
4173  __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
4174 
4175  // Registers:
4176  // a0: entry_address
4177  // a1: function
4178  // a2: receiver_pointer
4179  // a3: argc
4180  // s0: argv
4181  //
4182  // Stack:
4183  // caller fp |
4184  // function slot | entry frame
4185  // context slot |
4186  // bad fp (0xff...f) |
4187  // callee saved registers + ra
4188  // 4 args slots
4189  // args
4190 
4191  // If this is the outermost JS call, set js_entry_sp value.
4192  Label non_outermost_js;
4193  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
4194  __ li(t1, Operand(ExternalReference(js_entry_sp)));
4195  __ lw(t2, MemOperand(t1));
4196  __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
4197  __ sw(fp, MemOperand(t1));
4198  __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
4199  Label cont;
4200  __ b(&cont);
4201  __ nop(); // Branch delay slot nop.
4202  __ bind(&non_outermost_js);
4203  __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
4204  __ bind(&cont);
4205  __ push(t0);
4206 
4207  // Jump to a faked try block that does the invoke, with a faked catch
4208  // block that sets the pending exception.
4209  __ jmp(&invoke);
4210  __ bind(&handler_entry);
4211  handler_offset_ = handler_entry.pos();
4212  // Caught exception: Store result (exception) in the pending exception
4213  // field in the JSEnv and return a failure sentinel. Coming in here the
4214  // fp will be invalid because the PushTryHandler below sets it to 0 to
4215  // signal the existence of the JSEntry frame.
4216  __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
4217  isolate)));
4218  __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
4219  __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
4220  __ b(&exit); // b exposes branch delay slot.
4221  __ nop(); // Branch delay slot nop.
4222 
4223  // Invoke: Link this frame into the handler chain. There's only one
4224  // handler block in this code object, so its index is 0.
4225  __ bind(&invoke);
4226  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
4227  // If an exception not caught by another handler occurs, this handler
4228  // returns control to the code after the bal(&invoke) above, which
4229  // restores all kCalleeSaved registers (including cp and fp) to their
4230  // saved values before returning a failure to C.
4231 
4232  // Clear any pending exceptions.
4233  __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
4234  __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
4235  isolate)));
4236  __ sw(t1, MemOperand(t0));
4237 
4238  // Invoke the function by calling through JS entry trampoline builtin.
4239  // Notice that we cannot store a reference to the trampoline code directly in
4240  // this stub, because runtime stubs are not traversed when doing GC.
4241 
4242  // Registers:
4243  // a0: entry_address
4244  // a1: function
4245  // a2: receiver_pointer
4246  // a3: argc
4247  // s0: argv
4248  //
4249  // Stack:
4250  // handler frame
4251  // entry frame
4252  // callee saved registers + ra
4253  // 4 args slots
4254  // args
4255 
4256  if (is_construct) {
4257  ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
4258  isolate);
4259  __ li(t0, Operand(construct_entry));
4260  } else {
4261  ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
4262  __ li(t0, Operand(entry));
4263  }
4264  __ lw(t9, MemOperand(t0)); // Deref address.
4265 
4266  // Call JSEntryTrampoline.
4267  __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
4268  __ Call(t9);
4269 
4270  // Unlink this frame from the handler chain.
4271  __ PopTryHandler();
4272 
4273  __ bind(&exit); // v0 holds result
4274  // Check if the current stack frame is marked as the outermost JS frame.
4275  Label non_outermost_js_2;
4276  __ pop(t1);
4277  __ Branch(&non_outermost_js_2,
4278  ne,
4279  t1,
4280  Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
4281  __ li(t1, Operand(ExternalReference(js_entry_sp)));
4282  __ sw(zero_reg, MemOperand(t1));
4283  __ bind(&non_outermost_js_2);
4284 
4285  // Restore the top frame descriptors from the stack.
4286  __ pop(t1);
4287  __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
4288  isolate)));
4289  __ sw(t1, MemOperand(t0));
4290 
4291  // Reset the stack to the callee saved registers.
4292  __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
4293 
4294  if (CpuFeatures::IsSupported(FPU)) {
4295  CpuFeatures::Scope scope(FPU);
4296  // Restore callee-saved fpu registers.
4297  __ MultiPopFPU(kCalleeSavedFPU);
4298  }
4299 
4300  // Restore callee saved registers from the stack.
4301  __ MultiPop(kCalleeSaved | ra.bit());
4302  // Return.
4303  __ Jump(ra);
4304 }
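// The js_entry_sp handling above marks only the outermost JS entry: the slot
// is written when it is still zero, a marker recording which case applied is
// pushed, and the exit path clears the slot again only for the outermost
// frame.  Host-side sketch of that protocol (names are illustrative):

#include <cstdint>

enum class EntryKind { kOutermost, kInner };

inline EntryKind EnterJsSketch(uintptr_t* js_entry_sp, uintptr_t frame_pointer) {
  if (*js_entry_sp == 0) {            // Branch on zero above
    *js_entry_sp = frame_pointer;     // sw fp, MemOperand(t1)
    return EntryKind::kOutermost;
  }
  return EntryKind::kInner;
}

inline void LeaveJsSketch(uintptr_t* js_entry_sp, EntryKind kind) {
  if (kind == EntryKind::kOutermost) *js_entry_sp = 0;  // sw zero_reg above
}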
4305 
4306 
4307 // Uses registers a0 to t0.
4308 // Expected input (depending on whether args are in registers or on the stack):
4309 // * object: a0 or at sp + 1 * kPointerSize.
4310 // * function: a1 or at sp.
4311 //
4312 // An inlined call site may have been generated before calling this stub.
4313 // In this case the offset to the inline site to patch is passed on the stack,
4314 // in the safepoint slot for register t0.
4315 void InstanceofStub::Generate(MacroAssembler* masm) {
4316  // Call site inlining and patching implies arguments in registers.
4317  ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
4318  // ReturnTrueFalse is only implemented for inlined call sites.
4319  ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
4320 
4321  // Fixed register usage throughout the stub:
4322  const Register object = a0; // Object (lhs).
4323  Register map = a3; // Map of the object.
4324  const Register function = a1; // Function (rhs).
4325  const Register prototype = t0; // Prototype of the function.
4326  const Register inline_site = t5;
4327  const Register scratch = a2;
4328 
4329  const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize;
4330 
4331  Label slow, loop, is_instance, is_not_instance, not_js_object;
4332 
4333  if (!HasArgsInRegisters()) {
4334  __ lw(object, MemOperand(sp, 1 * kPointerSize));
4335  __ lw(function, MemOperand(sp, 0));
4336  }
4337 
4338  // Check that the left hand is a JS object and load map.
4339  __ JumpIfSmi(object, &not_js_object);
4340  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
4341 
4342  // If there is a call site cache don't look in the global cache, but do the
4343  // real lookup and update the call site cache.
4344  if (!HasCallSiteInlineCheck()) {
4345  Label miss;
4346  __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
4347  __ Branch(&miss, ne, function, Operand(at));
4348  __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
4349  __ Branch(&miss, ne, map, Operand(at));
4350  __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
4351  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4352 
4353  __ bind(&miss);
4354  }
4355 
4356  // Get the prototype of the function.
4357  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
4358 
4359  // Check that the function prototype is a JS object.
4360  __ JumpIfSmi(prototype, &slow);
4361  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
4362 
4363  // Update the global instanceof or call site inlined cache with the current
4364  // map and function. The cached answer will be set when it is known below.
4365  if (!HasCallSiteInlineCheck()) {
4366  __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
4367  __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
4368  } else {
4369  ASSERT(HasArgsInRegisters());
4370  // Patch the (relocated) inlined map check.
4371 
4372  // The offset was stored in t0 safepoint slot.
4373  // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
4374  __ LoadFromSafepointRegisterSlot(scratch, t0);
4375  __ Subu(inline_site, ra, scratch);
4376  // Get the map location in scratch and patch it.
4377  __ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch.
4378  __ sw(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
4379  }
4380 
4381  // Register mapping: a3 is object map and t0 is function prototype.
4382  // Get prototype of object into a2.
4383  __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
4384 
4385  // We don't need map any more. Use it as a scratch register.
4386  Register scratch2 = map;
4387  map = no_reg;
4388 
4389  // Loop through the prototype chain looking for the function prototype.
4390  __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
4391  __ bind(&loop);
4392  __ Branch(&is_instance, eq, scratch, Operand(prototype));
4393  __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
4394  __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
4395  __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
4396  __ Branch(&loop);
4397 
4398  __ bind(&is_instance);
4399  ASSERT(Smi::FromInt(0) == 0);
4400  if (!HasCallSiteInlineCheck()) {
4401  __ mov(v0, zero_reg);
4402  __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
4403  } else {
4404  // Patch the call site to return true.
4405  __ LoadRoot(v0, Heap::kTrueValueRootIndex);
4406  __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
4407  // Get the boolean result location in scratch and patch it.
4408  __ PatchRelocatedValue(inline_site, scratch, v0);
4409 
4410  if (!ReturnTrueFalseObject()) {
4411  ASSERT_EQ(Smi::FromInt(0), 0);
4412  __ mov(v0, zero_reg);
4413  }
4414  }
4415  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4416 
4417  __ bind(&is_not_instance);
4418  if (!HasCallSiteInlineCheck()) {
4419  __ li(v0, Operand(Smi::FromInt(1)));
4420  __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
4421  } else {
4422  // Patch the call site to return false.
4423  __ LoadRoot(v0, Heap::kFalseValueRootIndex);
4424  __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
4425  // Get the boolean result location in scratch and patch it.
4426  __ PatchRelocatedValue(inline_site, scratch, v0);
4427 
4428  if (!ReturnTrueFalseObject()) {
4429  __ li(v0, Operand(Smi::FromInt(1)));
4430  }
4431  }
4432 
4433  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4434 
4435  Label object_not_null, object_not_null_or_smi;
4436  __ bind(&not_js_object);
4437  // Before null, smi and string value checks, check that the rhs is a function
4438  // as for a non-function rhs an exception needs to be thrown.
4439  __ JumpIfSmi(function, &slow);
4440  __ GetObjectType(function, scratch2, scratch);
4441  __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
4442 
4443  // Null is not instance of anything.
4444  __ Branch(&object_not_null,
4445  ne,
4446  scratch,
4447  Operand(masm->isolate()->factory()->null_value()));
4448  __ li(v0, Operand(Smi::FromInt(1)));
4449  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4450 
4451  __ bind(&object_not_null);
4452  // Smi values are not instances of anything.
4453  __ JumpIfNotSmi(object, &object_not_null_or_smi);
4454  __ li(v0, Operand(Smi::FromInt(1)));
4455  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4456 
4457  __ bind(&object_not_null_or_smi);
4458  // String values are not instances of anything.
4459  __ IsObjectJSStringType(object, scratch, &slow);
4460  __ li(v0, Operand(Smi::FromInt(1)));
4461  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4462 
4463  // Slow-case. Tail call builtin.
4464  __ bind(&slow);
4465  if (!ReturnTrueFalseObject()) {
4466  if (HasArgsInRegisters()) {
4467  __ Push(a0, a1);
4468  }
4469  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
4470  } else {
4471  {
4472  FrameScope scope(masm, StackFrame::INTERNAL);
4473  __ Push(a0, a1);
4474  __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
4475  }
4476  __ mov(a0, v0);
4477  __ LoadRoot(v0, Heap::kTrueValueRootIndex);
4478  __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
4479  __ LoadRoot(v0, Heap::kFalseValueRootIndex);
4480  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4481  }
4482 }
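// The loop above walks the object's prototype chain, comparing each link
// against the function's prototype and stopping at null.  Minimal sketch of
// the same walk over a toy object model (not V8's Map/HeapObject layout):

struct ProtoSketch {
  const ProtoSketch* prototype;  // null terminates the chain
};

inline bool IsInstanceSketch(const ProtoSketch* object_prototype,
                             const ProtoSketch* function_prototype) {
  for (const ProtoSketch* p = object_prototype; p != nullptr; p = p->prototype) {
    if (p == function_prototype) return true;   // &is_instance
  }
  return false;                                 // &is_not_instance
}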
4483 
4484 
4485 Register InstanceofStub::left() { return a0; }
4486 
4487 
4488 Register InstanceofStub::right() { return a1; }
4489 
4490 
4491 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
4492  // The displacement is the offset of the last parameter (if any)
4493  // relative to the frame pointer.
4494  const int kDisplacement =
4495  StandardFrameConstants::kCallerSPOffset - kPointerSize;
4496 
4497  // Check that the key is a smi.
4498  Label slow;
4499  __ JumpIfNotSmi(a1, &slow);
4500 
4501  // Check if the calling frame is an arguments adaptor frame.
4502  Label adaptor;
4503  __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4504  __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
4505  __ Branch(&adaptor,
4506  eq,
4507  a3,
4508  Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4509 
4510  // Check index (a1) against formal parameters count limit passed in
4511  // through register a0. Use unsigned comparison to get negative
4512  // check for free.
4513  __ Branch(&slow, hs, a1, Operand(a0));
4514 
4515  // Read the argument from the stack and return it.
4516  __ subu(a3, a0, a1);
4517  __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
4518  __ Addu(a3, fp, Operand(t3));
4519  __ lw(v0, MemOperand(a3, kDisplacement));
4520  __ Ret();
4521 
4522  // Arguments adaptor case: Check index (a1) against actual arguments
4523  // limit found in the arguments adaptor frame. Use unsigned
4524  // comparison to get negative check for free.
4525  __ bind(&adaptor);
4526  __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4527  __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
4528 
4529  // Read the argument from the adaptor frame and return it.
4530  __ subu(a3, a0, a1);
4531  __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
4532  __ Addu(a3, a2, Operand(t3));
4533  __ lw(v0, MemOperand(a3, kDisplacement));
4534  __ Ret();
4535 
4536  // Slow-case: Handle non-smi or out-of-bounds access to arguments
4537  // by calling the runtime system.
4538  __ bind(&slow);
4539  __ push(a1);
4540  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
4541 }
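// Both bounds checks above lean on the "unsigned comparison gets the negative
// check for free" trick: a negative index reinterpreted as unsigned becomes a
// huge value, so a single hs/Ugreater_equal compare rejects it along with
// indices that are too large.  Sketch:

#include <cstdint>

inline bool IndexInBoundsSketch(int32_t index, int32_t length) {
  return static_cast<uint32_t>(index) < static_cast<uint32_t>(length);
}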
4542 
4543 
4544 void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
4545  // sp[0] : number of parameters
4546  // sp[4] : receiver displacement
4547  // sp[8] : function
4548  // Check if the calling frame is an arguments adaptor frame.
4549  Label runtime;
4550  __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4551  __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
4552  __ Branch(&runtime,
4553  ne,
4554  a2,
4555  Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4556 
4557  // Patch the arguments.length and the parameters pointer in the current frame.
4558  __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
4559  __ sw(a2, MemOperand(sp, 0 * kPointerSize));
4560  __ sll(t3, a2, 1);
4561  __ Addu(a3, a3, Operand(t3));
4562  __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
4563  __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4564 
4565  __ bind(&runtime);
4566  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4567 }
4568 
4569 
4570 void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
4571  // Stack layout:
4572  // sp[0] : number of parameters (tagged)
4573  // sp[4] : address of receiver argument
4574  // sp[8] : function
4575  // Registers used over whole function:
4576  // t2 : allocated object (tagged)
4577  // t5 : mapped parameter count (tagged)
4578 
4579  __ lw(a1, MemOperand(sp, 0 * kPointerSize));
4580  // a1 = parameter count (tagged)
4581 
4582  // Check if the calling frame is an arguments adaptor frame.
4583  Label runtime;
4584  Label adaptor_frame, try_allocate;
4585  __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4586  __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
4587  __ Branch(&adaptor_frame,
4588  eq,
4589  a2,
4590  Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4591 
4592  // No adaptor, parameter count = argument count.
4593  __ mov(a2, a1);
4594  __ b(&try_allocate);
4595  __ nop(); // Branch delay slot nop.
4596 
4597  // We have an adaptor frame. Patch the parameters pointer.
4598  __ bind(&adaptor_frame);
4599  __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
4600  __ sll(t6, a2, 1);
4601  __ Addu(a3, a3, Operand(t6));
4602  __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
4603  __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4604 
4605  // a1 = parameter count (tagged)
4606  // a2 = argument count (tagged)
4607  // Compute the mapped parameter count = min(a1, a2) in a1.
4608  Label skip_min;
4609  __ Branch(&skip_min, lt, a1, Operand(a2));
4610  __ mov(a1, a2);
4611  __ bind(&skip_min);
4612 
4613  __ bind(&try_allocate);
4614 
4615  // Compute the sizes of backing store, parameter map, and arguments object.
4616  // 1. Parameter map, has 2 extra words containing context and backing store.
4617  const int kParameterMapHeaderSize =
4618  FixedArray::kHeaderSize + 2 * kPointerSize;
4619  // If there are no mapped parameters, we do not need the parameter_map.
4620  Label param_map_size;
4621  ASSERT_EQ(0, Smi::FromInt(0));
4622  __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
4623  __ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
4624  __ sll(t5, a1, 1);
4625  __ addiu(t5, t5, kParameterMapHeaderSize);
4626  __ bind(&param_map_size);
4627 
4628  // 2. Backing store.
4629  __ sll(t6, a2, 1);
4630  __ Addu(t5, t5, Operand(t6));
4631  __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
4632 
4633  // 3. Arguments object.
4634  __ Addu(t5, t5, Operand(Heap::kArgumentsObjectSize));
4635 
4636  // Do the allocation of all three objects in one go.
4637  __ AllocateInNewSpace(t5, v0, a3, t0, &runtime, TAG_OBJECT);
4638 
4639  // v0 = address of new object(s) (tagged)
4640  // a2 = argument count (tagged)
4641  // Get the arguments boilerplate from the current native context into t0.
4642  const int kNormalOffset =
4643  Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
4644  const int kAliasedOffset =
4645  Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
4646 
4647  __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4648  __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
4649  Label skip2_ne, skip2_eq;
4650  __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
4651  __ lw(t0, MemOperand(t0, kNormalOffset));
4652  __ bind(&skip2_ne);
4653 
4654  __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
4655  __ lw(t0, MemOperand(t0, kAliasedOffset));
4656  __ bind(&skip2_eq);
4657 
4658  // v0 = address of new object (tagged)
4659  // a1 = mapped parameter count (tagged)
4660  // a2 = argument count (tagged)
4661  // t0 = address of boilerplate object (tagged)
4662  // Copy the JS object part.
4663  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
4664  __ lw(a3, FieldMemOperand(t0, i));
4665  __ sw(a3, FieldMemOperand(v0, i));
4666  }
4667 
4668  // Set up the callee in-object property.
4669  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
4670  __ lw(a3, MemOperand(sp, 2 * kPointerSize));
4671  const int kCalleeOffset = JSObject::kHeaderSize +
4672  Heap::kArgumentsCalleeIndex * kPointerSize;
4673  __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
4674 
4675  // Use the length (smi tagged) and set that as an in-object property too.
4676  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
4677  const int kLengthOffset = JSObject::kHeaderSize +
4678  Heap::kArgumentsLengthIndex * kPointerSize;
4679  __ sw(a2, FieldMemOperand(v0, kLengthOffset));
4680 
4681  // Set up the elements pointer in the allocated arguments object.
4682  // If we allocated a parameter map, t0 will point there, otherwise
4683  // it will point to the backing store.
4684  __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize));
4685  __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
4686 
4687  // v0 = address of new object (tagged)
4688  // a1 = mapped parameter count (tagged)
4689  // a2 = argument count (tagged)
4690  // t0 = address of parameter map or backing store (tagged)
4691  // Initialize parameter map. If there are no mapped arguments, we're done.
4692  Label skip_parameter_map;
4693  Label skip3;
4694  __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
4695  // Move backing store address to a3, because it is
4696  // expected there when filling in the unmapped arguments.
4697  __ mov(a3, t0);
4698  __ bind(&skip3);
4699 
4700  __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
4701 
4702  __ LoadRoot(t2, Heap::kNonStrictArgumentsElementsMapRootIndex);
4703  __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
4704  __ Addu(t2, a1, Operand(Smi::FromInt(2)));
4705  __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
4706  __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
4707  __ sll(t6, a1, 1);
4708  __ Addu(t2, t0, Operand(t6));
4709  __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
4710  __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
4711 
4712  // Copy the parameter slots and the holes in the arguments.
4713  // We need to fill in mapped_parameter_count slots. They index the context,
4714  // where parameters are stored in reverse order, at
4715  // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
4716  // The mapped parameter thus need to get indices
4717  // MIN_CONTEXT_SLOTS+parameter_count-1 ..
4718  // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
4719  // We loop from right to left.
4720  Label parameters_loop, parameters_test;
4721  __ mov(t2, a1);
4722  __ lw(t5, MemOperand(sp, 0 * kPointerSize));
4723  __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
4724  __ Subu(t5, t5, Operand(a1));
4725  __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
4726  __ sll(t6, t2, 1);
4727  __ Addu(a3, t0, Operand(t6));
4728  __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
4729 
4730  // t2 = loop variable (tagged)
4731  // a1 = mapping index (tagged)
4732  // a3 = address of backing store (tagged)
4733  // t0 = address of parameter map (tagged)
4734  // t1 = temporary scratch (a.o., for address calculation)
4735  // t3 = the hole value
4736  __ jmp(&parameters_test);
4737 
4738  __ bind(&parameters_loop);
4739  __ Subu(t2, t2, Operand(Smi::FromInt(1)));
4740  __ sll(t1, t2, 1);
4741  __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
4742  __ Addu(t6, t0, t1);
4743  __ sw(t5, MemOperand(t6));
4744  __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
4745  __ Addu(t6, a3, t1);
4746  __ sw(t3, MemOperand(t6));
4747  __ Addu(t5, t5, Operand(Smi::FromInt(1)));
4748  __ bind(&parameters_test);
4749  __ Branch(&parameters_loop, ne, t2, Operand(Smi::FromInt(0)));
4750 
4751  __ bind(&skip_parameter_map);
4752  // a2 = argument count (tagged)
4753  // a3 = address of backing store (tagged)
4754  // t1 = scratch
4755  // Copy arguments header and remaining slots (if there are any).
4756  __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
4757  __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
4758  __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
4759 
4760  Label arguments_loop, arguments_test;
4761  __ mov(t5, a1);
4762  __ lw(t0, MemOperand(sp, 1 * kPointerSize));
4763  __ sll(t6, t5, 1);
4764  __ Subu(t0, t0, Operand(t6));
4765  __ jmp(&arguments_test);
4766 
4767  __ bind(&arguments_loop);
4768  __ Subu(t0, t0, Operand(kPointerSize));
4769  __ lw(t2, MemOperand(t0, 0));
4770  __ sll(t6, t5, 1);
4771  __ Addu(t1, a3, Operand(t6));
4772  __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
4773  __ Addu(t5, t5, Operand(Smi::FromInt(1)));
4774 
4775  __ bind(&arguments_test);
4776  __ Branch(&arguments_loop, lt, t5, Operand(a2));
4777 
4778  // Return and remove the on-stack parameters.
4779  __ DropAndRet(3);
4780 
4781  // Do the runtime call to allocate the arguments object.
4782  // a2 = argument count (tagged)
4783  __ bind(&runtime);
4784  __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
4785  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4786 }
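// try_allocate above sizes a single allocation holding up to three objects:
// an optional parameter map (skipped when nothing is mapped), the backing
// FixedArray, and the arguments object itself.  Byte-count sketch; the header
// and object sizes are parameters here, not V8's actual constants:

#include <cstddef>

inline size_t ArgumentsAllocationSizeSketch(size_t mapped_count,
                                            size_t argument_count,
                                            size_t pointer_size,
                                            size_t parameter_map_header,
                                            size_t fixed_array_header,
                                            size_t arguments_object_size) {
  size_t size = 0;
  if (mapped_count > 0) {
    size += parameter_map_header + mapped_count * pointer_size;  // 1. map
  }
  size += fixed_array_header + argument_count * pointer_size;    // 2. store
  size += arguments_object_size;                                 // 3. object
  return size;
}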
4787 
4788 
4789 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
4790  // sp[0] : number of parameters
4791  // sp[4] : receiver displacement
4792  // sp[8] : function
4793  // Check if the calling frame is an arguments adaptor frame.
4794  Label adaptor_frame, try_allocate, runtime;
4795  __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4796  __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
4797  __ Branch(&adaptor_frame,
4798  eq,
4799  a3,
4800  Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4801 
4802  // Get the length from the frame.
4803  __ lw(a1, MemOperand(sp, 0));
4804  __ Branch(&try_allocate);
4805 
4806  // Patch the arguments.length and the parameters pointer.
4807  __ bind(&adaptor_frame);
4808  __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4809  __ sw(a1, MemOperand(sp, 0));
4810  __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
4811  __ Addu(a3, a2, Operand(at));
4812 
4813  __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
4814  __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4815 
4816  // Try the new space allocation. Start out with computing the size
4817  // of the arguments object and the elements array in words.
4818  Label add_arguments_object;
4819  __ bind(&try_allocate);
4820  __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
4821  __ srl(a1, a1, kSmiTagSize);
4822 
4823  __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
4824  __ bind(&add_arguments_object);
4825  __ Addu(a1, a1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
4826 
4827  // Do the allocation of both objects in one go.
4828  __ AllocateInNewSpace(a1,
4829  v0,
4830  a2,
4831  a3,
4832  &runtime,
4833  static_cast<AllocationFlags>(TAG_OBJECT |
4834  SIZE_IN_WORDS));
4835 
4836  // Get the arguments boilerplate from the current native context.
4837  __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4838  __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
4839  __ lw(t0, MemOperand(t0, Context::SlotOffset(
4840  Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
4841 
4842  // Copy the JS object part.
4843  __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
4844 
4845  // Get the length (smi tagged) and set that as an in-object property too.
4846  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
4847  __ lw(a1, MemOperand(sp, 0 * kPointerSize));
4848  __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
4849  Heap::kArgumentsLengthIndex * kPointerSize));
4850 
4851  Label done;
4852  __ Branch(&done, eq, a1, Operand(zero_reg));
4853 
4854  // Get the parameters pointer from the stack.
4855  __ lw(a2, MemOperand(sp, 1 * kPointerSize));
4856 
4857  // Set up the elements pointer in the allocated arguments object and
4858  // initialize the header in the elements fixed array.
4859  __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict));
4860  __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
4861  __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
4862  __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
4863  __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
4864  // Untag the length for the loop.
4865  __ srl(a1, a1, kSmiTagSize);
4866 
4867  // Copy the fixed array slots.
4868  Label loop;
4869  // Set up t0 to point to the first array slot.
4870  __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4871  __ bind(&loop);
4872  // Pre-decrement a2 with kPointerSize on each iteration.
4873  // Pre-decrement in order to skip receiver.
4874  __ Addu(a2, a2, Operand(-kPointerSize));
4875  __ lw(a3, MemOperand(a2));
4876  // Post-increment t0 with kPointerSize on each iteration.
4877  __ sw(a3, MemOperand(t0));
4878  __ Addu(t0, t0, Operand(kPointerSize));
4879  __ Subu(a1, a1, Operand(1));
4880  __ Branch(&loop, ne, a1, Operand(zero_reg));
4881 
4882  // Return and remove the on-stack parameters.
4883  __ bind(&done);
4884  __ DropAndRet(3);
4885 
4886  // Do the runtime call to allocate the arguments object.
4887  __ bind(&runtime);
4888  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
4889 }
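// The copy loop above walks the parameters downwards, pre-decrementing so the
// receiver is skipped, while filling the new FixedArray upwards.  Sketch over
// plain word arrays (illustrative only):

#include <cstdint>

inline void CopyArgumentsSketch(const uint32_t* params_end,  // one past the args
                                uint32_t* dest, int count) {
  const uint32_t* src = params_end;
  while (count-- > 0) {
    --src;            // pre-decrement: skip the receiver, then walk down
    *dest++ = *src;   // post-increment destination slot
  }
}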
4890 
4891 
4892 void RegExpExecStub::Generate(MacroAssembler* masm) {
4893  // Just jump directly to runtime if native RegExp is not selected at compile
4894  // time, or if the regexp entry in generated code is turned off by a runtime
4895  // switch or at compilation.
4896 #ifdef V8_INTERPRETED_REGEXP
4897  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4898 #else // V8_INTERPRETED_REGEXP
4899 
4900  // Stack frame on entry.
4901  // sp[0]: last_match_info (expected JSArray)
4902  // sp[4]: previous index
4903  // sp[8]: subject string
4904  // sp[12]: JSRegExp object
4905 
4906  const int kLastMatchInfoOffset = 0 * kPointerSize;
4907  const int kPreviousIndexOffset = 1 * kPointerSize;
4908  const int kSubjectOffset = 2 * kPointerSize;
4909  const int kJSRegExpOffset = 3 * kPointerSize;
4910 
4911  Isolate* isolate = masm->isolate();
4912 
4913  Label runtime, invoke_regexp;
4914 
4915  // Allocation of registers for this function. These are in callee save
4916  // registers and will be preserved by the call to the native RegExp code, as
4917  // this code is called using the normal C calling convention. When calling
4918  // directly from generated code the native RegExp code will not do a GC and
4919  // therefore the content of these registers are safe to use after the call.
4920  // MIPS - using s0..s2, since we are not using CEntry Stub.
4921  Register subject = s0;
4922  Register regexp_data = s1;
4923  Register last_match_info_elements = s2;
4924 
4925  // Ensure that a RegExp stack is allocated.
4926  ExternalReference address_of_regexp_stack_memory_address =
4927  ExternalReference::address_of_regexp_stack_memory_address(
4928  isolate);
4929  ExternalReference address_of_regexp_stack_memory_size =
4930  ExternalReference::address_of_regexp_stack_memory_size(isolate);
4931  __ li(a0, Operand(address_of_regexp_stack_memory_size));
4932  __ lw(a0, MemOperand(a0, 0));
4933  __ Branch(&runtime, eq, a0, Operand(zero_reg));
4934 
4935  // Check that the first argument is a JSRegExp object.
4936  __ lw(a0, MemOperand(sp, kJSRegExpOffset));
4937  STATIC_ASSERT(kSmiTag == 0);
4938  __ JumpIfSmi(a0, &runtime);
4939  __ GetObjectType(a0, a1, a1);
4940  __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
4941 
4942  // Check that the RegExp has been compiled (data contains a fixed array).
4943  __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
4944  if (FLAG_debug_code) {
4945  __ And(t0, regexp_data, Operand(kSmiTagMask));
4946  __ Check(nz,
4947  "Unexpected type for RegExp data, FixedArray expected",
4948  t0,
4949  Operand(zero_reg));
4950  __ GetObjectType(regexp_data, a0, a0);
4951  __ Check(eq,
4952  "Unexpected type for RegExp data, FixedArray expected",
4953  a0,
4954  Operand(FIXED_ARRAY_TYPE));
4955  }
4956 
4957  // regexp_data: RegExp data (FixedArray)
4958  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
4959  __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
4960  __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
4961 
4962  // regexp_data: RegExp data (FixedArray)
4963  // Check that the number of captures fit in the static offsets vector buffer.
4964  __ lw(a2,
4965  FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
4966  // Calculate number of capture registers (number_of_captures + 1) * 2. This
4967  // uses the assumption that smis are 2 * their untagged value.
4968  STATIC_ASSERT(kSmiTag == 0);
4969  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
4970  __ Addu(a2, a2, Operand(2)); // a2 was a smi.
4971  // Check that the static offsets vector buffer is large enough.
4972  __ Branch(
4973  &runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize));
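  // Host-side restatement of the arithmetic above (illustrative, not generated
  // code): with the 32-bit smi encoding, smi(n) == 2 * n, so adding 2 to the
  // smi directly yields the untagged register count:
  //
  //   int capture_registers = (number_of_captures + 1) * 2;
  //   // via smis: capture_registers == smi_of(number_of_captures) + 2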
4974 
4975  // a2: Number of capture registers
4976  // regexp_data: RegExp data (FixedArray)
4977  // Check that the second argument is a string.
4978  __ lw(subject, MemOperand(sp, kSubjectOffset));
4979  __ JumpIfSmi(subject, &runtime);
4980  __ GetObjectType(subject, a0, a0);
4981  __ And(a0, a0, Operand(kIsNotStringMask));
4982  STATIC_ASSERT(kStringTag == 0);
4983  __ Branch(&runtime, ne, a0, Operand(zero_reg));
4984 
4985  // Get the length of the string to r3.
4986  __ lw(a3, FieldMemOperand(subject, String::kLengthOffset));
4987 
4988  // a2: Number of capture registers
4989  // a3: Length of subject string as a smi
4990  // subject: Subject string
4991  // regexp_data: RegExp data (FixedArray)
4992  // Check that the third argument is a positive smi less than the subject
4993  // string length. A negative value will be greater (unsigned comparison).
4994  __ lw(a0, MemOperand(sp, kPreviousIndexOffset));
4995  __ JumpIfNotSmi(a0, &runtime);
4996  __ Branch(&runtime, ls, a3, Operand(a0));
4997 
4998  // a2: Number of capture registers
4999  // subject: Subject string
5000  // regexp_data: RegExp data (FixedArray)
5001  // Check that the fourth object is a JSArray object.
5002  __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
5003  __ JumpIfSmi(a0, &runtime);
5004  __ GetObjectType(a0, a1, a1);
5005  __ Branch(&runtime, ne, a1, Operand(JS_ARRAY_TYPE));
5006  // Check that the JSArray is in fast case.
5007  __ lw(last_match_info_elements,
5008  FieldMemOperand(a0, JSArray::kElementsOffset));
5009  __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
5010  __ Branch(&runtime, ne, a0, Operand(
5011  isolate->factory()->fixed_array_map()));
5012  // Check that the last match info has space for the capture registers and the
5013  // additional information.
5014  __ lw(a0,
5015  FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
5016  __ Addu(a2, a2, Operand(RegExpImpl::kLastMatchOverhead));
5017  __ sra(at, a0, kSmiTagSize); // Untag length for comparison.
5018  __ Branch(&runtime, gt, a2, Operand(at));
5019 
5020  // Reset offset for possibly sliced string.
5021  __ mov(t0, zero_reg);
5022  // subject: Subject string
5023  // regexp_data: RegExp data (FixedArray)
5024  // Check the representation and encoding of the subject string.
5025  Label seq_string;
5026  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
5027  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
5028  // First check for flat string. None of the following string type tests will
5029  // succeed if subject is not a string or a short external string.
5030  __ And(a1,
5031  a0,
5032  Operand(kIsNotStringMask |
5033  kStringRepresentationMask |
5034  kShortExternalStringMask));
5035  STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
5036  __ Branch(&seq_string, eq, a1, Operand(zero_reg));
5037 
5038  // subject: Subject string
5039  // a0: instance type of Subject string
5040  // regexp_data: RegExp data (FixedArray)
5041  // a1: whether subject is a string and if yes, its string representation
5042  // Check for flat cons string or sliced string.
5043  // A flat cons string is a cons string where the second part is the empty
5044  // string. In that case the subject string is just the first part of the cons
5045  // string. Also in this case the first part of the cons string is known to be
5046  // a sequential string or an external string.
5047  // In the case of a sliced string its offset has to be taken into account.
5048  Label cons_string, external_string, check_encoding;
5049  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
5050  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
5051  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
5052  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
5053  __ Branch(&cons_string, lt, a1, Operand(kExternalStringTag));
5054  __ Branch(&external_string, eq, a1, Operand(kExternalStringTag));
5055 
5056  // Catch non-string subject or short external string.
5058  __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
5059  __ Branch(&runtime, ne, at, Operand(zero_reg));
5060 
5061  // String is sliced.
5062  __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
5063  __ sra(t0, t0, kSmiTagSize);
5064  __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
5065  // t0: offset of sliced string, untagged.
5066  __ jmp(&check_encoding);
5067  // String is a cons string, check whether it is flat.
5068  __ bind(&cons_string);
5069  __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
5070  __ LoadRoot(a1, Heap::kEmptyStringRootIndex);
5071  __ Branch(&runtime, ne, a0, Operand(a1));
5072  __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
5073  // Is first part of cons or parent of slice a flat string?
5074  __ bind(&check_encoding);
5075  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
5078  __ And(at, a0, Operand(kStringRepresentationMask));
5079  __ Branch(&external_string, ne, at, Operand(zero_reg));
5080 
5081  __ bind(&seq_string);
5082  // subject: Subject string
5083  // regexp_data: RegExp data (FixedArray)
5084  // a0: Instance type of subject string
5088  // Find the code object based on the assumptions above.
5089  __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII.
5090  __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
5091  __ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
5092  __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
5093  __ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 with the UC16 code (t1).
5094 
5095  // Check that the irregexp code has been generated for the actual string
5096  // encoding. If it has, the field contains a code object; otherwise it contains
5097  // a smi (code flushing support).
5098  __ JumpIfSmi(t9, &runtime);
5099 
5100  // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
5101  // t9: code
5102  // subject: Subject string
5103  // regexp_data: RegExp data (FixedArray)
5104  // Load used arguments before starting to push arguments for the call to the
5105  // native RegExp code, to avoid handling a changing stack height.
5106  __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
5107  __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
5108 
5109  // a1: previous index
5110  // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
5111  // t9: code
5112  // subject: Subject string
5113  // regexp_data: RegExp data (FixedArray)
5114  // All checks done. Now push arguments for native regexp code.
5115  __ IncrementCounter(isolate->counters()->regexp_entry_native(),
5116  1, a0, a2);
5117 
5118  // Isolates: note we add an additional parameter here (isolate pointer).
5119  const int kRegExpExecuteArguments = 9;
5120  const int kParameterRegisters = 4;
5121  __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
5122 
5123  // Stack pointer now points to cell where return address is to be written.
5124  // Arguments are before that on the stack or in registers, meaning we
5125  // treat the return address as argument 5. Thus every argument after that
5126  // needs to be shifted back by 1. Since DirectCEntryStub will handle
5127  // allocating space for the c argument slots, we don't need to calculate
5128  // that into the argument positions on the stack. This is how the stack will
5129  // look (sp meaning the value of sp at this moment):
5130  // [sp + 5] - Argument 9
5131  // [sp + 4] - Argument 8
5132  // [sp + 3] - Argument 7
5133  // [sp + 2] - Argument 6
5134  // [sp + 1] - Argument 5
5135  // [sp + 0] - saved ra
5136 
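// For reference, the nine stores below set up what is, in effect, a C call of
// the following shape. This prototype is only an illustrative sketch derived
// from the argument comments in this stub; the parameter names are assumptions
// and not the declaration used by the generated RegExp code itself.
//
//   int Execute(String* subject,            // argument 1, passed in a0
//               int previous_index,         // argument 2, passed in a1
//               const byte* input_start,    // argument 3, passed in a2
//               const byte* input_end,      // argument 4, passed in a3
//               int* static_offsets_vector, // argument 5, on the stack
//               int capture_register_count, // argument 6, 0 (forces non-global)
//               Address stack_base,         // argument 7, backtrack stack top
//               int direct_call,            // argument 8, 1 for direct JS call
//               Isolate* isolate);          // argument 9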
5137  // Argument 9: Pass current isolate address.
5138  // CFunctionArgumentOperand handles MIPS stack argument slots.
5139  __ li(a0, Operand(ExternalReference::isolate_address()));
5140  __ sw(a0, MemOperand(sp, 5 * kPointerSize));
5141 
5142  // Argument 8: Indicate that this is a direct call from JavaScript.
5143  __ li(a0, Operand(1));
5144  __ sw(a0, MemOperand(sp, 4 * kPointerSize));
5145 
5146  // Argument 7: Start (high end) of backtracking stack memory area.
5147  __ li(a0, Operand(address_of_regexp_stack_memory_address));
5148  __ lw(a0, MemOperand(a0, 0));
5149  __ li(a2, Operand(address_of_regexp_stack_memory_size));
5150  __ lw(a2, MemOperand(a2, 0));
5151  __ addu(a0, a0, a2);
5152  __ sw(a0, MemOperand(sp, 3 * kPointerSize));
5153 
5154  // Argument 6: Set the number of capture registers to zero to force global
5155  // regexps to behave as non-global. This does not affect non-global regexps.
5156  __ mov(a0, zero_reg);
5157  __ sw(a0, MemOperand(sp, 2 * kPointerSize));
5158 
5159  // Argument 5: static offsets vector buffer.
5160  __ li(a0, Operand(
5161  ExternalReference::address_of_static_offsets_vector(isolate)));
5162  __ sw(a0, MemOperand(sp, 1 * kPointerSize));
5163 
5164  // For arguments 4 and 3 get string length, calculate start of string data
5165  // and calculate the shift of the index (0 for ASCII and 1 for two byte).
5166  __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
5167  __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
5168  // Load the length from the original subject string from the previous stack
5169  // frame. Therefore we have to use fp, which points exactly to two pointer
5170  // sizes below the previous sp. (Because creating a new stack frame pushes
5171  // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
5172  __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
5173  // If slice offset is not 0, load the length from the original sliced string.
5174  // Argument 4, a3: End of string data
5175  // Argument 3, a2: Start of string data
5176  // Prepare start and end index of the input.
5177  __ sllv(t1, t0, a3);
5178  __ addu(t0, t2, t1);
5179  __ sllv(t1, a1, a3);
5180  __ addu(a2, t0, t1);
5181 
5182  __ lw(t2, FieldMemOperand(subject, String::kLengthOffset));
5183  __ sra(t2, t2, kSmiTagSize);
5184  __ sllv(t1, t2, a3);
5185  __ addu(a3, t0, t1);
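// The address arithmetic above amounts to the following (an illustrative
// sketch, with shift = 0 for ASCII and 1 for two-byte, and slice_offset = 0
// for non-sliced subjects):
//   data = subject + SeqString::kHeaderSize - kHeapObjectTag
//          + (slice_offset << shift)
//   a2   = data + (previous_index << shift)          // argument 3: input start
//   a3   = data + (original_subject_length << shift) // argument 4: input end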
5186  // Argument 2 (a1): Previous index.
5187  // Already there
5188 
5189  // Argument 1 (a0): Subject string.
5190  __ mov(a0, subject);
5191 
5192  // Locate the code entry and call it.
5193  __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
5194  DirectCEntryStub stub;
5195  stub.GenerateCall(masm, t9);
5196 
5197  __ LeaveExitFrame(false, no_reg);
5198 
5199  // v0: result
5200  // subject: subject string (callee saved)
5201  // regexp_data: RegExp data (callee saved)
5202  // last_match_info_elements: Last match info elements (callee saved)
5203 
5204  // Check the result.
5205 
5206  Label success;
5207  __ Branch(&success, eq, v0, Operand(1));
5208  // We expect exactly one result since we force the called regexp to behave
5209  // as non-global.
5210  Label failure;
5211  __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
5212  // If not an exception, it can only be retry. Handle that in the runtime system.
5213  __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
5214  // Result must now be exception. If there is no pending exception already a
5215  // stack overflow (on the backtrack stack) was detected in RegExp code but
5216  // the exception has not been created yet. Handle that in the runtime system.
5217  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
5218  __ li(a1, Operand(isolate->factory()->the_hole_value()));
5219  __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
5220  isolate)));
5221  __ lw(v0, MemOperand(a2, 0));
5222  __ Branch(&runtime, eq, v0, Operand(a1));
5223 
5224  __ sw(a1, MemOperand(a2, 0)); // Clear pending exception.
5225 
5226  // Check if the exception is a termination. If so, throw as uncatchable.
5227  __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
5228  Label termination_exception;
5229  __ Branch(&termination_exception, eq, v0, Operand(a0));
5230 
5231  __ Throw(v0);
5232 
5233  __ bind(&termination_exception);
5234  __ ThrowUncatchable(v0);
5235 
5236  __ bind(&failure);
5237  // For failure and exception return null.
5238  __ li(v0, Operand(isolate->factory()->null_value()));
5239  __ DropAndRet(4);
5240 
5241  // Process the result from the native regexp code.
5242  __ bind(&success);
5243  __ lw(a1,
5245  // Calculate number of capture registers (number_of_captures + 1) * 2.
5246  STATIC_ASSERT(kSmiTag == 0);
5248  __ Addu(a1, a1, Operand(2)); // a1 was a smi.
5249 
5250  // a1: number of capture registers
5251  // subject: subject string
5252  // Store the capture count.
5253  __ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi.
5254  __ sw(a2, FieldMemOperand(last_match_info_elements,
5256  // Store last subject and last input.
5257  __ sw(subject,
5258  FieldMemOperand(last_match_info_elements,
5260  __ mov(a2, subject);
5261  __ RecordWriteField(last_match_info_elements,
5263  a2,
5264  t3,
5266  kDontSaveFPRegs);
5267  __ sw(subject,
5268  FieldMemOperand(last_match_info_elements,
5270  __ RecordWriteField(last_match_info_elements,
5272  subject,
5273  t3,
5275  kDontSaveFPRegs);
5276 
5277  // Get the static offsets vector filled by the native regexp code.
5278  ExternalReference address_of_static_offsets_vector =
5279  ExternalReference::address_of_static_offsets_vector(isolate);
5280  __ li(a2, Operand(address_of_static_offsets_vector));
5281 
5282  // a1: number of capture registers
5283  // a2: offsets vector
5284  Label next_capture, done;
5285  // Capture register counter starts from number of capture registers and
5286  // counts down until wrapping after zero.
5287  __ Addu(a0,
5288  last_match_info_elements,
5290  __ bind(&next_capture);
5291  __ Subu(a1, a1, Operand(1));
5292  __ Branch(&done, lt, a1, Operand(zero_reg));
5293  // Read the value from the static offsets vector buffer.
5294  __ lw(a3, MemOperand(a2, 0));
5295  __ addiu(a2, a2, kPointerSize);
5296  // Store the smi value in the last match info.
5297  __ sll(a3, a3, kSmiTagSize); // Convert to Smi.
5298  __ sw(a3, MemOperand(a0, 0));
5299  __ Branch(&next_capture, USE_DELAY_SLOT);
5300  __ addiu(a0, a0, kPointerSize); // In branch delay slot.
5301 
5302  __ bind(&done);
5303 
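// The capture-copy loop above amounts to the following C++ (an illustrative
// sketch only; 'offsets_vector' is the static offsets vector filled by the
// native code and 'first_capture' names the first capture slot in the last
// match info elements):
//
//   for (int i = 0; i < number_of_capture_registers; i++) {
//     last_match_info_elements[first_capture + i] =
//         Smi::FromInt(offsets_vector[i]);
//   }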
5304  // Return last match info.
5305  __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
5306  __ DropAndRet(4);
5307 
5308  // External string. Short external strings have already been ruled out.
5309  // a0: scratch
5310  __ bind(&external_string);
5311  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
5313  if (FLAG_debug_code) {
5314  // Assert that we do not have a cons or slice (indirect strings) here.
5315  // Sequential strings have already been ruled out.
5316  __ And(at, a0, Operand(kIsIndirectStringMask));
5317  __ Assert(eq,
5318  "external string expected, but not found",
5319  at,
5320  Operand(zero_reg));
5321  }
5322  __ lw(subject,
5324  // Move the pointer so that offset-wise, it looks like a sequential string.
5326  __ Subu(subject,
5327  subject,
5329  __ jmp(&seq_string);
5330 
5331  // Do the runtime call to execute the regexp.
5332  __ bind(&runtime);
5333  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
5334 #endif // V8_INTERPRETED_REGEXP
5335 }
5336 
5337 
5338 void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
5339  const int kMaxInlineLength = 100;
5340  Label slowcase;
5341  Label done;
5342  __ lw(a1, MemOperand(sp, kPointerSize * 2));
5343  STATIC_ASSERT(kSmiTag == 0);
5344  STATIC_ASSERT(kSmiTagSize == 1);
5345  __ JumpIfNotSmi(a1, &slowcase);
5346  __ Branch(&slowcase, hi, a1, Operand(Smi::FromInt(kMaxInlineLength)));
5347  // Smi-tagging is equivalent to multiplying by 2.
5348  // Allocate RegExpResult followed by FixedArray, with the total size (in words) in a2.
5349  // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
5350  // Elements: [Map][Length][..elements..]
5351  // Size of JSArray with two in-object properties and the header of a
5352  // FixedArray.
5353  int objects_size =
5355  __ srl(t1, a1, kSmiTagSize + kSmiShiftSize);
5356  __ Addu(a2, t1, Operand(objects_size));
5357  __ AllocateInNewSpace(
5358  a2, // In: Size, in words.
5359  v0, // Out: Start of allocation (tagged).
5360  a3, // Scratch register.
5361  t0, // Scratch register.
5362  &slowcase,
5363  static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
5364  // v0: Start of allocated area, object-tagged.
5365  // a1: Number of elements in array, as smi.
5366  // t1: Number of elements, untagged.
5367 
5368  // Set JSArray map to global.regexp_result_map().
5369  // Set empty properties FixedArray.
5370  // Set elements to point to FixedArray allocated right after the JSArray.
5371  // Interleave operations for better latency.
5373  __ Addu(a3, v0, Operand(JSRegExpResult::kSize));
5374  __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
5380 
5381  // Set input, index and length fields from arguments.
5382  __ lw(a1, MemOperand(sp, kPointerSize * 0));
5383  __ lw(a2, MemOperand(sp, kPointerSize * 1));
5384  __ lw(t2, MemOperand(sp, kPointerSize * 2));
5388 
5389  // Fill out the elements FixedArray.
5390  // v0: JSArray, tagged.
5391  // a3: FixedArray, tagged.
5392  // t1: Number of elements in array, untagged.
5393 
5394  // Set map.
5395  __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map()));
5397  // Set FixedArray length.
5398  __ sll(t2, t1, kSmiTagSize);
5400  // Fill contents of fixed-array with undefined.
5401  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
5402  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
5403  // Fill fixed array elements with undefined.
5404  // v0: JSArray, tagged.
5405  // a2: undefined.
5406  // a3: Start of elements in FixedArray.
5407  // t1: Number of elements to fill.
5408  Label loop;
5409  __ sll(t1, t1, kPointerSizeLog2); // Convert num elements to num bytes.
5410  __ addu(t1, t1, a3); // Point past last element to store.
5411  __ bind(&loop);
5412  __ Branch(&done, ge, a3, Operand(t1)); // Break when a3 past end of elem.
5413  __ sw(a2, MemOperand(a3));
5414  __ Branch(&loop, USE_DELAY_SLOT);
5415  __ addiu(a3, a3, kPointerSize); // In branch delay slot.
5416 
5417  __ bind(&done);
5418  __ DropAndRet(3);
5419 
5420  __ bind(&slowcase);
5421  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
5422 }
5423 
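// The fast path above produces, in one new-space allocation (sketch of the
// layout written by the stores above):
//
//   JSRegExpResult {   // at v0
//     map, empty_properties, elements -> (FixedArray below),
//     length, index, input            // from the three stack arguments
//   }
//   FixedArray {       // immediately after the JSArray
//     fixed_array_map, length, 'length' x undefined
//   }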
5424 
5425 static void GenerateRecordCallTarget(MacroAssembler* masm) {
5426  // Cache the called function in a global property cell. Cache states
5427  // are uninitialized, monomorphic (indicated by a JSFunction), and
5428  // megamorphic.
5429  // a1 : the function to call
5430  // a2 : cache cell for call target
5431  Label done;
5432 
5434  masm->isolate()->heap()->undefined_value());
5436  masm->isolate()->heap()->the_hole_value());
5437 
5438  // Load the cache state into a3.
5440 
5441  // A monomorphic cache hit or an already megamorphic state: invoke the
5442  // function without changing the state.
5443  __ Branch(&done, eq, a3, Operand(a1));
5444  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5445  __ Branch(&done, eq, a3, Operand(at));
5446 
5447  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
5448  // megamorphic.
5449  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
5450 
5451  __ Branch(USE_DELAY_SLOT, &done, eq, a3, Operand(at));
5452  // An uninitialized cache is patched with the function.
5453  // Store a1 in the delay slot. This may or may not get overwritten depending
5454  // on the result of the comparison.
5456  // No need for a write barrier here - cells are rescanned.
5457 
5458  // MegamorphicSentinel is an immortal immovable object (undefined) so no
5459  // write-barrier is needed.
5460  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5462 
5463  __ bind(&done);
5464 }
5465 
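// The cache update above is, in effect (sketch; 'cell' is the value of the
// call target cell passed in a2, 'function' the callee in a1):
//
//   if (cell == function || cell == undefined /* megamorphic */) {
//     // Keep the current state.
//   } else if (cell == the_hole /* uninitialized */) {
//     cell = function;   // Go monomorphic.
//   } else {
//     cell = undefined;  // A monomorphic miss: go megamorphic.
//   }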
5466 
5467 void CallFunctionStub::Generate(MacroAssembler* masm) {
5468  // a1 : the function to call
5469  // a2 : cache cell for call target
5470  Label slow, non_function;
5471 
5472  // The receiver might implicitly be the global object. This is
5473  // indicated by passing the hole as the receiver to the call
5474  // function stub.
5475  if (ReceiverMightBeImplicit()) {
5476  Label call;
5477  // Get the receiver from the stack.
5478  // function, receiver [, arguments]
5479  __ lw(t0, MemOperand(sp, argc_ * kPointerSize));
5480  // Call as function is indicated with the hole.
5481  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
5482  __ Branch(&call, ne, t0, Operand(at));
5483  // Patch the receiver on the stack with the global receiver object.
5484  __ lw(a3,
5487  __ sw(a3, MemOperand(sp, argc_ * kPointerSize));
5488  __ bind(&call);
5489  }
5490 
5491  // Check that the function is really a JavaScript function.
5492  // a1: pushed function (to be verified)
5493  __ JumpIfSmi(a1, &non_function);
5494  // Get the map of the function object.
5495  __ GetObjectType(a1, a3, a3);
5496  __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
5497 
5498  if (RecordCallTarget()) {
5499  GenerateRecordCallTarget(masm);
5500  }
5501 
5502  // Fast-case: Invoke the function now.
5503  // a1: pushed function
5504  ParameterCount actual(argc_);
5505 
5506  if (ReceiverMightBeImplicit()) {
5507  Label call_as_function;
5508  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
5509  __ Branch(&call_as_function, eq, t0, Operand(at));
5510  __ InvokeFunction(a1,
5511  actual,
5512  JUMP_FUNCTION,
5513  NullCallWrapper(),
5514  CALL_AS_METHOD);
5515  __ bind(&call_as_function);
5516  }
5517  __ InvokeFunction(a1,
5518  actual,
5519  JUMP_FUNCTION,
5520  NullCallWrapper(),
5522 
5523  // Slow-case: Non-function called.
5524  __ bind(&slow);
5525  if (RecordCallTarget()) {
5526  // If there is a call target cache, mark it megamorphic in the
5527  // non-function case. MegamorphicSentinel is an immortal immovable
5528  // object (undefined) so no write barrier is needed.
5530  masm->isolate()->heap()->undefined_value());
5531  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5533  }
5534  // Check for function proxy.
5535  __ Branch(&non_function, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
5536  __ push(a1); // Put proxy as additional argument.
5537  __ li(a0, Operand(argc_ + 1, RelocInfo::NONE));
5538  __ li(a2, Operand(0, RelocInfo::NONE));
5539  __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
5540  __ SetCallKind(t1, CALL_AS_METHOD);
5541  {
5542  Handle<Code> adaptor =
5543  masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
5544  __ Jump(adaptor, RelocInfo::CODE_TARGET);
5545  }
5546 
5547  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
5548  // of the original receiver from the call site).
5549  __ bind(&non_function);
5550  __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
5551  __ li(a0, Operand(argc_)); // Set up the number of arguments.
5552  __ mov(a2, zero_reg);
5553  __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
5554  __ SetCallKind(t1, CALL_AS_METHOD);
5555  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
5556  RelocInfo::CODE_TARGET);
5557 }
5558 
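// Dispatch performed by this stub (sketch):
//   if (callee is a JSFunction)
//     invoke it (CALL_AS_FUNCTION when the receiver slot holds the hole,
//     otherwise CALL_AS_METHOD);
//   else if (callee is a JSFunctionProxy)
//     call Builtins::CALL_FUNCTION_PROXY through the arguments adaptor;
//   else
//     call Builtins::CALL_NON_FUNCTION with the callee as receiver.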
5559 
5560 void CallConstructStub::Generate(MacroAssembler* masm) {
5561  // a0 : number of arguments
5562  // a1 : the function to call
5563  // a2 : cache cell for call target
5564  Label slow, non_function_call;
5565 
5566  // Check that the function is not a smi.
5567  __ JumpIfSmi(a1, &non_function_call);
5568  // Check that the function is a JSFunction.
5569  __ GetObjectType(a1, a3, a3);
5570  __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
5571 
5572  if (RecordCallTarget()) {
5573  GenerateRecordCallTarget(masm);
5574  }
5575 
5576  // Jump to the function-specific construct stub.
5579  __ Addu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
5580  __ Jump(at);
5581 
5582  // a0: number of arguments
5583  // a1: called object
5584  // a3: object type
5585  Label do_call;
5586  __ bind(&slow);
5587  __ Branch(&non_function_call, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
5588  __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
5589  __ jmp(&do_call);
5590 
5591  __ bind(&non_function_call);
5592  __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
5593  __ bind(&do_call);
5594  // Set expected number of arguments to zero (not changing a0).
5595  __ li(a2, Operand(0, RelocInfo::NONE));
5596  __ SetCallKind(t1, CALL_AS_METHOD);
5597  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
5598  RelocInfo::CODE_TARGET);
5599 }
5600 
5601 
5602 // Unfortunately you have to run without snapshots to see most of these
5603 // names in the profile since most compare stubs end up in the snapshot.
5604 void CompareStub::PrintName(StringStream* stream) {
5605  ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
5606  (lhs_.is(a1) && rhs_.is(a0)));
5607  const char* cc_name;
5608  switch (cc_) {
5609  case lt: cc_name = "LT"; break;
5610  case gt: cc_name = "GT"; break;
5611  case le: cc_name = "LE"; break;
5612  case ge: cc_name = "GE"; break;
5613  case eq: cc_name = "EQ"; break;
5614  case ne: cc_name = "NE"; break;
5615  default: cc_name = "UnknownCondition"; break;
5616  }
5617  bool is_equality = cc_ == eq || cc_ == ne;
5618  stream->Add("CompareStub_%s", cc_name);
5619  stream->Add(lhs_.is(a0) ? "_a0" : "_a1");
5620  stream->Add(rhs_.is(a0) ? "_a0" : "_a1");
5621  if (strict_ && is_equality) stream->Add("_STRICT");
5622  if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
5623  if (!include_number_compare_) stream->Add("_NO_NUMBER");
5624  if (!include_smi_compare_) stream->Add("_NO_SMI");
5625 }
5626 
5627 
5628 int CompareStub::MinorKey() {
5629  // Encode the two parameters in a unique 16 bit value.
5630  ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
5631  ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
5632  (lhs_.is(a1) && rhs_.is(a0)));
5633  return ConditionField::encode(static_cast<unsigned>(cc_))
5634  | RegisterField::encode(lhs_.is(a0))
5635  | StrictField::encode(strict_)
5636  | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
5637  | IncludeSmiCompareField::encode(include_smi_compare_);
5638 }
5639 
5640 
5641 // StringCharCodeAtGenerator.
5642 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
5643  Label flat_string;
5644  Label ascii_string;
5645  Label got_char_code;
5646  Label sliced_string;
5647 
5648  ASSERT(!t0.is(index_));
5649  ASSERT(!t0.is(result_));
5650  ASSERT(!t0.is(object_));
5651 
5652  // If the receiver is a smi trigger the non-string case.
5653  __ JumpIfSmi(object_, receiver_not_string_);
5654 
5655  // Fetch the instance type of the receiver into result register.
5656  __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5657  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5658  // If the receiver is not a string trigger the non-string case.
5659  __ And(t0, result_, Operand(kIsNotStringMask));
5660  __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
5661 
5662  // If the index is non-smi trigger the non-smi case.
5663  __ JumpIfNotSmi(index_, &index_not_smi_);
5664 
5665  __ bind(&got_smi_index_);
5666 
5667  // Check for index out of range.
5668  __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
5669  __ Branch(index_out_of_range_, ls, t0, Operand(index_));
5670 
5671  __ sra(index_, index_, kSmiTagSize);
5672 
5674  object_,
5675  index_,
5676  result_,
5677  &call_runtime_);
5678 
5679  __ sll(result_, result_, kSmiTagSize);
5680  __ bind(&exit_);
5681 }
5682 
5683 
5685  MacroAssembler* masm,
5686  const RuntimeCallHelper& call_helper) {
5687  __ Abort("Unexpected fallthrough to CharCodeAt slow case");
5688 
5689  // Index is not a smi.
5690  __ bind(&index_not_smi_);
5691  // If index is a heap number, try converting it to an integer.
5692  __ CheckMap(index_,
5693  result_,
5694  Heap::kHeapNumberMapRootIndex,
5695  index_not_number_,
5697  call_helper.BeforeCall(masm);
5698  // Consumed by runtime conversion function:
5699  __ Push(object_, index_);
5700  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
5701  __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
5702  } else {
5703  ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
5704  // NumberToSmi discards numbers that are not exact integers.
5705  __ CallRuntime(Runtime::kNumberToSmi, 1);
5706  }
5707 
5708  // Save the conversion result before the pop instructions below
5709  // have a chance to overwrite it.
5710 
5711  __ Move(index_, v0);
5712  __ pop(object_);
5713  // Reload the instance type.
5714  __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5715  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5716  call_helper.AfterCall(masm);
5717  // If index is still not a smi, it must be out of range.
5718  __ JumpIfNotSmi(index_, index_out_of_range_);
5719  // Otherwise, return to the fast path.
5720  __ Branch(&got_smi_index_);
5721 
5722  // Call runtime. We get here when the receiver is a string and the
5723  // index is a number, but the code for getting the actual character
5724  // is too complex (e.g., when the string needs to be flattened).
5725  __ bind(&call_runtime_);
5726  call_helper.BeforeCall(masm);
5727  __ sll(index_, index_, kSmiTagSize);
5728  __ Push(object_, index_);
5729  __ CallRuntime(Runtime::kStringCharCodeAt, 2);
5730 
5731  __ Move(result_, v0);
5732 
5733  call_helper.AfterCall(masm);
5734  __ jmp(&exit_);
5735 
5736  __ Abort("Unexpected fallthrough from CharCodeAt slow case");
5737 }
5738 
5739 
5740 // -------------------------------------------------------------------------
5741 // StringCharFromCodeGenerator
5742 
5743 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
5744  // Fast case of Heap::LookupSingleCharacterStringFromCode.
5745 
5746  ASSERT(!t0.is(result_));
5747  ASSERT(!t0.is(code_));
5748 
5749  STATIC_ASSERT(kSmiTag == 0);
5752  __ And(t0,
5753  code_,
5754  Operand(kSmiTagMask |
5756  __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
5757 
5758  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
5759  // At this point the code_ register contains a smi-tagged ASCII char code.
5760  STATIC_ASSERT(kSmiTag == 0);
5761  __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
5762  __ Addu(result_, result_, t0);
5763  __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
5764  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
5765  __ Branch(&slow_case_, eq, result_, Operand(t0));
5766  __ bind(&exit_);
5767 }
5768 
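// Fast path above, roughly as C++ (an illustrative sketch; the cache is the
// single-character string cache root loaded above):
//
//   if (!code->IsSmi() || Smi::cast(code)->value() > String::kMaxAsciiCharCode)
//     goto slow_case;
//   result = single_character_string_cache->get(Smi::cast(code)->value());
//   if (result == undefined) goto slow_case;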
5769 
5771  MacroAssembler* masm,
5772  const RuntimeCallHelper& call_helper) {
5773  __ Abort("Unexpected fallthrough to CharFromCode slow case");
5774 
5775  __ bind(&slow_case_);
5776  call_helper.BeforeCall(masm);
5777  __ push(code_);
5778  __ CallRuntime(Runtime::kCharFromCode, 1);
5779  __ Move(result_, v0);
5780 
5781  call_helper.AfterCall(masm);
5782  __ Branch(&exit_);
5783 
5784  __ Abort("Unexpected fallthrough from CharFromCode slow case");
5785 }
5786 
5787 
5788 // -------------------------------------------------------------------------
5789 // StringCharAtGenerator
5790 
5791 void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
5792  char_code_at_generator_.GenerateFast(masm);
5793  char_from_code_generator_.GenerateFast(masm);
5794 }
5795 
5796 
5798  MacroAssembler* masm,
5799  const RuntimeCallHelper& call_helper) {
5800  char_code_at_generator_.GenerateSlow(masm, call_helper);
5801  char_from_code_generator_.GenerateSlow(masm, call_helper);
5802 }
5803 
5804 
5805 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
5806  Register dest,
5807  Register src,
5808  Register count,
5809  Register scratch,
5810  bool ascii) {
5811  Label loop;
5812  Label done;
5813  // This loop just copies one character at a time, as it is only used for
5814  // very short strings.
5815  if (!ascii) {
5816  __ addu(count, count, count);
5817  }
5818  __ Branch(&done, eq, count, Operand(zero_reg));
5819  __ addu(count, dest, count); // Count now points to the last dest byte.
5820 
5821  __ bind(&loop);
5822  __ lbu(scratch, MemOperand(src));
5823  __ addiu(src, src, 1);
5824  __ sb(scratch, MemOperand(dest));
5825  __ addiu(dest, dest, 1);
5826  __ Branch(&loop, lt, dest, Operand(count));
5827 
5828  __ bind(&done);
5829 }
5830 
5831 
5832 enum CopyCharactersFlags {
5833  COPY_ASCII = 1,
5834  DEST_ALWAYS_ALIGNED = 2
5835 };
5836 
5837 
5838 void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
5839  Register dest,
5840  Register src,
5841  Register count,
5842  Register scratch1,
5843  Register scratch2,
5844  Register scratch3,
5845  Register scratch4,
5846  Register scratch5,
5847  int flags) {
5848  bool ascii = (flags & COPY_ASCII) != 0;
5849  bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
5850 
5851  if (dest_always_aligned && FLAG_debug_code) {
5852  // Check that destination is actually word aligned if the flag says
5853  // that it is.
5854  __ And(scratch4, dest, Operand(kPointerAlignmentMask));
5855  __ Check(eq,
5856  "Destination of copy not aligned.",
5857  scratch4,
5858  Operand(zero_reg));
5859  }
5860 
5861  const int kReadAlignment = 4;
5862  const int kReadAlignmentMask = kReadAlignment - 1;
5863  // Ensure that reading an entire aligned word containing the last character
5864  // of a string will not read outside the allocated area (because we pad up
5865  // to kObjectAlignment).
5866  STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
5867  // Assumes word reads and writes are little endian.
5868  // Nothing to do for zero characters.
5869  Label done;
5870 
5871  if (!ascii) {
5872  __ addu(count, count, count);
5873  }
5874  __ Branch(&done, eq, count, Operand(zero_reg));
5875 
5876  Label byte_loop;
5877  // Must copy at least eight bytes, otherwise just do it one byte at a time.
5878  __ Subu(scratch1, count, Operand(8));
5879  __ Addu(count, dest, Operand(count));
5880  Register limit = count; // Read until src equals this.
5881  __ Branch(&byte_loop, lt, scratch1, Operand(zero_reg));
5882 
5883  if (!dest_always_aligned) {
5884  // Align dest by byte copying. Copies between zero and three bytes.
5885  __ And(scratch4, dest, Operand(kReadAlignmentMask));
5886  Label dest_aligned;
5887  __ Branch(&dest_aligned, eq, scratch4, Operand(zero_reg));
5888  Label aligned_loop;
5889  __ bind(&aligned_loop);
5890  __ lbu(scratch1, MemOperand(src));
5891  __ addiu(src, src, 1);
5892  __ sb(scratch1, MemOperand(dest));
5893  __ addiu(dest, dest, 1);
5894  __ addiu(scratch4, scratch4, 1);
5895  __ Branch(&aligned_loop, le, scratch4, Operand(kReadAlignmentMask));
5896  __ bind(&dest_aligned);
5897  }
5898 
5899  Label simple_loop;
5900 
5901  __ And(scratch4, src, Operand(kReadAlignmentMask));
5902  __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg));
5903 
5904  // Loop for src/dst that are not aligned the same way.
5905  // This loop uses lwl and lwr instructions. These instructions
5906  // depend on the endianness, and the implementation assumes little-endian.
5907  {
5908  Label loop;
5909  __ bind(&loop);
5910  __ lwr(scratch1, MemOperand(src));
5911  __ Addu(src, src, Operand(kReadAlignment));
5912  __ lwl(scratch1, MemOperand(src, -1));
5913  __ sw(scratch1, MemOperand(dest));
5914  __ Addu(dest, dest, Operand(kReadAlignment));
5915  __ Subu(scratch2, limit, dest);
5916  __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
5917  }
5918 
5919  __ Branch(&byte_loop);
5920 
5921  // Simple loop.
5922  // Copy words from src to dest, until less than four bytes left.
5923  // Both src and dest are word aligned.
5924  __ bind(&simple_loop);
5925  {
5926  Label loop;
5927  __ bind(&loop);
5928  __ lw(scratch1, MemOperand(src));
5929  __ Addu(src, src, Operand(kReadAlignment));
5930  __ sw(scratch1, MemOperand(dest));
5931  __ Addu(dest, dest, Operand(kReadAlignment));
5932  __ Subu(scratch2, limit, dest);
5933  __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
5934  }
5935 
5936  // Copy bytes from src to dest until dest hits limit.
5937  __ bind(&byte_loop);
5938  // Test if dest has already reached the limit.
5939  __ Branch(&done, ge, dest, Operand(limit));
5940  __ lbu(scratch1, MemOperand(src));
5941  __ addiu(src, src, 1);
5942  __ sb(scratch1, MemOperand(dest));
5943  __ addiu(dest, dest, 1);
5944  __ Branch(&byte_loop);
5945 
5946  __ bind(&done);
5947 }
5948 
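// Copy strategy used above, in outline (sketch):
//   if (fewer than eight bytes) copy byte by byte;
//   else:
//     byte-copy until dest is word aligned (unless DEST_ALWAYS_ALIGNED);
//     if src is now word aligned too, copy word by word with lw/sw;
//     otherwise copy words with lwr/lwl unaligned loads (little-endian);
//     byte-copy the remaining tail until dest reaches limit.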
5949 
5950 void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
5951  Register c1,
5952  Register c2,
5953  Register scratch1,
5954  Register scratch2,
5955  Register scratch3,
5956  Register scratch4,
5957  Register scratch5,
5958  Label* not_found) {
5959  // Register scratch3 is the general scratch register in this function.
5960  Register scratch = scratch3;
5961 
5962  // Make sure that both characters are not digits, as such strings have a
5963  // different hash algorithm. Don't try to look for these in the symbol table.
5964  Label not_array_index;
5965  __ Subu(scratch, c1, Operand(static_cast<int>('0')));
5966  __ Branch(&not_array_index,
5967  Ugreater,
5968  scratch,
5969  Operand(static_cast<int>('9' - '0')));
5970  __ Subu(scratch, c2, Operand(static_cast<int>('0')));
5971 
5972  // If the check failed, combine both characters into a single halfword.
5973  // This is required by the contract of the method: code at the
5974  // not_found branch expects this combination in c1 register.
5975  Label tmp;
5976  __ sll(scratch1, c2, kBitsPerByte);
5977  __ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
5978  __ Or(c1, c1, scratch1);
5979  __ bind(&tmp);
5980  __ Branch(
5981  not_found, Uless_equal, scratch, Operand(static_cast<int>('9' - '0')));
5982 
5983  __ bind(&not_array_index);
5984  // Calculate the two character string hash.
5985  Register hash = scratch1;
5986  StringHelper::GenerateHashInit(masm, hash, c1);
5989 
5990  // Collect the two characters in a register.
5991  Register chars = c1;
5992  __ sll(scratch, c2, kBitsPerByte);
5993  __ Or(chars, chars, scratch);
5994 
5995  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5996  // hash: hash of two character string.
5997 
5998  // Load symbol table.
5999  // Load address of first element of the symbol table.
6000  Register symbol_table = c2;
6001  __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
6002 
6003  Register undefined = scratch4;
6004  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
6005 
6006  // Calculate capacity mask from the symbol table capacity.
6007  Register mask = scratch2;
6008  __ lw(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
6009  __ sra(mask, mask, 1);
6010  __ Addu(mask, mask, -1);
6011 
6012  // Calculate untagged address of the first element of the symbol table.
6013  Register first_symbol_table_element = symbol_table;
6014  __ Addu(first_symbol_table_element, symbol_table,
6016 
6017  // Registers.
6018  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
6019  // hash: hash of two character string
6020  // mask: capacity mask
6021  // first_symbol_table_element: address of the first element of
6022  // the symbol table
6023  // undefined: the undefined object
6024  // scratch: -
6025 
6026  // Perform a number of probes in the symbol table.
6027  const int kProbes = 4;
6028  Label found_in_symbol_table;
6029  Label next_probe[kProbes];
6030  Register candidate = scratch5; // Scratch register contains candidate.
6031  for (int i = 0; i < kProbes; i++) {
6032  // Calculate entry in symbol table.
6033  if (i > 0) {
6034  __ Addu(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
6035  } else {
6036  __ mov(candidate, hash);
6037  }
6038 
6039  __ And(candidate, candidate, Operand(mask));
6040 
6041  // Load the entry from the symbol table.
6043  __ sll(scratch, candidate, kPointerSizeLog2);
6044  __ Addu(scratch, scratch, first_symbol_table_element);
6045  __ lw(candidate, MemOperand(scratch));
6046 
6047  // If entry is undefined no string with this hash can be found.
6048  Label is_string;
6049  __ GetObjectType(candidate, scratch, scratch);
6050  __ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE));
6051 
6052  __ Branch(not_found, eq, undefined, Operand(candidate));
6053  // Must be the hole (deleted entry).
6054  if (FLAG_debug_code) {
6055  __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
6056  __ Assert(eq, "oddball in symbol table is not undefined or the hole",
6057  scratch, Operand(candidate));
6058  }
6059  __ jmp(&next_probe[i]);
6060 
6061  __ bind(&is_string);
6062 
6063  // Check that the candidate is a non-external ASCII string. The instance
6064  // type is still in the scratch register from the CompareObjectType
6065  // operation.
6066  __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
6067 
6068  // If length is not 2 the string is not a candidate.
6069  __ lw(scratch, FieldMemOperand(candidate, String::kLengthOffset));
6070  __ Branch(&next_probe[i], ne, scratch, Operand(Smi::FromInt(2)));
6071 
6072  // Check if the two characters match.
6073  // Assumes that word load is little endian.
6074  __ lhu(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
6075  __ Branch(&found_in_symbol_table, eq, chars, Operand(scratch));
6076  __ bind(&next_probe[i]);
6077  }
6078 
6079  // No matching 2 character string found by probing.
6080  __ jmp(not_found);
6081 
6082  // Scratch register contains result when we fall through to here.
6083  Register result = candidate;
6084  __ bind(&found_in_symbol_table);
6085  __ mov(v0, result);
6086 }
6087 
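// Probe scheme above (sketch): hash the two characters with the helpers
// below, then for up to kProbes attempts inspect
//   candidate = symbol_table[(hash + GetProbeOffset(i)) & capacity_mask];
// undefined terminates the search (not found), the hole is skipped, and a
// sequential ASCII string of length 2 whose two bytes equal 'chars' is a hit.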
6088 
6089 void StringHelper::GenerateHashInit(MacroAssembler* masm,
6090  Register hash,
6091  Register character) {
6092  // hash = seed + character + ((seed + character) << 10);
6093  __ LoadRoot(hash, Heap::kHashSeedRootIndex);
6094  // Untag smi seed and add the character.
6095  __ SmiUntag(hash);
6096  __ addu(hash, hash, character);
6097  __ sll(at, hash, 10);
6098  __ addu(hash, hash, at);
6099  // hash ^= hash >> 6;
6100  __ srl(at, hash, 6);
6101  __ xor_(hash, hash, at);
6102 }
6103 
6104 
6105 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
6106  Register hash,
6107  Register character) {
6108  // hash += character;
6109  __ addu(hash, hash, character);
6110  // hash += hash << 10;
6111  __ sll(at, hash, 10);
6112  __ addu(hash, hash, at);
6113  // hash ^= hash >> 6;
6114  __ srl(at, hash, 6);
6115  __ xor_(hash, hash, at);
6116 }
6117 
6118 
6119 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
6120  Register hash) {
6121  // hash += hash << 3;
6122  __ sll(at, hash, 3);
6123  __ addu(hash, hash, at);
6124  // hash ^= hash >> 11;
6125  __ srl(at, hash, 11);
6126  __ xor_(hash, hash, at);
6127  // hash += hash << 15;
6128  __ sll(at, hash, 15);
6129  __ addu(hash, hash, at);
6130 
6131  __ li(at, Operand(String::kHashBitMask));
6132  __ and_(hash, hash, at);
6133 
6134  // if (hash == 0) hash = 27;
6135  __ ori(at, zero_reg, StringHasher::kZeroHash);
6136  __ Movz(hash, at, hash);
6137 }
6138 
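// Taken together, the three hash helpers above compute the following (a plain
// C++ sketch; 'seed' is the untagged heap hash seed):
//
//   uint32_t hash = seed;
//   for (int i = 0; i < length; i++) {
//     hash += chars[i];
//     hash += hash << 10;
//     hash ^= hash >> 6;
//   }
//   hash += hash << 3;
//   hash ^= hash >> 11;
//   hash += hash << 15;
//   hash &= String::kHashBitMask;
//   if (hash == 0) hash = StringHasher::kZeroHash;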
6139 
6140 void SubStringStub::Generate(MacroAssembler* masm) {
6141  Label runtime;
6142  // Stack frame on entry.
6143  // ra: return address
6144  // sp[0]: to
6145  // sp[4]: from
6146  // sp[8]: string
6147 
6148  // This stub is called from the native-call %_SubString(...), so
6149  // nothing can be assumed about the arguments. It is tested that:
6150  // "string" is a sequential string,
6151  // both "from" and "to" are smis, and
6152  // 0 <= from <= to <= string.length.
6153  // If any of these assumptions fail, we call the runtime system.
6154 
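// Outline of the fast path below (sketch):
//   if (to - from == string.length) return string;              // trivial case
//   if (FLAG_string_slices && to - from >= SlicedString::kMinLength)
//     return a new sliced string over the unpacked underlying string;
//   otherwise allocate a sequential string and copy the characters.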
6155  const int kToOffset = 0 * kPointerSize;
6156  const int kFromOffset = 1 * kPointerSize;
6157  const int kStringOffset = 2 * kPointerSize;
6158 
6159  __ lw(a2, MemOperand(sp, kToOffset));
6160  __ lw(a3, MemOperand(sp, kFromOffset));
6161  STATIC_ASSERT(kFromOffset == kToOffset + 4);
6162  STATIC_ASSERT(kSmiTag == 0);
6164 
6165  // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
6166  // safe in this case.
6167  __ UntagAndJumpIfNotSmi(a2, a2, &runtime);
6168  __ UntagAndJumpIfNotSmi(a3, a3, &runtime);
6169  // Both a2 and a3 are untagged integers.
6170 
6171  __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0.
6172 
6173  __ Branch(&runtime, gt, a3, Operand(a2)); // Fail if from > to.
6174  __ Subu(a2, a2, a3);
6175 
6176  // Make sure first argument is a string.
6177  __ lw(v0, MemOperand(sp, kStringOffset));
6178  __ JumpIfSmi(v0, &runtime);
6181  __ And(t0, a1, Operand(kIsNotStringMask));
6182 
6183  __ Branch(&runtime, ne, t0, Operand(zero_reg));
6184 
6185  // Short-cut for the case of trivial substring.
6186  Label return_v0;
6187  // v0: original string
6188  // a2: result string length
6190  __ sra(t0, t0, 1);
6191  // Return original string.
6192  __ Branch(&return_v0, eq, a2, Operand(t0));
6193  // Longer than original string's length or negative: unsafe arguments.
6194  __ Branch(&runtime, hi, a2, Operand(t0));
6195  // Shorter than original string's length: an actual substring.
6196 
6197  // Deal with different string types: update the index if necessary
6198  // and put the underlying string into t1.
6199  // v0: original string
6200  // a1: instance type
6201  // a2: length
6202  // a3: from index (untagged)
6203  Label underlying_unpacked, sliced_string, seq_or_external_string;
6204  // If the string is not indirect, it can only be sequential or external.
6207  __ And(t0, a1, Operand(kIsIndirectStringMask));
6208  __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
6209  // t0 is used as a scratch register and can be overwritten in either case.
6210  __ And(t0, a1, Operand(kSlicedNotConsMask));
6211  __ Branch(&sliced_string, ne, t0, Operand(zero_reg));
6212  // Cons string. Check whether it is flat, then fetch first part.
6214  __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
6215  __ Branch(&runtime, ne, t1, Operand(t0));
6217  // Update instance type.
6220  __ jmp(&underlying_unpacked);
6221 
6222  __ bind(&sliced_string);
6223  // Sliced string. Fetch parent and correct start index by offset.
6226  __ sra(t0, t0, 1); // Add offset to index.
6227  __ Addu(a3, a3, t0);
6228  // Update instance type.
6231  __ jmp(&underlying_unpacked);
6232 
6233  __ bind(&seq_or_external_string);
6234  // Sequential or external string. Just move string to the expected register.
6235  __ mov(t1, v0);
6236 
6237  __ bind(&underlying_unpacked);
6238 
6239  if (FLAG_string_slices) {
6240  Label copy_routine;
6241  // t1: underlying subject string
6242  // a1: instance type of underlying subject string
6243  // a2: length
6244  // a3: adjusted start index (untagged)
6245  // Short slice. Copy instead of slicing.
6246  __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
6247  // Allocate new sliced string. At this point we do not reload the instance
6248  // type including the string encoding because we simply rely on the info
6249  // provided by the original string. It does not matter if the original
6250  // string's encoding is wrong because we always have to recheck encoding of
6251  // the newly created string's parent anyway due to externalized strings.
6252  Label two_byte_slice, set_slice_header;
6255  __ And(t0, a1, Operand(kStringEncodingMask));
6256  __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
6257  __ AllocateAsciiSlicedString(v0, a2, t2, t3, &runtime);
6258  __ jmp(&set_slice_header);
6259  __ bind(&two_byte_slice);
6260  __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
6261  __ bind(&set_slice_header);
6262  __ sll(a3, a3, 1);
6265  __ jmp(&return_v0);
6266 
6267  __ bind(&copy_routine);
6268  }
6269 
6270  // t1: underlying subject string
6271  // a1: instance type of underlying subject string
6272  // a2: length
6273  // a3: adjusted start index (untagged)
6274  Label two_byte_sequential, sequential_string, allocate_result;
6277  __ And(t0, a1, Operand(kExternalStringTag));
6278  __ Branch(&sequential_string, eq, t0, Operand(zero_reg));
6279 
6280  // Handle external string.
6281  // Rule out short external strings.
6283  __ And(t0, a1, Operand(kShortExternalStringTag));
6284  __ Branch(&runtime, ne, t0, Operand(zero_reg));
6286  // t1 already points to the first character of underlying string.
6287  __ jmp(&allocate_result);
6288 
6289  __ bind(&sequential_string);
6290  // Locate first character of underlying subject string.
6292  __ Addu(t1, t1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6293 
6294  __ bind(&allocate_result);
6295  // Sequential ASCII string. Allocate the result.
6297  __ And(t0, a1, Operand(kStringEncodingMask));
6298  __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
6299 
6300  // Allocate and copy the resulting ASCII string.
6301  __ AllocateAsciiString(v0, a2, t0, t2, t3, &runtime);
6302 
6303  // Locate first character of substring to copy.
6304  __ Addu(t1, t1, a3);
6305 
6306  // Locate first character of result.
6307  __ Addu(a1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6308 
6309  // v0: result string
6310  // a1: first character of result string
6311  // a2: result string length
6312  // t1: first character of substring to copy
6315  masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
6316  __ jmp(&return_v0);
6317 
6318  // Allocate and copy the resulting two-byte string.
6319  __ bind(&two_byte_sequential);
6320  __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime);
6321 
6322  // Locate first character of substring to copy.
6323  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
6324  __ sll(t0, a3, 1);
6325  __ Addu(t1, t1, t0);
6326  // Locate first character of result.
6327  __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6328 
6329  // v0: result string.
6330  // a1: first character of result.
6331  // a2: result length.
6332  // t1: first character of substring to copy.
6335  masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
6336 
6337  __ bind(&return_v0);
6338  Counters* counters = masm->isolate()->counters();
6339  __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
6340  __ DropAndRet(3);
6341 
6342  // Just jump to runtime to create the sub string.
6343  __ bind(&runtime);
6344  __ TailCallRuntime(Runtime::kSubString, 3, 1);
6345 }
6346 
6347 
6348 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
6349  Register left,
6350  Register right,
6351  Register scratch1,
6352  Register scratch2,
6353  Register scratch3) {
6354  Register length = scratch1;
6355 
6356  // Compare lengths.
6357  Label strings_not_equal, check_zero_length;
6358  __ lw(length, FieldMemOperand(left, String::kLengthOffset));
6359  __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
6360  __ Branch(&check_zero_length, eq, length, Operand(scratch2));
6361  __ bind(&strings_not_equal);
6362  __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
6363  __ Ret();
6364 
6365  // Check if the length is zero.
6366  Label compare_chars;
6367  __ bind(&check_zero_length);
6368  STATIC_ASSERT(kSmiTag == 0);
6369  __ Branch(&compare_chars, ne, length, Operand(zero_reg));
6370  __ li(v0, Operand(Smi::FromInt(EQUAL)));
6371  __ Ret();
6372 
6373  // Compare characters.
6374  __ bind(&compare_chars);
6375 
6376  GenerateAsciiCharsCompareLoop(masm,
6377  left, right, length, scratch2, scratch3, v0,
6378  &strings_not_equal);
6379 
6380  // Characters are equal.
6381  __ li(v0, Operand(Smi::FromInt(EQUAL)));
6382  __ Ret();
6383 }
6384 
6385 
6386 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
6387  Register left,
6388  Register right,
6389  Register scratch1,
6390  Register scratch2,
6391  Register scratch3,
6392  Register scratch4) {
6393  Label result_not_equal, compare_lengths;
6394  // Find minimum length and length difference.
6395  __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
6396  __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
6397  __ Subu(scratch3, scratch1, Operand(scratch2));
6398  Register length_delta = scratch3;
6399  __ slt(scratch4, scratch2, scratch1);
6400  __ Movn(scratch1, scratch2, scratch4);
6401  Register min_length = scratch1;
6402  STATIC_ASSERT(kSmiTag == 0);
6403  __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
6404 
6405  // Compare loop.
6406  GenerateAsciiCharsCompareLoop(masm,
6407  left, right, min_length, scratch2, scratch4, v0,
6408  &result_not_equal);
6409 
6410  // Compare lengths - strings up to min-length are equal.
6411  __ bind(&compare_lengths);
6412  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
6413  // Use length_delta as result if it's zero.
6414  __ mov(scratch2, length_delta);
6415  __ mov(scratch4, zero_reg);
6416  __ mov(v0, zero_reg);
6417 
6418  __ bind(&result_not_equal);
6419  // Conditionally update the result based either on length_delta or
6420  // the last comparison performed in the loop above.
6421  Label ret;
6422  __ Branch(&ret, eq, scratch2, Operand(scratch4));
6423  __ li(v0, Operand(Smi::FromInt(GREATER)));
6424  __ Branch(&ret, gt, scratch2, Operand(scratch4));
6425  __ li(v0, Operand(Smi::FromInt(LESS)));
6426  __ bind(&ret);
6427  __ Ret();
6428 }
6429 
6430 
6431 void StringCompareStub::GenerateAsciiCharsCompareLoop(
6432  MacroAssembler* masm,
6433  Register left,
6434  Register right,
6435  Register length,
6436  Register scratch1,
6437  Register scratch2,
6438  Register scratch3,
6439  Label* chars_not_equal) {
6440  // Change index to run from -length to -1 by adding length to string
6441  // start. This means that loop ends when index reaches zero, which
6442  // doesn't need an additional compare.
6443  __ SmiUntag(length);
6444  __ Addu(scratch1, length,
6446  __ Addu(left, left, Operand(scratch1));
6447  __ Addu(right, right, Operand(scratch1));
6448  __ Subu(length, zero_reg, length);
6449  Register index = length; // index = -length;
6450 
6451 
6452  // Compare loop.
6453  Label loop;
6454  __ bind(&loop);
6455  __ Addu(scratch3, left, index);
6456  __ lbu(scratch1, MemOperand(scratch3));
6457  __ Addu(scratch3, right, index);
6458  __ lbu(scratch2, MemOperand(scratch3));
6459  __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
6460  __ Addu(index, index, 1);
6461  __ Branch(&loop, ne, index, Operand(zero_reg));
6462 }
6463 
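// The two routines above implement an ordinary lexicographic compare of flat
// ASCII strings (sketch):
//
//   int min_length = Min(left.length, right.length);
//   for (int i = 0; i < min_length; i++) {
//     if (left[i] != right[i]) return left[i] < right[i] ? LESS : GREATER;
//   }
//   if (left.length == right.length) return EQUAL;
//   return left.length < right.length ? LESS : GREATER;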
6464 
6465 void StringCompareStub::Generate(MacroAssembler* masm) {
6466  Label runtime;
6467 
6468  Counters* counters = masm->isolate()->counters();
6469 
6470  // Stack frame on entry.
6471  // sp[0]: right string
6472  // sp[4]: left string
6473  __ lw(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
6474  __ lw(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
6475 
6476  Label not_same;
6477  __ Branch(&not_same, ne, a0, Operand(a1));
6478  STATIC_ASSERT(EQUAL == 0);
6479  STATIC_ASSERT(kSmiTag == 0);
6480  __ li(v0, Operand(Smi::FromInt(EQUAL)));
6481  __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
6482  __ DropAndRet(2);
6483 
6484  __ bind(&not_same);
6485 
6486  // Check that both objects are sequential ASCII strings.
6487  __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
6488 
6489  // Compare flat ASCII strings natively. Remove arguments from stack first.
6490  __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
6491  __ Addu(sp, sp, Operand(2 * kPointerSize));
6492  GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
6493 
6494  __ bind(&runtime);
6495  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
6496 }
6497 
6498 
6499 void StringAddStub::Generate(MacroAssembler* masm) {
6500  Label call_runtime, call_builtin;
6501  Builtins::JavaScript builtin_id = Builtins::ADD;
6502 
6503  Counters* counters = masm->isolate()->counters();
6504 
6505  // Stack on entry:
6506  // sp[0]: second argument (right).
6507  // sp[4]: first argument (left).
6508 
6509  // Load the two arguments.
6510  __ lw(a0, MemOperand(sp, 1 * kPointerSize)); // First argument.
6511  __ lw(a1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
6512 
6513  // Make sure that both arguments are strings if not known in advance.
6514  if (flags_ == NO_STRING_ADD_FLAGS) {
6515  __ JumpIfEitherSmi(a0, a1, &call_runtime);
6516  // Load instance types.
6521  STATIC_ASSERT(kStringTag == 0);
6522  // If either is not a string, go to runtime.
6523  __ Or(t4, t0, Operand(t1));
6524  __ And(t4, t4, Operand(kIsNotStringMask));
6525  __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
6526  } else {
6527  // Here at least one of the arguments is definitely a string.
6528  // We convert the one that is not known to be a string.
6529  if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
6530  ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
6531  GenerateConvertArgument(
6532  masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
6533  builtin_id = Builtins::STRING_ADD_RIGHT;
6534  } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
6535  ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
6536  GenerateConvertArgument(
6537  masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
6538  builtin_id = Builtins::STRING_ADD_LEFT;
6539  }
6540  }
6541 
6542  // Both arguments are strings.
6543  // a0: first string
6544  // a1: second string
6545  // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6546  // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6547  {
6548  Label strings_not_empty;
6549  // Check if either of the strings is empty. In that case return the other.
6550  // These tests use a zero-length check on the string lengths, which are Smis.
6551  // Assert that Smi::FromInt(0) is really 0.
6552  STATIC_ASSERT(kSmiTag == 0);
6553  ASSERT(Smi::FromInt(0) == 0);
6556  __ mov(v0, a0); // Assume we'll return first string (from a0).
6557  __ Movz(v0, a1, a2); // If first is empty, return second (from a1).
6558  __ slt(t4, zero_reg, a2); // if (a2 > 0) t4 = 1.
6559  __ slt(t5, zero_reg, a3); // if (a3 > 0) t5 = 1.
6560  __ and_(t4, t4, t5); // Branch if both strings were non-empty.
6561  __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg));
6562 
6563  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6564  __ DropAndRet(2);
6565 
6566  __ bind(&strings_not_empty);
6567  }
6568 
6569  // Untag both string-lengths.
6570  __ sra(a2, a2, kSmiTagSize);
6571  __ sra(a3, a3, kSmiTagSize);
6572 
6573  // Both strings are non-empty.
6574  // a0: first string
6575  // a1: second string
6576  // a2: length of first string
6577  // a3: length of second string
6578  // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6579  // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6580  // Look at the length of the result of adding the two strings.
6581  Label string_add_flat_result, longer_than_two;
6582  // Adding two lengths can't overflow.
6584  __ Addu(t2, a2, Operand(a3));
6585  // Use the symbol table when adding two one-character strings, as it
6586  // helps later optimizations to return a symbol here.
6587  __ Branch(&longer_than_two, ne, t2, Operand(2));
6588 
6589  // Check that both strings are non-external ASCII strings.
6590  if (flags_ != NO_STRING_ADD_FLAGS) {
6595  }
6596  __ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3,
6597  &call_runtime);
6598 
6599  // Get the two characters forming the sub string.
6602 
6603  // Try to lookup two character string in symbol table. If it is not found
6604  // just allocate a new one.
6605  Label make_two_character_string;
6607  masm, a2, a3, t2, t3, t0, t1, t5, &make_two_character_string);
6608  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6609  __ DropAndRet(2);
6610 
6611  __ bind(&make_two_character_string);
6612  // The resulting string has length 2, and the first characters of the two
6613  // strings are combined into a single halfword in the a2 register. So we
6614  // can fill the resulting string without two loops, using a single halfword
6615  // store instruction (which assumes that the processor is in little-endian
6616  // mode).
6617  __ li(t2, Operand(2));
6618  __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
6620  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6621  __ DropAndRet(2);
6622 
6623  __ bind(&longer_than_two);
6624  // Check if resulting string will be flat.
6625  __ Branch(&string_add_flat_result, lt, t2, Operand(ConsString::kMinLength));
6626  // Handle exceptionally long strings in the runtime system.
6627  STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
6629  // kMaxLength + 1 is representable as a shifted literal, kMaxLength is not.
6630  __ Branch(&call_runtime, hs, t2, Operand(String::kMaxLength + 1));
6631 
6632  // If result is not supposed to be flat, allocate a cons string object.
6633  // If both strings are ASCII the result is an ASCII cons string.
6634  if (flags_ != NO_STRING_ADD_FLAGS) {
6639  }
6640  Label non_ascii, allocated, ascii_data;
6642  // Branch to non_ascii if either string-encoding field is zero (non-ASCII).
6643  __ And(t4, t0, Operand(t1));
6644  __ And(t4, t4, Operand(kStringEncodingMask));
6645  __ Branch(&non_ascii, eq, t4, Operand(zero_reg));
6646 
6647  // Allocate an ASCII cons string.
6648  __ bind(&ascii_data);
6649  __ AllocateAsciiConsString(v0, t2, t0, t1, &call_runtime);
6650  __ bind(&allocated);
6651  // Fill the fields of the cons string.
6654  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6655  __ DropAndRet(2);
6656 
6657  __ bind(&non_ascii);
6658  // At least one of the strings is two-byte. Check whether it happens
6659  // to contain only ASCII characters.
6660  // t0: first instance type.
6661  // t1: second instance type.
6662  // Branch to ascii_data if _both_ instances have kAsciiDataHintMask set.
6663  __ And(at, t0, Operand(kAsciiDataHintMask));
6664  __ and_(at, at, t1);
6665  __ Branch(&ascii_data, ne, at, Operand(zero_reg));
6666 
6667  __ xor_(t0, t0, t1);
6669  __ And(t0, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
6670  __ Branch(&ascii_data, eq, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
6671 
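// Illustrative restatement of the two checks above (the mask parameters stand
// in for kAsciiDataHintMask, kAsciiStringTag and kAsciiDataHintTag; this is
// not the real instance-type layout). An ASCII cons string can still be used
// if both two-byte operands carry the ASCII-data hint, or if the operands
// differ in exactly the ASCII-string and ASCII-data-hint bits, i.e. one is an
// ASCII string and the other is a two-byte string known to hold only ASCII.
static bool CanAllocateAsciiCons(unsigned type1, unsigned type2,
                                 unsigned ascii_hint_mask,
                                 unsigned ascii_string_tag,
                                 unsigned ascii_hint_tag) {
  if ((type1 & ascii_hint_mask) != 0 && (type2 & ascii_hint_mask) != 0) {
    return true;
  }
  unsigned diff = (type1 ^ type2) & (ascii_string_tag | ascii_hint_tag);
  return diff == (ascii_string_tag | ascii_hint_tag);
}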
6672  // Allocate a two byte cons string.
6673  __ AllocateTwoByteConsString(v0, t2, t0, t1, &call_runtime);
6674  __ Branch(&allocated);
6675 
6676  // We cannot encounter sliced strings or cons strings here since:
6678  // Handle creating a flat result from either external or sequential strings.
6679  // Locate the first characters' locations.
6680  // a0: first string
6681  // a1: second string
6682  // a2: length of first string
6683  // a3: length of second string
6684  // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6685  // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6686  // t2: sum of lengths.
6687  Label first_prepared, second_prepared;
6688  __ bind(&string_add_flat_result);
6689  if (flags_ != NO_STRING_ADD_FLAGS) {
6694  }
6695  // Check whether both strings have the same encoding.
6696  __ Xor(t3, t0, Operand(t1));
6697  __ And(t3, t3, Operand(kStringEncodingMask));
6698  __ Branch(&call_runtime, ne, t3, Operand(zero_reg));
6699 
6701  __ And(t4, t0, Operand(kStringRepresentationMask));
6702 
6704  Label skip_first_add;
6705  __ Branch(&skip_first_add, ne, t4, Operand(zero_reg));
6706  __ Branch(USE_DELAY_SLOT, &first_prepared);
6708  __ bind(&skip_first_add);
6709  // External string: rule out short external string and load string resource.
6711  __ And(t4, t0, Operand(kShortExternalStringMask));
6712  __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
6714  __ bind(&first_prepared);
6715 
6717  __ And(t4, t1, Operand(kStringRepresentationMask));
6719  Label skip_second_add;
6720  __ Branch(&skip_second_add, ne, t4, Operand(zero_reg));
6721  __ Branch(USE_DELAY_SLOT, &second_prepared);
6723  __ bind(&skip_second_add);
6724  // External string: rule out short external string and load string resource.
6726  __ And(t4, t1, Operand(kShortExternalStringMask));
6727  __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
6729  __ bind(&second_prepared);
6730 
6731  Label non_ascii_string_add_flat_result;
6732  // t3: first character of first string
6733  // a1: first character of second string
6734  // a2: length of first string
6735  // a3: length of second string
6736  // t2: sum of lengths.
6737  // Both strings have the same encoding.
6739  __ And(t4, t1, Operand(kStringEncodingMask));
6740  __ Branch(&non_ascii_string_add_flat_result, eq, t4, Operand(zero_reg));
6741 
6742  __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
6743  __ Addu(t2, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6744  // v0: result string.
6745  // t3: first character of first string.
6746  // a1: first character of second string
6747  // a2: length of first string.
6748  // a3: length of second string.
6749  // t2: first character of result.
6750 
6751  StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, true);
6752  // t2: next character of result.
6753  StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true);
6754  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6755  __ DropAndRet(2);
6756 
6757  __ bind(&non_ascii_string_add_flat_result);
6758  __ AllocateTwoByteString(v0, t2, t0, t1, t5, &call_runtime);
6759  __ Addu(t2, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6760  // v0: result string.
6761  // t3: first character of first string.
6762  // a1: first character of second string.
6763  // a2: length of first string.
6764  // a3: length of second string.
6765  // t2: first character of result.
6766  StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, false);
6767  // t2: next character of result.
6768  StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false);
6769 
6770  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6771  __ DropAndRet(2);
6772 
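// What the two GenerateCopyCharacters calls in each flat path amount to
// (illustrative only): copy the first string's characters to the start of the
// result buffer, then append the second string's characters right after them.
static void ConcatChars(char* dest, const char* first, unsigned first_len,
                        const char* second, unsigned second_len) {
  for (unsigned i = 0; i < first_len; i++) dest[i] = first[i];
  for (unsigned i = 0; i < second_len; i++) dest[first_len + i] = second[i];
}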
6773  // Just jump to runtime to add the two strings.
6774  __ bind(&call_runtime);
6775  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
6776 
6777  if (call_builtin.is_linked()) {
6778  __ bind(&call_builtin);
6779  __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
6780  }
6781 }
6782 
6783 
6784 void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
6785  int stack_offset,
6786  Register arg,
6787  Register scratch1,
6788  Register scratch2,
6789  Register scratch3,
6790  Register scratch4,
6791  Label* slow) {
6792  // First check if the argument is already a string.
6793  Label not_string, done;
6794  __ JumpIfSmi(arg, &not_string);
6795  __ GetObjectType(arg, scratch1, scratch1);
6796  __ Branch(&done, lt, scratch1, Operand(FIRST_NONSTRING_TYPE));
6797 
6798  // Check the number to string cache.
6799  Label not_cached;
6800  __ bind(&not_string);
6801  // Puts the cached result into scratch1.
6803  arg,
6804  scratch1,
6805  scratch2,
6806  scratch3,
6807  scratch4,
6808  false,
6809  &not_cached);
6810  __ mov(arg, scratch1);
6811  __ sw(arg, MemOperand(sp, stack_offset));
6812  __ jmp(&done);
6813 
6814  // Check if the argument is a safe string wrapper.
6815  __ bind(&not_cached);
6816  __ JumpIfSmi(arg, slow);
6817  __ GetObjectType(arg, scratch1, scratch2); // map -> scratch1.
6818  __ Branch(slow, ne, scratch2, Operand(JS_VALUE_TYPE));
6819  __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
6820  __ li(scratch4, 1 << Map::kStringWrapperSafeForDefaultValueOf);
6821  __ And(scratch2, scratch2, scratch4);
6822  __ Branch(slow, ne, scratch2, Operand(scratch4));
6823  __ lw(arg, FieldMemOperand(arg, JSValue::kValueOffset));
6824  __ sw(arg, MemOperand(sp, stack_offset));
6825 
6826  __ bind(&done);
6827 }
6828 
6829 
6830 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
6831  ASSERT(state_ == CompareIC::SMIS);
6832  Label miss;
6833  __ Or(a2, a1, a0);
6834  __ JumpIfNotSmi(a2, &miss);
6835 
6836  if (GetCondition() == eq) {
6837  // For equality we do not care about the sign of the result.
6838  __ Subu(v0, a0, a1);
6839  } else {
6840  // Untag before subtracting to avoid handling overflow.
6841  __ SmiUntag(a1);
6842  __ SmiUntag(a0);
6843  __ Subu(v0, a1, a0);
6844  }
6845  __ Ret();
6846 
6847  __ bind(&miss);
6848  GenerateMiss(masm);
6849 }
6850 
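// Why untagging first avoids overflow (illustrative, assuming the 32-bit Smi
// layout with kSmiTag == 0, kSmiTagSize == 1 and an arithmetic right shift):
// untagged Smi values fit in 31 bits, so their difference always fits in a
// signed 32-bit register and its sign orders the operands correctly.
static inline int CompareSmisUntagged(int tagged_a, int tagged_b) {
  return (tagged_a >> 1) - (tagged_b >> 1);  // Cannot overflow: 31-bit operands.
}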
6851 
6852 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
6853  ASSERT(state_ == CompareIC::HEAP_NUMBERS);
6854 
6855  Label generic_stub;
6856  Label unordered, maybe_undefined1, maybe_undefined2;
6857  Label miss;
6858  __ And(a2, a1, Operand(a0));
6859  __ JumpIfSmi(a2, &generic_stub);
6860 
6861  __ GetObjectType(a0, a2, a2);
6862  __ Branch(&maybe_undefined1, ne, a2, Operand(HEAP_NUMBER_TYPE));
6863  __ GetObjectType(a1, a2, a2);
6864  __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
6865 
6866  // Inline the double comparison and fall back to the general compare
6867  // stub if NaN is involved or the FPU is unsupported.
6869  CpuFeatures::Scope scope(FPU);
6870 
6871  // Load left and right operand.
6872  __ Subu(a2, a1, Operand(kHeapObjectTag));
6874  __ Subu(a2, a0, Operand(kHeapObjectTag));
6876 
6877  // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
6878  Label fpu_eq, fpu_lt;
6879  // Test if equal, and also handle the unordered/NaN case.
6880  __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
6881 
6882  // Test if less (unordered case is already handled).
6883  __ BranchF(&fpu_lt, NULL, lt, f0, f2);
6884 
6885  // Otherwise it's greater, so just fall thru, and return.
6886  __ li(v0, Operand(GREATER));
6887  __ Ret();
6888 
6889  __ bind(&fpu_eq);
6890  __ li(v0, Operand(EQUAL));
6891  __ Ret();
6892 
6893  __ bind(&fpu_lt);
6894  __ li(v0, Operand(LESS));
6895  __ Ret();
6896  }
6897 
6898  __ bind(&unordered);
6899 
6900  CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
6901  __ bind(&generic_stub);
6902  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
6903 
6904  __ bind(&maybe_undefined1);
6906  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
6907  __ Branch(&miss, ne, a0, Operand(at));
6908  __ GetObjectType(a1, a2, a2);
6909  __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
6910  __ jmp(&unordered);
6911  }
6912 
6913  __ bind(&maybe_undefined2);
6915  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
6916  __ Branch(&unordered, eq, a1, Operand(at));
6917  }
6918 
6919  __ bind(&miss);
6920  GenerateMiss(masm);
6921 }
6922 
6923 
6924 void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
6925  ASSERT(state_ == CompareIC::SYMBOLS);
6926  Label miss;
6927 
6928  // Registers containing left and right operands respectively.
6929  Register left = a1;
6930  Register right = a0;
6931  Register tmp1 = a2;
6932  Register tmp2 = a3;
6933 
6934  // Check that both operands are heap objects.
6935  __ JumpIfEitherSmi(left, right, &miss);
6936 
6937  // Check that both operands are symbols.
6938  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
6939  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
6940  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
6941  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
6942  STATIC_ASSERT(kSymbolTag != 0);
6943  __ And(tmp1, tmp1, Operand(tmp2));
6944  __ And(tmp1, tmp1, kIsSymbolMask);
6945  __ Branch(&miss, eq, tmp1, Operand(zero_reg));
6946  // Make sure a0 is non-zero. At this point input operands are
6947  // guaranteed to be non-zero.
6948  ASSERT(right.is(a0));
6949  STATIC_ASSERT(EQUAL == 0);
6950  STATIC_ASSERT(kSmiTag == 0);
6951  __ mov(v0, right);
6952  // Symbols are compared by identity.
6953  __ Ret(ne, left, Operand(right));
6954  __ li(v0, Operand(Smi::FromInt(EQUAL)));
6955  __ Ret();
6956 
6957  __ bind(&miss);
6958  GenerateMiss(masm);
6959 }
6960 
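// The return-value trick above, restated (illustrative helper, not V8 API):
// symbols compare by identity, EQUAL is the Smi zero, and a heap-object
// pointer is never zero, so returning the right operand itself doubles as a
// "not equal" answer.
static inline int SymbolCompareResult(int left, int right) {
  return (left == right) ? 0 /* Smi::FromInt(EQUAL) */ : right;
}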
6961 
6962 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
6963  ASSERT(state_ == CompareIC::STRINGS);
6964  Label miss;
6965 
6966  bool equality = Token::IsEqualityOp(op_);
6967 
6968  // Registers containing left and right operands respectively.
6969  Register left = a1;
6970  Register right = a0;
6971  Register tmp1 = a2;
6972  Register tmp2 = a3;
6973  Register tmp3 = t0;
6974  Register tmp4 = t1;
6975  Register tmp5 = t2;
6976 
6977  // Check that both operands are heap objects.
6978  __ JumpIfEitherSmi(left, right, &miss);
6979 
6980  // Check that both operands are strings. This leaves the instance
6981  // types loaded in tmp1 and tmp2.
6982  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
6983  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
6984  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
6985  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
6987  __ Or(tmp3, tmp1, tmp2);
6988  __ And(tmp5, tmp3, Operand(kIsNotStringMask));
6989  __ Branch(&miss, ne, tmp5, Operand(zero_reg));
6990 
6991  // Fast check for identical strings.
6992  Label left_ne_right;
6993  STATIC_ASSERT(EQUAL == 0);
6994  STATIC_ASSERT(kSmiTag == 0);
6995  __ Branch(&left_ne_right, ne, left, Operand(right));
6996  __ Ret(USE_DELAY_SLOT);
6997  __ mov(v0, zero_reg); // In the delay slot.
6998  __ bind(&left_ne_right);
6999 
7000  // Handle not identical strings.
7001 
7002  // Check that both strings are symbols. If they are, we're done
7003  // because we already know they are not identical.
7004  if (equality) {
7005  ASSERT(GetCondition() == eq);
7006  STATIC_ASSERT(kSymbolTag != 0);
7007  __ And(tmp3, tmp1, Operand(tmp2));
7008  __ And(tmp5, tmp3, Operand(kIsSymbolMask));
7009  Label is_symbol;
7010  __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg));
7011  // Make sure a0 is non-zero. At this point input operands are
7012  // guaranteed to be non-zero.
7013  ASSERT(right.is(a0));
7014  __ Ret(USE_DELAY_SLOT);
7015  __ mov(v0, a0); // In the delay slot.
7016  __ bind(&is_symbol);
7017  }
7018 
7019  // Check that both strings are sequential ASCII.
7020  Label runtime;
7021  __ JumpIfBothInstanceTypesAreNotSequentialAscii(
7022  tmp1, tmp2, tmp3, tmp4, &runtime);
7023 
7024  // Compare flat ASCII strings. Returns when done.
7025  if (equality) {
7027  masm, left, right, tmp1, tmp2, tmp3);
7028  } else {
7030  masm, left, right, tmp1, tmp2, tmp3, tmp4);
7031  }
7032 
7033  // Handle more complex cases in runtime.
7034  __ bind(&runtime);
7035  __ Push(left, right);
7036  if (equality) {
7037  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
7038  } else {
7039  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
7040  }
7041 
7042  __ bind(&miss);
7043  GenerateMiss(masm);
7044 }
7045 
7046 
7047 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
7048  ASSERT(state_ == CompareIC::OBJECTS);
7049  Label miss;
7050  __ And(a2, a1, Operand(a0));
7051  __ JumpIfSmi(a2, &miss);
7052 
7053  __ GetObjectType(a0, a2, a2);
7054  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
7055  __ GetObjectType(a1, a2, a2);
7056  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
7057 
7058  ASSERT(GetCondition() == eq);
7059  __ Ret(USE_DELAY_SLOT);
7060  __ subu(v0, a0, a1);
7061 
7062  __ bind(&miss);
7063  GenerateMiss(masm);
7064 }
7065 
7066 
7067 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
7068  Label miss;
7069  __ And(a2, a1, a0);
7070  __ JumpIfSmi(a2, &miss);
7073  __ Branch(&miss, ne, a2, Operand(known_map_));
7074  __ Branch(&miss, ne, a3, Operand(known_map_));
7075 
7076  __ Ret(USE_DELAY_SLOT);
7077  __ subu(v0, a0, a1);
7078 
7079  __ bind(&miss);
7080  GenerateMiss(masm);
7081 }
7082 
7083 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
7084  {
7085  // Call the runtime system in a fresh internal frame.
7086  ExternalReference miss =
7087  ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
7088  FrameScope scope(masm, StackFrame::INTERNAL);
7089  __ Push(a1, a0);
7090  __ push(ra);
7091  __ Push(a1, a0);
7092  __ li(t0, Operand(Smi::FromInt(op_)));
7093  __ addiu(sp, sp, -kPointerSize);
7094  __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
7095  __ sw(t0, MemOperand(sp)); // In the delay slot.
7096  // Compute the entry point of the rewritten stub.
7097  __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
7098  // Restore registers.
7099  __ Pop(a1, a0, ra);
7100  }
7101  __ Jump(a2);
7102 }
7103 
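// Sketch of the entry-point computation above (the parameters mirror
// Code::kHeaderSize and kHeapObjectTag; the helper itself is hypothetical):
// a Code pointer is tagged with kHeapObjectTag and its instructions start
// kHeaderSize bytes into the object, so the jump target is
// code + kHeaderSize - kHeapObjectTag.
static inline unsigned CodeEntryAddress(unsigned tagged_code_pointer,
                                        unsigned header_size,
                                        unsigned heap_object_tag) {
  return tagged_code_pointer + header_size - heap_object_tag;
}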
7104 
7105 void DirectCEntryStub::Generate(MacroAssembler* masm) {
7106  // No need to pop or drop anything, LeaveExitFrame will restore the old
7107  // stack, thus dropping the allocated space for the return value.
7108  // The saved ra is after the reserved stack space for the 4 args.
7109  __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
7110 
7111  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
7112  // In case of an error the return address may point to a memory area
7113  // filled with kZapValue by the GC.
7114  // Dereference the address and check for this.
7115  __ lw(t0, MemOperand(t9));
7116  __ Assert(ne, "Received invalid return address.", t0,
7117  Operand(reinterpret_cast<uint32_t>(kZapValue)));
7118  }
7119  __ Jump(t9);
7120 }
7121 
7122 
7123 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
7124  ExternalReference function) {
7125  __ li(t9, Operand(function));
7126  this->GenerateCall(masm, t9);
7127 }
7128 
7129 
7130 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
7131  Register target) {
7132  __ Move(t9, target);
7133  __ AssertStackIsAligned();
7134  // Allocate space for arg slots.
7135  __ Subu(sp, sp, kCArgsSlotsSize);
7136 
7137  // Block the trampoline pool through the whole function to make sure the
7138  // number of generated instructions is constant.
7139  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
7140 
7141  // We need to get the current 'pc' value, which is not available on MIPS.
7142  Label find_ra;
7143  masm->bal(&find_ra); // ra = pc + 8.
7144  masm->nop(); // Branch delay slot nop.
7145  masm->bind(&find_ra);
7146 
7147  const int kNumInstructionsToJump = 6;
7148  masm->addiu(ra, ra, kNumInstructionsToJump * kPointerSize);
7149  // Push return address (accessible to GC through exit frame pc).
7150  // This spot for ra was reserved in EnterExitFrame.
7151  masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
7152  masm->li(ra,
7153  Operand(reinterpret_cast<intptr_t>(GetCode().location()),
7154  RelocInfo::CODE_TARGET),
7155  CONSTANT_SIZE);
7156  // Call the function.
7157  masm->Jump(t9);
7158  // Make sure the stored 'ra' points to this position.
7159  ASSERT_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra));
7160 }
7161 
7162 
7163 void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
7164  Label* miss,
7165  Label* done,
7166  Register receiver,
7167  Register properties,
7168  Handle<String> name,
7169  Register scratch0) {
7170  // If the names of the slots in the range from 1 to kProbes - 1 for the hash
7171  // value are not equal to the name, and the kProbes-th slot is not used (its
7172  // name is the undefined value), then the hash table is guaranteed not to
7173  // contain the property. This holds even if some slots represent deleted
7174  // properties (their names are the hole value).
7175  for (int i = 0; i < kInlinedProbes; i++) {
7176  // scratch0 points to properties hash.
7177  // Compute the masked index: (hash + i + i * i) & mask.
7178  Register index = scratch0;
7179  // Capacity is smi 2^n.
7180  __ lw(index, FieldMemOperand(properties, kCapacityOffset));
7181  __ Subu(index, index, Operand(1));
7182  __ And(index, index, Operand(
7183  Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
7184 
7185  // Scale the index by multiplying by the entry size.
7187  __ sll(at, index, 1);
7188  __ Addu(index, index, at);
7189 
7190  Register entity_name = scratch0;
7191  // Having undefined at this place means the name is not contained.
7192  ASSERT_EQ(kSmiTagSize, 1);
7193  Register tmp = properties;
7194  __ sll(scratch0, index, 1);
7195  __ Addu(tmp, properties, scratch0);
7196  __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
7197 
7198  ASSERT(!tmp.is(entity_name));
7199  __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
7200  __ Branch(done, eq, entity_name, Operand(tmp));
7201 
7202  if (i != kInlinedProbes - 1) {
7203  // Load the hole ready for use below:
7204  __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
7205 
7206  // Stop if found the property.
7207  __ Branch(miss, eq, entity_name, Operand(Handle<String>(name)));
7208 
7209  Label the_hole;
7210  __ Branch(&the_hole, eq, entity_name, Operand(tmp));
7211 
7212  // Check if the entry name is not a symbol.
7213  __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
7214  __ lbu(entity_name,
7216  __ And(scratch0, entity_name, Operand(kIsSymbolMask));
7217  __ Branch(miss, eq, scratch0, Operand(zero_reg));
7218 
7219  __ bind(&the_hole);
7220 
7221  // Restore the properties.
7222  __ lw(properties,
7224  }
7225  }
7226 
7227  const int spill_mask =
7228  (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
7229  a2.bit() | a1.bit() | a0.bit() | v0.bit());
7230 
7231  __ MultiPush(spill_mask);
7232  __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
7233  __ li(a1, Operand(Handle<String>(name)));
7235  __ CallStub(&stub);
7236  __ mov(at, v0);
7237  __ MultiPop(spill_mask);
7238 
7239  __ Branch(done, eq, at, Operand(zero_reg));
7240  __ Branch(miss, ne, at, Operand(zero_reg));
7241 }
7242 
7243 
7244 // Probe the string dictionary in the |elements| register. Jump to the
7245 // |done| label if a property with the given name is found. Jump to
7246 // the |miss| label otherwise.
7247 // If lookup was successful |scratch2| will be equal to elements + 4 * index.
7248 void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
7249  Label* miss,
7250  Label* done,
7251  Register elements,
7252  Register name,
7253  Register scratch1,
7254  Register scratch2) {
7255  ASSERT(!elements.is(scratch1));
7256  ASSERT(!elements.is(scratch2));
7257  ASSERT(!name.is(scratch1));
7258  ASSERT(!name.is(scratch2));
7259 
7260  __ AssertString(name);
7261 
7262  // Compute the capacity mask.
7263  __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
7264  __ sra(scratch1, scratch1, kSmiTagSize); // convert smi to int
7265  __ Subu(scratch1, scratch1, Operand(1));
7266 
7267  // Generate an unrolled loop that performs a few probes before
7268  // giving up. Measurements done on Gmail indicate that 2 probes
7269  // cover ~93% of loads from dictionaries.
7270  for (int i = 0; i < kInlinedProbes; i++) {
7271  // Compute the masked index: (hash + i + i * i) & mask.
7272  __ lw(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
7273  if (i > 0) {
7274  // Add the probe offset (i + i * i) left-shifted, to avoid right-shifting
7275  // the hash in a separate instruction. The value hash + i + i * i is
7276  // right-shifted below before being masked with the capacity mask.
7277  ASSERT(StringDictionary::GetProbeOffset(i) <
7278  1 << (32 - String::kHashFieldOffset));
7279  __ Addu(scratch2, scratch2, Operand(
7280  StringDictionary::GetProbeOffset(i) << String::kHashShift));
7281  }
7282  __ srl(scratch2, scratch2, String::kHashShift);
7283  __ And(scratch2, scratch1, scratch2);
7284 
7285  // Scale the index by multiplying by the element size.
7287  // scratch2 = scratch2 * 3.
7288 
7289  __ sll(at, scratch2, 1);
7290  __ Addu(scratch2, scratch2, at);
7291 
7292  // Check if the key is identical to the name.
7293  __ sll(at, scratch2, 2);
7294  __ Addu(scratch2, elements, at);
7295  __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
7296  __ Branch(done, eq, name, Operand(at));
7297  }
7298 
7299  const int spill_mask =
7300  (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
7301  a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
7302  ~(scratch1.bit() | scratch2.bit());
7303 
7304  __ MultiPush(spill_mask);
7305  if (name.is(a0)) {
7306  ASSERT(!elements.is(a1));
7307  __ Move(a1, name);
7308  __ Move(a0, elements);
7309  } else {
7310  __ Move(a0, elements);
7311  __ Move(a1, name);
7312  }
7314  __ CallStub(&stub);
7315  __ mov(scratch2, a2);
7316  __ mov(at, v0);
7317  __ MultiPop(spill_mask);
7318 
7319  __ Branch(done, ne, at, Operand(zero_reg));
7320  __ Branch(miss, eq, at, Operand(zero_reg));
7321 }
7322 
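// Rough C++ model of one probe above (hypothetical helper, not V8 API):
// quadratic probing with the offsets named in the comments, a power-of-two
// capacity mask to keep the index in range, and a final scale by 3 because
// each StringDictionary entry occupies three words (key, value, details).
static unsigned ProbeEntryOffset(unsigned hash, unsigned probe,
                                 unsigned capacity) {
  unsigned mask = capacity - 1;  // Capacity is a power of two.
  unsigned index = (hash + probe + probe * probe) & mask;
  return index * 3;              // Word offset of the probed entry.
}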
7323 
7324 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
7325  // This stub overrides SometimesSetsUpAFrame() to return false. That means
7326  // we cannot call anything that could cause a GC from this stub.
7327  // Registers:
7328  //  result (v0): holds the lookup result.
7329  //  a1: key.
7330  //  a0: StringDictionary to probe.
7331  //  index (a2): will hold the index of the entry if the lookup is
7332  //              successful; may alias with result.
7333  // Returns:
7334  //  result is zero if the lookup failed, non-zero otherwise.
7335 
7336  Register result = v0;
7337  Register dictionary = a0;
7338  Register key = a1;
7339  Register index = a2;
7340  Register mask = a3;
7341  Register hash = t0;
7342  Register undefined = t1;
7343  Register entry_key = t2;
7344 
7345  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
7346 
7347  __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
7348  __ sra(mask, mask, kSmiTagSize);
7349  __ Subu(mask, mask, Operand(1));
7350 
7352 
7353  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
7354 
7355  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
7356  // Compute the masked index: (hash + i + i * i) & mask.
7357  // Capacity is smi 2^n.
7358  if (i > 0) {
7359  // Add the probe offset (i + i * i) left-shifted, to avoid right-shifting
7360  // the hash in a separate instruction. The value hash + i + i * i is
7361  // right-shifted below before being masked with the capacity mask.
7362  ASSERT(StringDictionary::GetProbeOffset(i) <
7363  1 << (32 - String::kHashFieldOffset));
7364  __ Addu(index, hash, Operand(
7365  StringDictionary::GetProbeOffset(i) << String::kHashShift));
7366  } else {
7367  __ mov(index, hash);
7368  }
7369  __ srl(index, index, String::kHashShift);
7370  __ And(index, mask, index);
7371 
7372  // Scale the index by multiplying by the entry size.
7374  // index *= 3.
7375  __ mov(at, index);
7376  __ sll(index, index, 1);
7377  __ Addu(index, index, at);
7378 
7379 
7380  ASSERT_EQ(kSmiTagSize, 1);
7381  __ sll(index, index, 2);
7382  __ Addu(index, index, dictionary);
7383  __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
7384 
7385  // Having undefined at this place means the name is not contained.
7386  __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
7387 
7388  // Stop if found the property.
7389  __ Branch(&in_dictionary, eq, entry_key, Operand(key));
7390 
7391  if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
7392  // Check if the entry name is not a symbol.
7393  __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
7394  __ lbu(entry_key,
7396  __ And(result, entry_key, Operand(kIsSymbolMask));
7397  __ Branch(&maybe_in_dictionary, eq, result, Operand(zero_reg));
7398  }
7399  }
7400 
7401  __ bind(&maybe_in_dictionary);
7402  // If we are doing a negative lookup, then probing failure should be
7403  // treated as a lookup success. For a positive lookup, probing failure
7404  // should be treated as a lookup failure.
7405  if (mode_ == POSITIVE_LOOKUP) {
7406  __ Ret(USE_DELAY_SLOT);
7407  __ mov(result, zero_reg);
7408  }
7409 
7410  __ bind(&in_dictionary);
7411  __ Ret(USE_DELAY_SLOT);
7412  __ li(result, 1);
7413 
7414  __ bind(&not_in_dictionary);
7415  __ Ret(USE_DELAY_SLOT);
7416  __ mov(result, zero_reg);
7417 }
7418 
7419 
7420 struct AheadOfTimeWriteBarrierStubList {
7421  Register object, value, address;
7422  RememberedSetAction action;
7423 };
7424 
7425 #define REG(Name) { kRegister_ ## Name ## _Code }
7426 
7427 static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
7428  // Used in RegExpExecStub.
7429  { REG(s2), REG(s0), REG(t3), EMIT_REMEMBERED_SET },
7430  { REG(s2), REG(a2), REG(t3), EMIT_REMEMBERED_SET },
7431  // Used in CompileArrayPushCall.
7432  // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
7433  // Also used in KeyedStoreIC::GenerateGeneric.
7434  { REG(a3), REG(t0), REG(t1), EMIT_REMEMBERED_SET },
7435  // Used in CompileStoreGlobal.
7436  { REG(t0), REG(a1), REG(a2), OMIT_REMEMBERED_SET },
7437  // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
7438  { REG(a1), REG(a2), REG(a3), EMIT_REMEMBERED_SET },
7439  { REG(a3), REG(a2), REG(a1), EMIT_REMEMBERED_SET },
7440  // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
7441  { REG(a2), REG(a1), REG(a3), EMIT_REMEMBERED_SET },
7442  { REG(a3), REG(a1), REG(a2), EMIT_REMEMBERED_SET },
7443  // KeyedStoreStubCompiler::GenerateStoreFastElement.
7444  { REG(a3), REG(a2), REG(t0), EMIT_REMEMBERED_SET },
7445  { REG(a2), REG(a3), REG(t0), EMIT_REMEMBERED_SET },
7446  // ElementsTransitionGenerator::GenerateMapChangeElementTransition
7447  // and ElementsTransitionGenerator::GenerateSmiToDouble
7448  // and ElementsTransitionGenerator::GenerateDoubleToObject
7449  { REG(a2), REG(a3), REG(t5), EMIT_REMEMBERED_SET },
7450  { REG(a2), REG(a3), REG(t5), OMIT_REMEMBERED_SET },
7451  // ElementsTransitionGenerator::GenerateDoubleToObject
7452  { REG(t2), REG(a2), REG(a0), EMIT_REMEMBERED_SET },
7453  { REG(a2), REG(t2), REG(t5), EMIT_REMEMBERED_SET },
7454  // StoreArrayLiteralElementStub::Generate
7455  { REG(t1), REG(a0), REG(t2), EMIT_REMEMBERED_SET },
7456  // FastNewClosureStub::Generate
7457  { REG(a2), REG(t0), REG(a1), EMIT_REMEMBERED_SET },
7458  // Null termination.
7459  { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
7460 };
7461 
7462 #undef REG
7463 
7464 
7466  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7467  !entry->object.is(no_reg);
7468  entry++) {
7469  if (object_.is(entry->object) &&
7470  value_.is(entry->value) &&
7471  address_.is(entry->address) &&
7472  remembered_set_action_ == entry->action &&
7473  save_fp_regs_mode_ == kDontSaveFPRegs) {
7474  return true;
7475  }
7476  }
7477  return false;
7478 }
7479 
7480 
7482  return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
7483 }
7484 
7485 
7488  stub1.GetCode()->set_is_pregenerated(true);
7489 }
7490 
7491 
7493  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7494  !entry->object.is(no_reg);
7495  entry++) {
7496  RecordWriteStub stub(entry->object,
7497  entry->value,
7498  entry->address,
7499  entry->action,
7500  kDontSaveFPRegs);
7501  stub.GetCode()->set_is_pregenerated(true);
7502  }
7503 }
7504 
7505 
7506 bool CodeStub::CanUseFPRegisters() {
7507  return CpuFeatures::IsSupported(FPU);
7508 }
7509 
7510 
7511 // Takes the input in 3 registers: address_, value_ and object_. A pointer to
7512 // the value has just been written into the object; now this stub makes sure
7513 // the GC is kept informed. The word in the object where the value has been
7514 // written is in the address register.
7515 void RecordWriteStub::Generate(MacroAssembler* masm) {
7516  Label skip_to_incremental_noncompacting;
7517  Label skip_to_incremental_compacting;
7518 
7519  // The first two branch+nop instructions are generated with labels so as to
7520  // get the offset fixed up correctly by the bind(Label*) call. We patch it
7521  // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
7522  // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
7523  // incremental heap marking.
7524  // See RecordWriteStub::Patch for details.
7525  __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
7526  __ nop();
7527  __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
7528  __ nop();
7529 
7530  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
7531  __ RememberedSetHelper(object_,
7532  address_,
7533  value_,
7534  save_fp_regs_mode_,
7536  }
7537  __ Ret();
7538 
7539  __ bind(&skip_to_incremental_noncompacting);
7540  GenerateIncremental(masm, INCREMENTAL);
7541 
7542  __ bind(&skip_to_incremental_compacting);
7543  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
7544 
7545  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
7546  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
7547 
7548  PatchBranchIntoNop(masm, 0);
7550 }
7551 
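// Conceptual view of the patching described above (the enum and helper are
// illustrative; the real work is done on the two branch instructions by
// RecordWriteStub::Patch): in store-buffer-only mode both initial branches
// act as nops and execution falls through to the remembered-set code;
// activating incremental marking patches the first branch, and incremental
// compaction the second, into a taken branch to the corresponding handler.
enum RecordWriteStubMode { kStoreBufferOnly, kIncrementalMode, kIncrementalCompactionMode };
static int LiveBranchIndex(RecordWriteStubMode mode) {
  if (mode == kIncrementalMode) return 0;            // First branch is taken.
  if (mode == kIncrementalCompactionMode) return 1;  // Second branch is taken.
  return -1;                                         // Neither branch is taken.
}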
7552 
7553 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
7554  regs_.Save(masm);
7555 
7556  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
7557  Label dont_need_remembered_set;
7558 
7559  __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
7560  __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
7561  regs_.scratch0(),
7562  &dont_need_remembered_set);
7563 
7564  __ CheckPageFlag(regs_.object(),
7565  regs_.scratch0(),
7567  ne,
7568  &dont_need_remembered_set);
7569 
7570  // First notify the incremental marker if necessary, then update the
7571  // remembered set.
7572  CheckNeedsToInformIncrementalMarker(
7573  masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
7574  InformIncrementalMarker(masm, mode);
7575  regs_.Restore(masm);
7576  __ RememberedSetHelper(object_,
7577  address_,
7578  value_,
7579  save_fp_regs_mode_,
7581 
7582  __ bind(&dont_need_remembered_set);
7583  }
7584 
7585  CheckNeedsToInformIncrementalMarker(
7586  masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
7587  InformIncrementalMarker(masm, mode);
7588  regs_.Restore(masm);
7589  __ Ret();
7590 }
7591 
7592 
7593 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
7594  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
7595  int argument_count = 3;
7596  __ PrepareCallCFunction(argument_count, regs_.scratch0());
7597  Register address =
7598  a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
7599  ASSERT(!address.is(regs_.object()));
7600  ASSERT(!address.is(a0));
7601  __ Move(address, regs_.address());
7602  __ Move(a0, regs_.object());
7603  if (mode == INCREMENTAL_COMPACTION) {
7604  __ Move(a1, address);
7605  } else {
7606  ASSERT(mode == INCREMENTAL);
7607  __ lw(a1, MemOperand(address, 0));
7608  }
7609  __ li(a2, Operand(ExternalReference::isolate_address()));
7610 
7611  AllowExternalCallThatCantCauseGC scope(masm);
7612  if (mode == INCREMENTAL_COMPACTION) {
7613  __ CallCFunction(
7614  ExternalReference::incremental_evacuation_record_write_function(
7615  masm->isolate()),
7616  argument_count);
7617  } else {
7618  ASSERT(mode == INCREMENTAL);
7619  __ CallCFunction(
7620  ExternalReference::incremental_marking_record_write_function(
7621  masm->isolate()),
7622  argument_count);
7623  }
7624  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
7625 }
7626 
7627 
7628 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
7629  MacroAssembler* masm,
7630  OnNoNeedToInformIncrementalMarker on_no_need,
7631  Mode mode) {
7632  Label on_black;
7633  Label need_incremental;
7634  Label need_incremental_pop_scratch;
7635 
7636  __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
7637  __ lw(regs_.scratch1(),
7638  MemOperand(regs_.scratch0(),
7640  __ Subu(regs_.scratch1(), regs_.scratch1(), Operand(1));
7641  __ sw(regs_.scratch1(),
7642  MemOperand(regs_.scratch0(),
7644  __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
7645 
7646  // Let's look at the color of the object: if it is not black, we don't have
7647  // to inform the incremental marker.
7648  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
7649 
7650  regs_.Restore(masm);
7651  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
7652  __ RememberedSetHelper(object_,
7653  address_,
7654  value_,
7655  save_fp_regs_mode_,
7657  } else {
7658  __ Ret();
7659  }
7660 
7661  __ bind(&on_black);
7662 
7663  // Get the value from the slot.
7664  __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
7665 
7666  if (mode == INCREMENTAL_COMPACTION) {
7667  Label ensure_not_white;
7668 
7669  __ CheckPageFlag(regs_.scratch0(), // Contains value.
7670  regs_.scratch1(), // Scratch.
7672  eq,
7673  &ensure_not_white);
7674 
7675  __ CheckPageFlag(regs_.object(),
7676  regs_.scratch1(), // Scratch.
7678  eq,
7679  &need_incremental);
7680 
7681  __ bind(&ensure_not_white);
7682  }
7683 
7684  // We need extra registers for this, so we push the object and the address
7685  // register temporarily.
7686  __ Push(regs_.object(), regs_.address());
7687  __ EnsureNotWhite(regs_.scratch0(), // The value.
7688  regs_.scratch1(), // Scratch.
7689  regs_.object(), // Scratch.
7690  regs_.address(), // Scratch.
7691  &need_incremental_pop_scratch);
7692  __ Pop(regs_.object(), regs_.address());
7693 
7694  regs_.Restore(masm);
7695  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
7696  __ RememberedSetHelper(object_,
7697  address_,
7698  value_,
7699  save_fp_regs_mode_,
7701  } else {
7702  __ Ret();
7703  }
7704 
7705  __ bind(&need_incremental_pop_scratch);
7706  __ Pop(regs_.object(), regs_.address());
7707 
7708  __ bind(&need_incremental);
7709 
7710  // Fall through when we need to inform the incremental marker.
7711 }
7712 
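// Simplified model of the per-page counter logic above (the struct is
// hypothetical; the real counter lives at the page's write-barrier counter
// offset): each page carries a budget of record-write operations, and once
// the decremented counter goes negative the stub informs the incremental
// marker instead of taking the fast path.
struct PageWriteBarrierBudget {
  int counter;
  bool DecrementAndCheckExhausted() { return --counter < 0; }  // true => inform marker.
};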
7713 
7714 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
7715  // ----------- S t a t e -------------
7716  // -- a0 : element value to store
7717  // -- a1 : array literal
7718  // -- a2 : map of array literal
7719  // -- a3 : element index as smi
7720  // -- t0 : array literal index in function as smi
7721  // -----------------------------------
7722 
7723  Label element_done;
7724  Label double_elements;
7725  Label smi_element;
7726  Label slow_elements;
7727  Label fast_elements;
7728 
7729  __ CheckFastElements(a2, t1, &double_elements);
7730  // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
7731  __ JumpIfSmi(a0, &smi_element);
7732  __ CheckFastSmiElements(a2, t1, &fast_elements);
7733 
7734  // Storing into the array literal requires an elements transition. Call into
7735  // the runtime.
7736  __ bind(&slow_elements);
7737  // call.
7738  __ Push(a1, a3, a0);
7741  __ Push(t1, t0);
7742  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
7743 
7744  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
7745  __ bind(&fast_elements);
7747  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
7748  __ Addu(t2, t1, t2);
7749  __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
7750  __ sw(a0, MemOperand(t2, 0));
7751  // Update the write barrier for the array store.
7752  __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
7754  __ Ret(USE_DELAY_SLOT);
7755  __ mov(v0, a0);
7756 
7757  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
7758  // and value is Smi.
7759  __ bind(&smi_element);
7761  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
7762  __ Addu(t2, t1, t2);
7764  __ Ret(USE_DELAY_SLOT);
7765  __ mov(v0, a0);
7766 
7767  // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
7768  __ bind(&double_elements);
7770  __ StoreNumberToDoubleElements(a0, a3, a1,
7771  // Overwrites all regs after this.
7772  t1, t2, t3, t5, a2,
7773  &slow_elements);
7774  __ Ret(USE_DELAY_SLOT);
7775  __ mov(v0, a0);
7776 }
7777 
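// Arithmetic behind the element address computation above (the constants
// assume the 32-bit layout used in this file: kPointerSizeLog2 == 2,
// kSmiTagSize == 1): a Smi index already carries a factor of two from its
// tag, so shifting it left by kPointerSizeLog2 - kSmiTagSize turns it into a
// byte offset of index * kPointerSize.
static inline unsigned SmiIndexToByteOffset(unsigned smi_index) {
  const int kPointerSizeLog2 = 2;
  const int kSmiTagSize = 1;
  return smi_index << (kPointerSizeLog2 - kSmiTagSize);
}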
7778 
7779 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
7780  if (entry_hook_ != NULL) {
7781  ProfileEntryHookStub stub;
7782  __ push(ra);
7783  __ CallStub(&stub);
7784  __ pop(ra);
7785  }
7786 }
7787 
7788 
7789 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
7790  // The entry hook is a "push ra" instruction, followed by a call.
7791  // Note: on MIPS "push" is 2 instructions.
7792  const int32_t kReturnAddressDistanceFromFunctionStart =
7794 
7795  // Save live volatile registers.
7796  __ Push(ra, t1, a1);
7797  const int32_t kNumSavedRegs = 3;
7798 
7799  // Compute the function's address for the first argument.
7800  __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
7801 
7802  // The caller's return address is above the saved temporaries.
7803  // Grab that for the second argument to the hook.
7804  __ Addu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
7805 
7806  // Align the stack if necessary.
7807  int frame_alignment = masm->ActivationFrameAlignment();
7808  if (frame_alignment > kPointerSize) {
7809  __ mov(t1, sp);
7810  ASSERT(IsPowerOf2(frame_alignment));
7811  __ And(sp, sp, Operand(-frame_alignment));
7812  }
7813 
7814 #if defined(V8_HOST_ARCH_MIPS)
7815  __ li(at, Operand(reinterpret_cast<int32_t>(&entry_hook_)));
7816  __ lw(at, MemOperand(at));
7817 #else
7818  // Under the simulator we need to indirect the entry hook through a
7819  // trampoline function at a known address.
7820  Address trampoline_address = reinterpret_cast<Address>(
7821  reinterpret_cast<intptr_t>(EntryHookTrampoline));
7822  ApiFunction dispatcher(trampoline_address);
7823  __ li(at, Operand(ExternalReference(&dispatcher,
7824  ExternalReference::BUILTIN_CALL,
7825  masm->isolate())));
7826 #endif
7827  __ Call(at);
7828 
7829  // Restore the stack pointer if needed.
7830  if (frame_alignment > kPointerSize) {
7831  __ mov(sp, t1);
7832  }
7833 
7834  __ Pop(ra, t1, a1);
7835  __ Ret();
7836 }
7837 
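// A minimal sketch of the stack-alignment step above, assuming the activation
// frame alignment is a power of two: clearing the low bits rounds the stack
// pointer down to the required boundary, and the original value is kept in a
// scratch register so it can be restored after the call.
static inline unsigned AlignStackDown(unsigned sp, unsigned alignment) {
  return sp & ~(alignment - 1);  // Alignment must be a power of two.
}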
7838 
7839 #undef __
7840 
7841 } } // namespace v8::internal
7842 
7843 #endif // V8_TARGET_ARCH_MIPS
byte * Address
Definition: globals.h:157
bool FLAG_enable_slow_asserts
static const int kResourceDataOffset
Definition: objects.h:7747
const FPURegister f4
void GenerateFast(MacroAssembler *masm)
const SwVfpRegister s2
void GenerateSlow(MacroAssembler *masm, const RuntimeCallHelper &call_helper)
static const int kBitFieldOffset
Definition: objects.h:5160
const int kCArgsSlotsSize
void GenerateFast(MacroAssembler *masm)
STATIC_CHECK((kStringRepresentationMask|kStringEncodingMask)==Internals::kFullStringRepresentationMask)
const intptr_t kSmiTagMask
Definition: v8.h:4016
static const int kCodeOffset
Definition: objects.h:5796
static const int kEvacuationCandidateMask
Definition: spaces.h:411
#define CHECK_EQ(expected, value)
Definition: checks.h:219
void GenerateSlow(MacroAssembler *masm, const RuntimeCallHelper &call_helper)
static const int kCodeEntryOffset
Definition: objects.h:6182
static const int kMaxAsciiCharCode
Definition: objects.h:7327
static const int kPrototypeOrInitialMapOffset
Definition: objects.h:6183
#define COMPARE(asm_, compare_string)
static int SlotOffset(int index)
Definition: contexts.h:425
const FPURegister f11
RecordWriteStub(Register object, Register value, Register address, RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode)
static const int kArgumentsObjectSize
Definition: heap.h:895
static void GenerateFixedRegStubsAheadOfTime()
const uint32_t kTwoByteStringTag
Definition: objects.h:469
const int kFailureTypeTagSize
Definition: objects.h:1081
const Register cp
static const uint32_t kExponentMask
Definition: objects.h:1352
static const char * GetName(TypeInfo type_info)
Definition: ic.cc:2241
static Failure * InternalError()
Definition: objects-inl.h:1019
static void GenerateCopyCharacters(MacroAssembler *masm, Register dest, Register src, Register count, Register scratch, bool ascii)
const FPURegister f0
static void LoadNumberAsInt32Double(MacroAssembler *masm, Register object, Destination destination, DwVfpRegister double_dst, DwVfpRegister double_scratch, Register dst1, Register dst2, Register heap_number_map, Register scratch1, Register scratch2, SwVfpRegister single_scratch, Label *not_int32)
static const char * Name(Value tok)
Definition: token.h:196
static Smi * FromInt(int value)
Definition: objects-inl.h:981
void Generate(MacroAssembler *masm)
static void DoubleIs32BitInteger(MacroAssembler *masm, Register src1, Register src2, Register dst, Register scratch, Label *not_int32)
static const int kOptimizedCodeMapOffset
Definition: objects.h:5797
const FPURegister f22
static const int kDataOffset
Definition: objects.h:6624
static const int kGlobalReceiverOffset
Definition: objects.h:6288
static const int kJSRegexpStaticOffsetsVectorSize
Definition: isolate.h:994
const FPURegister f10
const int kNumCalleeSavedFPU
Definition: frames-mips.h:87
static void GenerateCopyCharactersLong(MacroAssembler *masm, Register dest, Register src, Register count, Register scratch1, Register scratch2, Register scratch3, Register scratch4, Register scratch5, int flags)
void Generate(MacroAssembler *masm)
static Failure * OutOfMemoryException()
Definition: objects-inl.h:1029
static void GenerateHashGetHash(MacroAssembler *masm, Register hash)
static const int kExponentBias
Definition: objects.h:1356
int int32_t
Definition: unicode.cc:47
static const intptr_t kPageAlignmentMask
Definition: spaces.h:720
static Handle< Object > UninitializedSentinel(Isolate *isolate)
Definition: objects-inl.h:5339
static bool IsSupported(CpuFeature f)
static Failure * Exception()
Definition: objects-inl.h:1024
void Generate(MacroAssembler *masm)
virtual bool IsPregenerated()
void Generate(MacroAssembler *masm)
#define ASSERT(condition)
Definition: checks.h:270
static void LoadOperands(MacroAssembler *masm, FloatingPointHelper::Destination destination, Register heap_number_map, Register scratch1, Register scratch2, Label *not_number)
const RegList kJSCallerSaved
Definition: frames-arm.h:47
WriteInt32ToHeapNumberStub(Register the_int, Register the_heap_number, Register scratch)
const int kPointerSizeLog2
Definition: globals.h:232
static const int kInstanceSizeOffset
Definition: objects.h:5147
static void GenerateCompareFlatAsciiStrings(MacroAssembler *masm, Register left, Register right, Register scratch1, Register scratch2, Register scratch3, Register scratch4)
static Handle< Object > MegamorphicSentinel(Isolate *isolate)
Definition: objects-inl.h:5344
static const char * GetName(TypeInfo type_info)
Definition: ic.cc:2306
const uint32_t kStringRepresentationMask
Definition: objects.h:474
MemOperand GlobalObjectOperand()
static const int kSize
Definition: objects.h:8355
const intptr_t kObjectAlignmentMask
Definition: v8globals.h:45
MemOperand ContextOperand(Register context, int index)
static const int kContextOffset
Definition: objects.h:6187
const uint32_t kAsciiDataHintTag
Definition: objects.h:498
const uint32_t kShortExternalStringMask
Definition: objects.h:502
static void GenerateNegativeLookup(MacroAssembler *masm, Label *miss, Label *done, Register receiver, Register properties, Handle< String > name, Register scratch0)
static const int kLastSubjectOffset
Definition: jsregexp.h:191
ProfileEntryHookStub()
Definition: code-stubs.h:1161
const int kIntSize
Definition: globals.h:217
static const int kZeroHash
Definition: objects.h:7017
#define V8_INFINITY
Definition: globals.h:32
const RegList kCalleeSavedFPU
Definition: frames-mips.h:79
void Generate(MacroAssembler *masm)
static const int kHashFieldOffset
Definition: objects.h:7319
static const int kSize
Definition: objects.h:8333
static const int kLastCaptureCountOffset
Definition: jsregexp.h:189
const RegList kCallerSavedFPU
Definition: frames-mips.h:89
static const int kFirstOffset
Definition: objects.h:7653
static const int kMinLength
Definition: objects.h:7666
const uint32_t kNotStringTag
Definition: objects.h:457
const Register sp
static const int kParentOffset
Definition: objects.h:7705
static const int kNonMantissaBitsInTopWord
Definition: objects.h:1359
static const int kLiteralsOffset
Definition: objects.h:6188
#define UNREACHABLE()
Definition: checks.h:50
DwVfpRegister DoubleRegister
static const int kArgumentsObjectSizeStrict
Definition: heap.h:898
STATIC_ASSERT((FixedDoubleArray::kHeaderSize &kDoubleAlignmentMask)==0)
static const int kLengthOffset
Definition: objects.h:7318
const uint32_t kIsSymbolMask
Definition: objects.h:462
static const int kExponentShift
Definition: objects.h:1357
const intptr_t kFailureTagMask
Definition: v8globals.h:64
static const int kValueOffset
Definition: objects.h:1342
const int kFailureTagSize
Definition: v8globals.h:63
static void GenerateFlatAsciiStringEquals(MacroAssembler *masm, Register left, Register right, Register scratch1, Register scratch2, Register scratch3)
const int kDoubleSize
Definition: globals.h:218
static const int kIrregexpCaptureCountOffset
Definition: objects.h:6670
static const int kInputOffset
Definition: objects.h:8354
static const size_t kWriteBarrierCounterOffset
Definition: spaces.h:504
static bool IsBitOp(Value op)
Definition: token.h:256
const uint32_t kIsIndirectStringMask
Definition: objects.h:481
const bool IsMipsSoftFloatABI
void Generate(MacroAssembler *masm)
const int kPointerSize
Definition: globals.h:220
static void LoadSmis(MacroAssembler *masm, Destination destination, Register scratch1, Register scratch2)
static void CallCCodeForDoubleOperation(MacroAssembler *masm, Token::Value op, Register heap_number_result, Register scratch)
static const int kStringWrapperSafeForDefaultValueOf
Definition: objects.h:5177
static void MaybeCallEntryHook(MacroAssembler *masm)
const Address kZapValue
Definition: v8globals.h:80
const int kHeapObjectTag
Definition: v8.h:4009
const RegList kCalleeSaved
Definition: frames-arm.h:63
const uint32_t kAsciiDataHintMask
Definition: objects.h:497
#define __
static void ConvertNumberToInt32(MacroAssembler *masm, Register object, Register dst, Register heap_number_map, Register scratch1, Register scratch2, Register scratch3, DwVfpRegister double_scratch, Label *not_int32)
void Generate(MacroAssembler *masm)
static const int kPropertiesOffset
Definition: objects.h:2171
static void LoadNumberAsInt32(MacroAssembler *masm, Register object, Register dst, Register heap_number_map, Register scratch1, Register scratch2, Register scratch3, DwVfpRegister double_scratch0, DwVfpRegister double_scratch1, Label *not_int32)
static void PatchBranchIntoNop(MacroAssembler *masm, int pos)
static const int kMinLength
Definition: objects.h:7717
const SwVfpRegister s0
const uint32_t kShortExternalStringTag
Definition: objects.h:503
static void GenerateHashAddCharacter(MacroAssembler *masm, Register hash, Register character)
static void Generate(MacroAssembler *masm, Register string, Register index, Register result, Label *call_runtime)
static const int kHeaderSize
Definition: objects.h:7517
static const int kNextFunctionLinkOffset
Definition: objects.h:6190
void Generate(MacroAssembler *masm)
const int kBitsPerByte
Definition: globals.h:237
static int SizeFor(int length)
Definition: objects.h:2434
static const int kElementsOffset
Definition: objects.h:2172
bool IsPowerOf2(T x)
Definition: utils.h:50
const FPURegister f2
const uint32_t kStringTag
Definition: objects.h:456
static bool IsEqualityOp(Value op)
Definition: token.h:222
static const int kCallTargetAddressOffset
static const int kOffsetOffset
Definition: objects.h:7706
friend class BlockTrampolinePoolScope
void Generate(MacroAssembler *masm)
static const int kLengthOffset
Definition: objects.h:8332
static int SizeFor(int length)
Definition: objects.h:2353
void GenerateSlow(MacroAssembler *masm, const RuntimeCallHelper &call_helper)
virtual void Generate(MacroAssembler *masm)
const SwVfpRegister s1
static const int kLastMatchOverhead
Definition: jsregexp.h:186
static const int kHeaderSize
Definition: objects.h:2296
const intptr_t kPointerAlignmentMask
Definition: v8globals.h:49
void Generate(MacroAssembler *masm)
#define ISOLATE
Definition: isolate.h:1435
void GenerateCall(MacroAssembler *masm, ExternalReference function)
static const int kMapOffset
Definition: objects.h:1261
static const int kMantissaBitsInTopWord
Definition: objects.h:1358
bool is(Register reg) const
static const int kSkipEvacuationSlotsRecordingMask
Definition: spaces.h:414
const uint32_t kIsNotStringMask
Definition: objects.h:455
const int kNumCalleeSaved
Definition: frames-arm.h:83
const uint32_t kSlicedNotConsMask
Definition: objects.h:492
static const int kLengthOffset
Definition: objects.h:2295
static void ConvertIntToDouble(MacroAssembler *masm, Register int_scratch, Destination destination, DwVfpRegister double_dst, Register dst1, Register dst2, Register scratch2, SwVfpRegister single_scratch)
static const int kSize
Definition: objects.h:1350
void Generate(MacroAssembler *masm)
void Generate(MacroAssembler *masm)
#define kDoubleRegZero
static const int kSecondOffset
Definition: objects.h:7654
static void GeneratePositiveLookup(MacroAssembler *masm, Label *miss, Label *done, Register elements, Register name, Register r0, Register r1)
static const int kCallerFPOffset
Definition: frames-arm.h:117
static const int kArgumentsLengthIndex
Definition: heap.h:901
MemOperand FieldMemOperand(Register object, int offset)
const intptr_t kObjectAlignment
Definition: v8globals.h:44
static const int kFirstCaptureOffset
Definition: jsregexp.h:195
#define UNIMPLEMENTED()
Definition: checks.h:48
static const uint32_t kHashBitMask
Definition: objects.h:7345
static const uint32_t kSignMask
Definition: objects.h:1351
static const int kLastInputOffset
Definition: jsregexp.h:193
const int kSmiShiftSize
Definition: v8.h:4060
const int kSmiTagSize
Definition: v8.h:4015
static const int kHeaderSize
Definition: objects.h:4549
void GenerateBody(MacroAssembler *masm, bool is_construct)
static const int kDataAsciiCodeOffset
Definition: objects.h:6666
#define ASSERT_EQ(v1, v2)
Definition: checks.h:271
static void GenerateAheadOfTime()
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 and VFP2 enable use of VFP2 instructions if available enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of MIPS FPU instructions if expose natives in global object expose gc extension number of stack frames to capture disable builtin natives files print a stack trace if an assertion failure occurs use random jit cookie to mask large constants trace lazy optimization use adaptive optimizations prepare for turning on always opt minimum length for automatic enable preparsing maximum number of optimization attempts before giving up cache prototype transitions automatically set the debug break flag when debugger commands are in the queue always cause a debug break before aborting maximum length of function source code printed in a stack trace max size of the new max size of the old max size of executable always perform global GCs print one trace line following each garbage collection do not print trace line after scavenger collection print more details following each garbage collection print amount of external allocated memory after each time it is adjusted flush code that we expect not to use again before full gc do incremental marking steps track object counts and memory usage use caching Perform compaction on every full GC Never perform compaction on full GC testing only Compact code space on full incremental collections Default seed for initializing random allows verbose printing trace parsing and preparsing Check icache flushes in ARM and MIPS simulator Stack alingment in bytes in print stack trace when throwing exceptions randomize hashes to avoid predictable hash Fixed seed to use to hash property activate a timer that switches between V8 threads testing_bool_flag float flag Seed used for threading test randomness A filename with extra code to be included in 
the Print usage message
Definition: flags.cc:495
static const int kArgumentsCalleeIndex
Definition: heap.h:903
const int kSmiTag
Definition: v8.h:4014
static const int kIsUndetectable
Definition: objects.h:5171
static const int kHeaderSize
Definition: objects.h:2173
const FPURegister f12
void Generate(MacroAssembler *masm)
static const int kEntryLength
Definition: objects.h:5403
const FPURegister f6
void GenerateFast(MacroAssembler *masm)
const int kFailureTag
Definition: v8globals.h:62
static void GenerateLookupNumberStringCache(MacroAssembler *masm, Register object, Register result, Register scratch1, Register scratch2, Register scratch3, bool object_is_smi, Label *not_found)
static const int kInstrSize
static const int kDataTagOffset
Definition: objects.h:6664
V8 runtime flag definition (concatenated flag help text elided)
Definition: flags.cc:301
static const int kPrototypeOffset
Definition: objects.h:5126
static const int kSize
Definition: objects.h:6191
const Register no_reg
static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler *masm, Register c1, Register c2, Register scratch1, Register scratch2, Register scratch3, Register scratch4, Register scratch5, Label *not_found)
V8 runtime flag definition (DEFINE_bool(code_comments, ...); concatenated flag help text elided)
static const int kMaxLength
Definition: objects.h:7386
static const int kValueOffset
Definition: objects.h:6385
bool Contains(Type type) const
Definition: code-stubs.h:1055
const uint32_t kSymbolTag
Definition: objects.h:464
const Register fp
static const int kNativeContextOffset
Definition: objects.h:6286
const uint32_t kAsciiStringTag
Definition: objects.h:470
static const int kConstructStubOffset
Definition: objects.h:5799
static const int kExponentBits
Definition: objects.h:1355
static const int kHashShift
Definition: objects.h:7341
static const int kSharedFunctionInfoOffset
Definition: objects.h:6185
V8 runtime flag definition ("Print usage including flags"; remaining concatenated flag help text elided)
Definition: flags.cc:495
const FPURegister f14
void Generate(MacroAssembler *masm)
static const int kBitField2Offset
Definition: objects.h:5161
void Generate(MacroAssembler *masm)
CEntryStub(int result_size, SaveFPRegsMode save_doubles=kDontSaveFPRegs)
Definition: code-stubs.h:630
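CEntryStub builds the trampoline that generated code uses to call into a C++ runtime function; the constructor records how many values the callee returns and whether double registers must be saved across the call. A hedged usage sketch, following the pattern used elsewhere in V8 3.x stubs rather than any particular line of this file:

// Assumed context: inside a stub's Generate() body, where __ expands to
// ACCESS_MASM(masm). One result value, FP registers preserved.
CEntryStub stub(1, kSaveFPRegs);
__ CallStub(&stub);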
void check(i::Vector< const char > string)
static const int kExponentOffset
Definition: objects.h:1348
static const int kDataUC16CodeOffset
Definition: objects.h:6668
void Generate(MacroAssembler *masm)
StoreBufferOverflowStub(SaveFPRegsMode save_fp)
static void GenerateHashInit(MacroAssembler *masm, Register hash, Register character)
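GenerateHashInit, together with the companion add-character and get-hash helpers, emits V8's running string-hash computation as MIPS instructions. A scalar C++ sketch of that sequence (the shift constants follow V8's one-at-a-time style string hash and should be treated as an assumption, not a verified copy of the emitted code):

#include <cstdint>

// Fold one character into the running hash.
uint32_t HashAddCharacter(uint32_t hash, uint32_t character) {
  hash += character;
  hash += hash << 10;
  hash ^= hash >> 6;
  return hash;
}

// Seed the running hash with the first character (the real stub mixes in
// the isolate's hash seed; a zero seed is assumed here for simplicity).
uint32_t HashInit(uint32_t character) {
  return HashAddCharacter(0u, character);
}

// Final avalanche step producing the string's hash value.
uint32_t HashGetHash(uint32_t hash) {
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  return hash;
}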
static bool IsOrderedRelationalCompareOp(Value op)
Definition: token.h:218
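The comparison stubs use this predicate to separate the ordered relational operators, where a NaN operand must force a false result, from the equality operators, which take a different path. A minimal sketch of such a predicate, assuming V8's Token::Value enumerators LT, GT, LTE and GTE (the real definition lives in token.h as noted above):

#include "token.h"  // V8's Token::Value enumeration (assumed include path)

// Inside namespace v8::internal.
static bool IsOrderedRelationalCompareOp(Token::Value op) {
  // True for <, <=, > and >=; equality and strict equality return false.
  return op == Token::LT || op == Token::LTE ||
         op == Token::GT || op == Token::GTE;
}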
const uint32_t kStringEncodingMask
Definition: objects.h:468
static const int kInstanceTypeOffset
Definition: objects.h:5158
static const int kIndexOffset
Definition: objects.h:8353
const FPURegister f8
void Generate(MacroAssembler *masm)
static const int kMantissaOffset
Definition: objects.h:1347
void Generate(MacroAssembler *masm)