v8 3.11.10 (node 0.8.26)
V8 is Google's open-source JavaScript engine.
code-stubs-mips.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if defined(V8_TARGET_ARCH_MIPS)
31 
32 #include "bootstrapper.h"
33 #include "code-stubs.h"
34 #include "codegen.h"
35 #include "regexp-macro-assembler.h"
36 
37 namespace v8 {
38 namespace internal {
39 
40 
41 #define __ ACCESS_MASM(masm)
42 
43 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
44  Label* slow,
45  Condition cc,
46  bool never_nan_nan);
47 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
48  Register lhs,
49  Register rhs,
50  Label* rhs_not_nan,
51  Label* slow,
52  bool strict);
53 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
54 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
55  Register lhs,
56  Register rhs);
57 
58 
59 // Check if the operand is a heap number.
60 static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
61  Register scratch1, Register scratch2,
62  Label* not_a_heap_number) {
63  __ lw(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
64  __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
65  __ Branch(not_a_heap_number, ne, scratch1, Operand(scratch2));
66 }
67 
68 
69 void ToNumberStub::Generate(MacroAssembler* masm) {
70  // The ToNumber stub takes one argument in a0.
71  Label check_heap_number, call_builtin;
72  __ JumpIfNotSmi(a0, &check_heap_number);
73  __ Ret(USE_DELAY_SLOT);
74  __ mov(v0, a0);
75 
76  __ bind(&check_heap_number);
77  EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
78  __ Ret(USE_DELAY_SLOT);
79  __ mov(v0, a0);
80 
81  __ bind(&call_builtin);
82  __ push(a0);
83  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
84 }
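// Note (not part of the original file): the Ret(USE_DELAY_SLOT) / mov(v0, a0)
// pairs above rely on the MIPS branch delay slot. USE_DELAY_SLOT tells the
// macro assembler that the next emitted instruction fills the slot of the
// return jump, so the move into the return register v0 executes before
// control actually transfers back to the caller.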
85 
86 
87 void FastNewClosureStub::Generate(MacroAssembler* masm) {
88  // Create a new closure from the given function info in new
89  // space. Set the context to the current context in cp.
90  Label gc;
91 
92  // Pop the function info from the stack.
93  __ pop(a3);
94 
95  // Attempt to allocate new JSFunction in new space.
96  __ AllocateInNewSpace(JSFunction::kSize,
97  v0,
98  a1,
99  a2,
100  &gc,
101  TAG_OBJECT);
102 
103  int map_index = (language_mode_ == CLASSIC_MODE)
106 
107  // Compute the function map in the current global context and set that
108  // as the map of the allocated object.
111  __ lw(a2, MemOperand(a2, Context::SlotOffset(map_index)));
113 
114  // Initialize the rest of the function. We don't have to update the
115  // write barrier because the allocated object is in new space.
116  __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
117  __ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
118  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
126 
127  // Initialize the code pointer in the function to be the one
128  // found in the shared function info object.
130  __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
131 
132  // Return result. The argument function info has been popped already.
134  __ Ret();
135 
136  // Create a new closure through the slower runtime call.
137  __ bind(&gc);
138  __ LoadRoot(t0, Heap::kFalseValueRootIndex);
139  __ Push(cp, a3, t0);
140  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
141 }
142 
143 
144 void FastNewContextStub::Generate(MacroAssembler* masm) {
145  // Try to allocate the context in new space.
146  Label gc;
147  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
148 
149  // Attempt to allocate the context in new space.
150  __ AllocateInNewSpace(FixedArray::SizeFor(length),
151  v0,
152  a1,
153  a2,
154  &gc,
155  TAG_OBJECT);
156 
157  // Load the function from the stack.
158  __ lw(a3, MemOperand(sp, 0));
159 
160  // Set up the object header.
161  __ LoadRoot(a1, Heap::kFunctionContextMapRootIndex);
162  __ li(a2, Operand(Smi::FromInt(length)));
165 
166  // Set up the fixed slots, copy the global object from the previous context.
168  __ li(a1, Operand(Smi::FromInt(0)));
173 
174  // Initialize the rest of the slots to undefined.
175  __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
176  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
177  __ sw(a1, MemOperand(v0, Context::SlotOffset(i)));
178  }
179 
180  // Remove the on-stack argument and return.
181  __ mov(cp, v0);
182  __ DropAndRet(1);
183 
184  // Need to collect. Call into runtime system.
185  __ bind(&gc);
186  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
187 }
188 
189 
190 void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
191  // Stack layout on entry:
192  //
193  // [sp]: function.
194  // [sp + kPointerSize]: serialized scope info
195 
196  // Try to allocate the context in new space.
197  Label gc;
198  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
199  __ AllocateInNewSpace(FixedArray::SizeFor(length),
200  v0, a1, a2, &gc, TAG_OBJECT);
201 
202  // Load the function from the stack.
203  __ lw(a3, MemOperand(sp, 0));
204 
205  // Load the serialized scope info from the stack.
206  __ lw(a1, MemOperand(sp, 1 * kPointerSize));
207 
208  // Set up the object header.
209  __ LoadRoot(a2, Heap::kBlockContextMapRootIndex);
211  __ li(a2, Operand(Smi::FromInt(length)));
213 
214  // If this block context is nested in the global context we get a smi
215  // sentinel instead of a function. The block context should get the
216  // canonical empty function of the global context as its closure which
217  // we still have to look up.
218  Label after_sentinel;
219  __ JumpIfNotSmi(a3, &after_sentinel);
220  if (FLAG_debug_code) {
221  const char* message = "Expected 0 as a Smi sentinel";
222  __ Assert(eq, message, a3, Operand(zero_reg));
223  }
224  __ lw(a3, GlobalObjectOperand());
227  __ bind(&after_sentinel);
228 
229  // Set up the fixed slots, copy the global object from the previous context.
235 
236  // Initialize the rest of the slots to the hole value.
237  __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
238  for (int i = 0; i < slots_; i++) {
239  __ sw(a1, ContextOperand(v0, i + Context::MIN_CONTEXT_SLOTS));
240  }
241 
242  // Remove the on-stack argument and return.
243  __ mov(cp, v0);
244  __ DropAndRet(2);
245 
246  // Need to collect. Call into runtime system.
247  __ bind(&gc);
248  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
249 }
250 
251 
252 static void GenerateFastCloneShallowArrayCommon(
253  MacroAssembler* masm,
254  int length,
256  Label* fail) {
257  // Registers on entry:
258  // a3: boilerplate literal array.
260 
261  // All sizes here are multiples of kPointerSize.
262  int elements_size = 0;
263  if (length > 0) {
265  ? FixedDoubleArray::SizeFor(length)
266  : FixedArray::SizeFor(length);
267  }
268  int size = JSArray::kSize + elements_size;
269 
270  // Allocate both the JS array and the elements array in one big
271  // allocation. This avoids multiple limit checks.
272  __ AllocateInNewSpace(size,
273  v0,
274  a1,
275  a2,
276  fail,
277  TAG_OBJECT);
278 
279  // Copy the JS array part.
280  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
281  if ((i != JSArray::kElementsOffset) || (length == 0)) {
282  __ lw(a1, FieldMemOperand(a3, i));
283  __ sw(a1, FieldMemOperand(v0, i));
284  }
285  }
286 
287  if (length > 0) {
288  // Get hold of the elements array of the boilerplate and set up the
289  // elements pointer in the resulting object.
291  __ Addu(a2, v0, Operand(JSArray::kSize));
293 
294  // Copy the elements array.
295  ASSERT((elements_size % kPointerSize) == 0);
296  __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
297  }
298 }
299 
300 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
301  // Stack layout on entry:
302  //
303  // [sp]: constant elements.
304  // [sp + kPointerSize]: literal index.
305  // [sp + (2 * kPointerSize)]: literals array.
306 
307  // Load boilerplate object into r3 and check if we need to create a
308  // boilerplate.
309  Label slow_case;
310  __ lw(a3, MemOperand(sp, 2 * kPointerSize));
311  __ lw(a0, MemOperand(sp, 1 * kPointerSize));
312  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
313  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
314  __ Addu(t0, a3, t0);
315  __ lw(a3, MemOperand(t0));
316  __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
317  __ Branch(&slow_case, eq, a3, Operand(t1));
318 
319  FastCloneShallowArrayStub::Mode mode = mode_;
320  if (mode == CLONE_ANY_ELEMENTS) {
321  Label double_elements, check_fast_elements;
324  __ LoadRoot(t1, Heap::kFixedCOWArrayMapRootIndex);
325  __ Branch(&check_fast_elements, ne, v0, Operand(t1));
326  GenerateFastCloneShallowArrayCommon(masm, 0,
327  COPY_ON_WRITE_ELEMENTS, &slow_case);
328  // Return and remove the on-stack parameters.
329  __ DropAndRet(3);
330 
331  __ bind(&check_fast_elements);
332  __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
333  __ Branch(&double_elements, ne, v0, Operand(t1));
334  GenerateFastCloneShallowArrayCommon(masm, length_,
335  CLONE_ELEMENTS, &slow_case);
336  // Return and remove the on-stack parameters.
337  __ DropAndRet(3);
338 
339  __ bind(&double_elements);
340  mode = CLONE_DOUBLE_ELEMENTS;
341  // Fall through to generate the code to handle double elements.
342  }
343 
344  if (FLAG_debug_code) {
345  const char* message;
346  Heap::RootListIndex expected_map_index;
347  if (mode == CLONE_ELEMENTS) {
348  message = "Expected (writable) fixed array";
349  expected_map_index = Heap::kFixedArrayMapRootIndex;
350  } else if (mode == CLONE_DOUBLE_ELEMENTS) {
351  message = "Expected (writable) fixed double array";
352  expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
353  } else {
355  message = "Expected copy-on-write fixed array";
356  expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
357  }
358  __ push(a3);
361  __ LoadRoot(at, expected_map_index);
362  __ Assert(eq, message, a3, Operand(at));
363  __ pop(a3);
364  }
365 
366  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
367 
368  // Return and remove the on-stack parameters.
369  __ DropAndRet(3);
370 
371  __ bind(&slow_case);
372  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
373 }
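The literal index loaded from the stack above is a Smi, which is why a shift by
kPointerSizeLog2 - kSmiTagSize turns it directly into a byte offset. A host-side
C++ sketch of that address arithmetic (not part of the original file; the
tagging constants below are assumptions matching a 32-bit build):

    #include <cstdint>

    // Assumed 32-bit V8 tagging constants, for illustration only.
    const int kPointerSizeLog2Sketch = 2;       // 4-byte pointers
    const int kSmiTagSizeSketch = 1;            // a Smi stores the value shifted left by 1
    const int kHeapObjectTagSketch = 1;         // heap pointers have their low bit set
    const int kFixedArrayHeaderSizeSketch = 8;  // map word + length word

    // Returns the address of the cached boilerplate slot inside the literals array.
    uintptr_t BoilerplateSlotAddress(uintptr_t tagged_literals, int32_t smi_index) {
      uintptr_t first_slot =
          tagged_literals + kFixedArrayHeaderSizeSketch - kHeapObjectTagSketch;
      // smi_index == index << kSmiTagSize, so one more left shift yields the byte
      // offset index * kPointerSize, matching the sll/Addu pair in the stub above.
      return first_slot + (static_cast<uintptr_t>(static_cast<uint32_t>(smi_index))
                           << (kPointerSizeLog2Sketch - kSmiTagSizeSketch));
    }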
374 
375 
376 void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
377  // Stack layout on entry:
378  //
379  // [sp]: object literal flags.
380  // [sp + kPointerSize]: constant properties.
381  // [sp + (2 * kPointerSize)]: literal index.
382  // [sp + (3 * kPointerSize)]: literals array.
383 
384  // Load boilerplate object into a3 and check if we need to create a
385  // boilerplate.
386  Label slow_case;
387  __ lw(a3, MemOperand(sp, 3 * kPointerSize));
388  __ lw(a0, MemOperand(sp, 2 * kPointerSize));
389  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
390  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
391  __ Addu(a3, t0, a3);
392  __ lw(a3, MemOperand(a3));
393  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
394  __ Branch(&slow_case, eq, a3, Operand(t0));
395 
396  // Check that the boilerplate contains only fast properties and we can
397  // statically determine the instance size.
398  int size = JSObject::kHeaderSize + length_ * kPointerSize;
401  __ Branch(&slow_case, ne, a0, Operand(size >> kPointerSizeLog2));
402 
403  // Allocate the JS object and copy header together with all in-object
404  // properties from the boilerplate.
405  __ AllocateInNewSpace(size, v0, a1, a2, &slow_case, TAG_OBJECT);
406  for (int i = 0; i < size; i += kPointerSize) {
407  __ lw(a1, FieldMemOperand(a3, i));
408  __ sw(a1, FieldMemOperand(v0, i));
409  }
410 
411  // Return and remove the on-stack parameters.
412  __ DropAndRet(4);
413 
414  __ bind(&slow_case);
415  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
416 }
417 
418 
419 // Takes a Smi and converts to an IEEE 64 bit floating point value in two
420 // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
421 // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
422 // scratch register. Destroys the source register. No GC occurs during this
423 // stub so you don't have to set up the frame.
424 class ConvertToDoubleStub : public CodeStub {
425  public:
426  ConvertToDoubleStub(Register result_reg_1,
427  Register result_reg_2,
428  Register source_reg,
429  Register scratch_reg)
430  : result1_(result_reg_1),
431  result2_(result_reg_2),
432  source_(source_reg),
433  zeros_(scratch_reg) { }
434 
435  private:
436  Register result1_;
437  Register result2_;
438  Register source_;
439  Register zeros_;
440 
441  // Minor key encoding in 16 bits.
442  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
443  class OpBits: public BitField<Token::Value, 2, 14> {};
444 
445  Major MajorKey() { return ConvertToDouble; }
446  int MinorKey() {
447  // Encode the parameters in a unique 16 bit value.
448  return result1_.code() +
449  (result2_.code() << 4) +
450  (source_.code() << 8) +
451  (zeros_.code() << 12);
452  }
453 
454  void Generate(MacroAssembler* masm);
455 };
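For reference, a stand-alone restatement (not from the original file) of the
MinorKey packing above: four register codes, four bits each, combined into a
single 16-bit value. It assumes, as the stub does, that the register codes used
here all fit in four bits.

    // Mirrors the expression in MinorKey() above; illustrative only.
    static int ConvertToDoubleMinorKeySketch(int result1, int result2,
                                             int source, int zeros) {
      return result1 | (result2 << 4) | (source << 8) | (zeros << 12);
    }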
456 
457 
458 void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
459 #ifndef BIG_ENDIAN_FLOATING_POINT
460  Register exponent = result1_;
461  Register mantissa = result2_;
462 #else
463  Register exponent = result2_;
464  Register mantissa = result1_;
465 #endif
466  Label not_special;
467  // Convert from Smi to integer.
468  __ sra(source_, source_, kSmiTagSize);
469  // Move sign bit from source to destination. This works because the sign bit
470  // in the exponent word of the double has the same position and polarity as
471  // the 2's complement sign bit in a Smi.
472  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
473  __ And(exponent, source_, Operand(HeapNumber::kSignMask));
474  // Subtract from 0 if source was negative.
475  __ subu(at, zero_reg, source_);
476  __ Movn(source_, at, exponent);
477 
478  // We have -1, 0 or 1, which we treat specially. Register source_ contains
479  // absolute value: it is either equal to 1 (special case of -1 and 1),
480  // greater than 1 (not a special case) or less than 1 (special case of 0).
481  __ Branch(&not_special, gt, source_, Operand(1));
482 
483  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
484  const uint32_t exponent_word_for_1 =
486  // Safe to use 'at' as dest reg here.
487  __ Or(at, exponent, Operand(exponent_word_for_1));
488  __ Movn(exponent, at, source_); // Write exp when source not 0.
489  // 1, 0 and -1 all have 0 for the second word.
490  __ Ret(USE_DELAY_SLOT);
491  __ mov(mantissa, zero_reg);
492 
493  __ bind(&not_special);
494  // Count leading zeros.
495  // Gets the wrong answer for 0, but we already checked for that case above.
496  __ Clz(zeros_, source_);
497  // Compute exponent and or it into the exponent register.
498  // We use mantissa as a scratch register here.
499  __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
500  __ subu(mantissa, mantissa, zeros_);
501  __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
502  __ Or(exponent, exponent, mantissa);
503 
504  // Shift up the source chopping the top bit off.
505  __ Addu(zeros_, zeros_, Operand(1));
506  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
507  __ sllv(source_, source_, zeros_);
508  // Compute lower part of fraction (last 12 bits).
509  __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
510  // And the top (top 20 bits).
511  __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
512 
513  __ Ret(USE_DELAY_SLOT);
514  __ or_(exponent, exponent, source_);
515 }
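What this stub computes can be stated in ordinary C++. The sketch below is not
part of the original file; it uses the GCC/Clang __builtin_clz intrinsic in
place of the MIPS Clz instruction and restates the HeapNumber layout constants
from the comments above so it is self-contained. It produces the two 32-bit
words of the IEEE-754 double for a 32-bit integer, including the 0 and +/-1
special cases handled above.

    #include <cstdint>

    const int kExponentBiasSketch = 1023;
    const int kMantissaBitsInTopWordSketch = 20;

    void IntToDoubleWordsSketch(int32_t value,
                                uint32_t* exponent_word,   // sign, exponent, mantissa[51:32]
                                uint32_t* mantissa_word) { // mantissa[31:0]
      uint32_t sign = (value < 0) ? 0x80000000u : 0u;
      uint32_t abs_value = (value < 0) ? 0u - static_cast<uint32_t>(value)
                                       : static_cast<uint32_t>(value);
      if (abs_value == 0) {  // +/-0: everything below the sign bit is zero.
        *exponent_word = sign;
        *mantissa_word = 0;
        return;
      }
      if (abs_value == 1) {  // The "special" 1/-1 case: biased exponent 1023, empty mantissa.
        *exponent_word = sign | (static_cast<uint32_t>(kExponentBiasSketch)
                                 << kMantissaBitsInTopWordSketch);
        *mantissa_word = 0;
        return;
      }
      int zeros = __builtin_clz(abs_value);          // stands in for the Clz instruction
      int exponent = 31 - zeros;                     // position of the leading 1 bit
      uint32_t fraction = abs_value << (zeros + 1);  // shift out the implicit leading 1
      *exponent_word = sign
          | (static_cast<uint32_t>(exponent + kExponentBiasSketch)
             << kMantissaBitsInTopWordSketch)
          | (fraction >> (32 - kMantissaBitsInTopWordSketch));
      *mantissa_word = fraction << kMantissaBitsInTopWordSketch;
    }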
516 
517 
518 void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
520  Register scratch1,
521  Register scratch2) {
523  CpuFeatures::Scope scope(FPU);
524  __ sra(scratch1, a0, kSmiTagSize);
525  __ mtc1(scratch1, f14);
526  __ cvt_d_w(f14, f14);
527  __ sra(scratch1, a1, kSmiTagSize);
528  __ mtc1(scratch1, f12);
529  __ cvt_d_w(f12, f12);
530  if (destination == kCoreRegisters) {
531  __ Move(a2, a3, f14);
532  __ Move(a0, a1, f12);
533  }
534  } else {
535  ASSERT(destination == kCoreRegisters);
536  // Write Smi from a0 to a3 and a2 in double format.
537  __ mov(scratch1, a0);
538  ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
539  __ push(ra);
540  __ Call(stub1.GetCode());
541  // Write Smi from a1 to a1 and a0 in double format.
542  __ mov(scratch1, a1);
543  ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2);
544  __ Call(stub2.GetCode());
545  __ pop(ra);
546  }
547 }
548 
549 
551  MacroAssembler* masm,
553  Register heap_number_map,
554  Register scratch1,
555  Register scratch2,
556  Label* slow) {
557 
558  // Load right operand (a0) to f12 or a2/a3.
559  LoadNumber(masm, destination,
560  a0, f14, a2, a3, heap_number_map, scratch1, scratch2, slow);
561 
562  // Load left operand (a1) to f14 or a0/a1.
563  LoadNumber(masm, destination,
564  a1, f12, a0, a1, heap_number_map, scratch1, scratch2, slow);
565 }
566 
567 
568 void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
569  Destination destination,
570  Register object,
571  FPURegister dst,
572  Register dst1,
573  Register dst2,
574  Register heap_number_map,
575  Register scratch1,
576  Register scratch2,
577  Label* not_number) {
578  if (FLAG_debug_code) {
579  __ AbortIfNotRootValue(heap_number_map,
580  Heap::kHeapNumberMapRootIndex,
581  "HeapNumberMap register clobbered.");
582  }
583 
584  Label is_smi, done;
585 
586  // Smi-check
587  __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
588  // Heap number check
589  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
590 
591  // Handle loading a double from a heap number.
593  destination == kFPURegisters) {
594  CpuFeatures::Scope scope(FPU);
595  // Load the double from tagged HeapNumber to double register.
596 
597  // ARM uses a workaround here because of the unaligned HeapNumber
598  // kValueOffset. On MIPS this workaround is built into ldc1 so there's no
599  // point in generating even more instructions.
600  __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
601  } else {
602  ASSERT(destination == kCoreRegisters);
603  // Load the double from heap number to dst1 and dst2 in double format.
604  __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset));
605  __ lw(dst2, FieldMemOperand(object,
606  HeapNumber::kValueOffset + kPointerSize));
607  }
608  __ Branch(&done);
609 
610  // Handle loading a double from a smi.
611  __ bind(&is_smi);
613  CpuFeatures::Scope scope(FPU);
614  // Convert smi to double using FPU instructions.
615  __ mtc1(scratch1, dst);
616  __ cvt_d_w(dst, dst);
617  if (destination == kCoreRegisters) {
618  // Load the converted smi to dst1 and dst2 in double format.
619  __ Move(dst1, dst2, dst);
620  }
621  } else {
622  ASSERT(destination == kCoreRegisters);
623  // Write smi to dst1 and dst2 double format.
624  __ mov(scratch1, object);
625  ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
626  __ push(ra);
627  __ Call(stub.GetCode());
628  __ pop(ra);
629  }
630 
631  __ bind(&done);
632 }
633 
634 
635 void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
636  Register object,
637  Register dst,
638  Register heap_number_map,
639  Register scratch1,
640  Register scratch2,
641  Register scratch3,
642  FPURegister double_scratch,
643  Label* not_number) {
644  if (FLAG_debug_code) {
645  __ AbortIfNotRootValue(heap_number_map,
646  Heap::kHeapNumberMapRootIndex,
647  "HeapNumberMap register clobbered.");
648  }
649  Label done;
650  Label not_in_int32_range;
651 
652  __ UntagAndJumpIfSmi(dst, object, &done);
653  __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
654  __ Branch(not_number, ne, scratch1, Operand(heap_number_map));
655  __ ConvertToInt32(object,
656  dst,
657  scratch1,
658  scratch2,
659  double_scratch,
660  &not_in_int32_range);
661  __ jmp(&done);
662 
663  __ bind(&not_in_int32_range);
664  __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
665  __ lw(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
666 
667  __ EmitOutOfInt32RangeTruncate(dst,
668  scratch1,
669  scratch2,
670  scratch3);
671 
672  __ bind(&done);
673 }
674 
675 
676 void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
677  Register int_scratch,
678  Destination destination,
679  FPURegister double_dst,
680  Register dst1,
681  Register dst2,
682  Register scratch2,
683  FPURegister single_scratch) {
684  ASSERT(!int_scratch.is(scratch2));
685  ASSERT(!int_scratch.is(dst1));
686  ASSERT(!int_scratch.is(dst2));
687 
688  Label done;
689 
691  CpuFeatures::Scope scope(FPU);
692  __ mtc1(int_scratch, single_scratch);
693  __ cvt_d_w(double_dst, single_scratch);
694  if (destination == kCoreRegisters) {
695  __ Move(dst1, dst2, double_dst);
696  }
697  } else {
698  Label fewer_than_20_useful_bits;
699  // Expected output:
700  // | dst2 | dst1 |
701  // | s | exp | mantissa |
702 
703  // Check for zero.
704  __ mov(dst2, int_scratch);
705  __ mov(dst1, int_scratch);
706  __ Branch(&done, eq, int_scratch, Operand(zero_reg));
707 
708  // Preload the sign of the value.
709  __ And(dst2, int_scratch, Operand(HeapNumber::kSignMask));
710  // Get the absolute value of the object (as an unsigned integer).
711  Label skip_sub;
712  __ Branch(&skip_sub, ge, dst2, Operand(zero_reg));
713  __ Subu(int_scratch, zero_reg, int_scratch);
714  __ bind(&skip_sub);
715 
716  // Get mantissa[51:20].
717 
718  // Get the position of the first set bit.
719  __ Clz(dst1, int_scratch);
720  __ li(scratch2, 31);
721  __ Subu(dst1, scratch2, dst1);
722 
723  // Set the exponent.
724  __ Addu(scratch2, dst1, Operand(HeapNumber::kExponentBias));
725  __ Ins(dst2, scratch2,
726  HeapNumber::kExponentShift, HeapNumber::kExponentBits);
727 
728  // Clear the first non null bit.
729  __ li(scratch2, Operand(1));
730  __ sllv(scratch2, scratch2, dst1);
731  __ li(at, -1);
732  __ Xor(scratch2, scratch2, at);
733  __ And(int_scratch, int_scratch, scratch2);
734 
735  // Get the number of bits to set in the lower part of the mantissa.
736  __ Subu(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
737  __ Branch(&fewer_than_20_useful_bits, lt, scratch2, Operand(zero_reg));
738  // Set the higher 20 bits of the mantissa.
739  __ srlv(at, int_scratch, scratch2);
740  __ or_(dst2, dst2, at);
741  __ li(at, 32);
742  __ subu(scratch2, at, scratch2);
743  __ sllv(dst1, int_scratch, scratch2);
744  __ Branch(&done);
745 
746  __ bind(&fewer_than_20_useful_bits);
748  __ subu(scratch2, at, dst1);
749  __ sllv(scratch2, int_scratch, scratch2);
750  __ Or(dst2, dst2, scratch2);
751  // Set dst1 to 0.
752  __ mov(dst1, zero_reg);
753  }
754  __ bind(&done);
755 }
756 
757 
758 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
759  Register object,
760  Destination destination,
761  DoubleRegister double_dst,
762  Register dst1,
763  Register dst2,
764  Register heap_number_map,
765  Register scratch1,
766  Register scratch2,
767  FPURegister single_scratch,
768  Label* not_int32) {
769  ASSERT(!scratch1.is(object) && !scratch2.is(object));
770  ASSERT(!scratch1.is(scratch2));
771  ASSERT(!heap_number_map.is(object) &&
772  !heap_number_map.is(scratch1) &&
773  !heap_number_map.is(scratch2));
774 
775  Label done, obj_is_not_smi;
776 
777  __ JumpIfNotSmi(object, &obj_is_not_smi);
778  __ SmiUntag(scratch1, object);
779  ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
780  scratch2, single_scratch);
781  __ Branch(&done);
782 
783  __ bind(&obj_is_not_smi);
784  if (FLAG_debug_code) {
785  __ AbortIfNotRootValue(heap_number_map,
786  Heap::kHeapNumberMapRootIndex,
787  "HeapNumberMap register clobbered.");
788  }
789  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
790 
791  // Load the number.
793  CpuFeatures::Scope scope(FPU);
794  // Load the double value.
795  __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
796 
797  Register except_flag = scratch2;
798  __ EmitFPUTruncate(kRoundToZero,
799  single_scratch,
800  double_dst,
801  scratch1,
802  except_flag,
804 
805  // Jump to not_int32 if the operation did not succeed.
806  __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
807 
808  if (destination == kCoreRegisters) {
809  __ Move(dst1, dst2, double_dst);
810  }
811 
812  } else {
813  ASSERT(!scratch1.is(object) && !scratch2.is(object));
814  // Load the double value in the destination registers.
817 
818  // Check for 0 and -0.
819  __ And(scratch1, dst1, Operand(~HeapNumber::kSignMask));
820  __ Or(scratch1, scratch1, Operand(dst2));
821  __ Branch(&done, eq, scratch1, Operand(zero_reg));
822 
823  // Check that the value can be exactly represented by a 32-bit integer.
824  // Jump to not_int32 if that's not the case.
825  DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
826 
827  // dst1 and dst2 were trashed. Reload the double value.
830  }
831 
832  __ bind(&done);
833 }
834 
835 
836 void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
837  Register object,
838  Register dst,
839  Register heap_number_map,
840  Register scratch1,
841  Register scratch2,
842  Register scratch3,
843  DoubleRegister double_scratch,
844  Label* not_int32) {
845  ASSERT(!dst.is(object));
846  ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
847  ASSERT(!scratch1.is(scratch2) &&
848  !scratch1.is(scratch3) &&
849  !scratch2.is(scratch3));
850 
851  Label done;
852 
853  __ UntagAndJumpIfSmi(dst, object, &done);
854 
855  if (FLAG_debug_code) {
856  __ AbortIfNotRootValue(heap_number_map,
857  Heap::kHeapNumberMapRootIndex,
858  "HeapNumberMap register clobbered.");
859  }
860  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
861 
862  // Object is a heap number.
863  // Convert the floating point value to a 32-bit integer.
865  CpuFeatures::Scope scope(FPU);
866  // Load the double value.
867  __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
868 
869  FPURegister single_scratch = double_scratch.low();
870  Register except_flag = scratch2;
871  __ EmitFPUTruncate(kRoundToZero,
872  single_scratch,
873  double_scratch,
874  scratch1,
875  except_flag,
877 
878  // Jump to not_int32 if the operation did not succeed.
879  __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
880  // Get the result in the destination register.
881  __ mfc1(dst, single_scratch);
882 
883  } else {
884  // Load the double value in the destination registers.
885  __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
886  __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
887 
888  // Check for 0 and -0.
889  __ And(dst, scratch1, Operand(~HeapNumber::kSignMask));
890  __ Or(dst, scratch2, Operand(dst));
891  __ Branch(&done, eq, dst, Operand(zero_reg));
892 
893  DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
894 
895  // Registers state after DoubleIs32BitInteger.
896  // dst: mantissa[51:20].
897  // scratch2: 1
898 
899  // Shift back the higher bits of the mantissa.
900  __ srlv(dst, dst, scratch3);
901  // Set the implicit first bit.
902  __ li(at, 32);
903  __ subu(scratch3, at, scratch3);
904  __ sllv(scratch2, scratch2, scratch3);
905  __ Or(dst, dst, scratch2);
906  // Set the sign.
907  __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
908  __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
909  Label skip_sub;
910  __ Branch(&skip_sub, ge, scratch1, Operand(zero_reg));
911  __ Subu(dst, zero_reg, dst);
912  __ bind(&skip_sub);
913  }
914 
915  __ bind(&done);
916 }
917 
918 
919 void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
920  Register src1,
921  Register src2,
922  Register dst,
923  Register scratch,
924  Label* not_int32) {
925  // Get exponent alone in scratch.
926  __ Ext(scratch,
927  src1,
928  HeapNumber::kExponentShift,
930 
931  // Subtract the bias from the exponent.
932  __ Subu(scratch, scratch, Operand(HeapNumber::kExponentBias));
933 
934  // src1: higher (exponent) part of the double value.
935  // src2: lower (mantissa) part of the double value.
936  // scratch: unbiased exponent.
937 
938  // Fast cases. Check for obvious non 32-bit integer values.
939  // Negative exponent cannot yield 32-bit integers.
940  __ Branch(not_int32, lt, scratch, Operand(zero_reg));
941  // Exponent greater than 31 cannot yield 32-bit integers.
942  // Also, a positive value with an exponent equal to 31 is outside of the
943  // signed 32-bit integer range.
944  // Another way to put it is that if (exponent - signbit) > 30 then the
945  // number cannot be represented as an int32.
946  Register tmp = dst;
947  __ srl(at, src1, 31);
948  __ subu(tmp, scratch, at);
949  __ Branch(not_int32, gt, tmp, Operand(30));
950  // - Bits [21:0] in the mantissa are not null.
951  __ And(tmp, src2, 0x3fffff);
952  __ Branch(not_int32, ne, tmp, Operand(zero_reg));
953 
954  // Otherwise the exponent needs to be big enough to shift left all the
955  // non zero bits left. So we need the (30 - exponent) last bits of the
956  // 31 higher bits of the mantissa to be null.
957  // Because bits [21:0] are null, we can check instead that the
958  // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
959 
960  // Get the 32 higher bits of the mantissa in dst.
961  __ Ext(dst,
962  src2,
966  __ or_(dst, dst, at);
967 
968  // Create the mask and test the lower bits (of the higher bits).
969  __ li(at, 32);
970  __ subu(scratch, at, scratch);
971  __ li(src2, 1);
972  __ sllv(src1, src2, scratch);
973  __ Subu(src1, src1, Operand(1));
974  __ And(src1, dst, src1);
975  __ Branch(not_int32, ne, src1, Operand(zero_reg));
976 }
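The same test, stated on the raw 64-bit pattern in host C++ (a sketch under
standard IEEE-754 assumptions, not a line-for-line port of the two-word
register code above): a double is an exact int32 when its unbiased exponent is
non-negative, small enough for the int32 range, and no set mantissa bit would
be shifted out.

    #include <cstdint>
    #include <cstring>

    bool DoubleIsInt32Sketch(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      uint64_t mantissa = bits & ((uint64_t{1} << 52) - 1);
      int biased_exponent = static_cast<int>((bits >> 52) & 0x7ff);
      int sign = static_cast<int>(bits >> 63);
      if (biased_exponent == 0) return mantissa == 0;  // +/-0 yes, denormals no
      int exponent = biased_exponent - 1023;           // unbiased exponent
      if (exponent < 0) return false;                  // |value| < 1 but not zero
      if (exponent - sign > 30) return false;          // outside the int32 range
      if (exponent == 31) return mantissa == 0;        // only -2^31 itself qualifies
      // No fractional bits: everything below bit (52 - exponent) must be clear.
      return (mantissa & ((uint64_t{1} << (52 - exponent)) - 1)) == 0;
    }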
977 
978 
980  MacroAssembler* masm,
981  Token::Value op,
982  Register heap_number_result,
983  Register scratch) {
984  // Using core registers:
985  // a0: Left value (least significant part of mantissa).
986  // a1: Left value (sign, exponent, top of mantissa).
987  // a2: Right value (least significant part of mantissa).
988  // a3: Right value (sign, exponent, top of mantissa).
989 
990  // Assert that heap_number_result is saved.
991  // We currently always use s0 to pass it.
992  ASSERT(heap_number_result.is(s0));
993 
994  // Push the current return address before the C call.
995  __ push(ra);
996  __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
997  if (!IsMipsSoftFloatABI) {
998  CpuFeatures::Scope scope(FPU);
999  // We are not using MIPS FPU instructions, and parameters for the runtime
1000  // function call are prepared in a0-a3 registers, but the function we are
1001  // calling is compiled with the hard-float flag and expects the hard-float ABI
1002  // (parameters in f12/f14 registers). We need to copy parameters from
1003  // a0-a3 registers to f12/f14 register pairs.
1004  __ Move(f12, a0, a1);
1005  __ Move(f14, a2, a3);
1006  }
1007  {
1008  AllowExternalCallThatCantCauseGC scope(masm);
1009  __ CallCFunction(
1010  ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
1011  }
1012  // Store answer in the overwritable heap number.
1013  if (!IsMipsSoftFloatABI) {
1014  CpuFeatures::Scope scope(FPU);
1015  // Double returned in register f0.
1016  __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
1017  } else {
1018  // Double returned in registers v0 and v1.
1019  __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
1020  __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
1021  }
1022  // Place heap_number_result in v0 and return to the pushed return address.
1023  __ pop(ra);
1024  __ Ret(USE_DELAY_SLOT);
1025  __ mov(v0, heap_number_result);
1026 }
1027 
1028 
1030  // These variants are compiled ahead of time. See next method.
1031  if (the_int_.is(a1) &&
1032  the_heap_number_.is(v0) &&
1033  scratch_.is(a2) &&
1034  sign_.is(a3)) {
1035  return true;
1036  }
1037  if (the_int_.is(a2) &&
1038  the_heap_number_.is(v0) &&
1039  scratch_.is(a3) &&
1040  sign_.is(a0)) {
1041  return true;
1042  }
1043  // Other register combinations are generated as and when they are needed,
1044  // so it is unsafe to call them from stubs (we can't generate a stub while
1045  // we are generating a stub).
1046  return false;
1047 }
1048 
1049 
1051  WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
1052  WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
1053  stub1.GetCode()->set_is_pregenerated(true);
1054  stub2.GetCode()->set_is_pregenerated(true);
1055 }
1056 
1057 
1058 // See comment for class, this does NOT work for int32's that are in Smi range.
1059 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
1060  Label max_negative_int;
1061  // the_int_ has the answer which is a signed int32 but not a Smi.
1062  // We test for the special value that has a different exponent.
1063  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
1064  // Test sign, and save for later conditionals.
1065  __ And(sign_, the_int_, Operand(0x80000000u));
1066  __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));
1067 
1068  // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
1069  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
1070  uint32_t non_smi_exponent =
1071  (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
1072  __ li(scratch_, Operand(non_smi_exponent));
1073  // Set the sign bit in scratch_ if the value was negative.
1074  __ or_(scratch_, scratch_, sign_);
1075  // Subtract from 0 if the value was negative.
1076  __ subu(at, zero_reg, the_int_);
1077  __ Movn(the_int_, at, sign_);
1078  // We should be masking the implicit first digit of the mantissa away here,
1079  // but it just ends up combining harmlessly with the last digit of the
1080  // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
1081  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
1082  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
1083  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
1084  __ srl(at, the_int_, shift_distance);
1085  __ or_(scratch_, scratch_, at);
1086  __ sw(scratch_, FieldMemOperand(the_heap_number_,
1088  __ sll(scratch_, the_int_, 32 - shift_distance);
1089  __ sw(scratch_, FieldMemOperand(the_heap_number_,
1091  __ Ret();
1092 
1093  __ bind(&max_negative_int);
1094  // The max negative int32 is stored as a positive number in the mantissa of
1095  // a double because it uses a sign bit instead of using two's complement.
1096  // The actual mantissa bits stored are all 0 because the implicit most
1097  // significant 1 bit is not stored.
1098  non_smi_exponent += 1 << HeapNumber::kExponentShift;
1099  __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
1100  __ sw(scratch_,
1101  FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
1102  __ mov(scratch_, zero_reg);
1103  __ sw(scratch_,
1104  FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
1105  __ Ret();
1106 }
1107 
1108 
1109 // Handle the case where the lhs and rhs are the same object.
1110 // Equality is almost reflexive (everything but NaN), so this is a test
1111 // for "identity and not NaN".
1112 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
1113  Label* slow,
1114  Condition cc,
1115  bool never_nan_nan) {
1116  Label not_identical;
1117  Label heap_number, return_equal;
1118  Register exp_mask_reg = t5;
1119 
1120  __ Branch(&not_identical, ne, a0, Operand(a1));
1121 
1122  // The two objects are identical. If we know that one of them isn't NaN then
1123  // we now know they test equal.
1124  if (cc != eq || !never_nan_nan) {
1125  __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
1126 
1127  // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
1128  // so we do the second best thing - test it ourselves.
1129  // They are both equal and they are not both Smis so both of them are not
1130  // Smis. If it's not a heap number, then return equal.
1131  if (cc == less || cc == greater) {
1132  __ GetObjectType(a0, t4, t4);
1133  __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
1134  } else {
1135  __ GetObjectType(a0, t4, t4);
1136  __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
1137  // Comparing JS objects with <=, >= is complicated.
1138  if (cc != eq) {
1139  __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
1140  // Normally here we fall through to return_equal, but undefined is
1141  // special: (undefined == undefined) == true, but
1142  // (undefined <= undefined) == false! See ECMAScript 11.8.5.
1143  if (cc == less_equal || cc == greater_equal) {
1144  __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
1145  __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
1146  __ Branch(&return_equal, ne, a0, Operand(t2));
1147  if (cc == le) {
1148  // undefined <= undefined should fail.
1149  __ li(v0, Operand(GREATER));
1150  } else {
1151  // undefined >= undefined should fail.
1152  __ li(v0, Operand(LESS));
1153  }
1154  __ Ret();
1155  }
1156  }
1157  }
1158  }
1159 
1160  __ bind(&return_equal);
1161 
1162  if (cc == less) {
1163  __ li(v0, Operand(GREATER)); // Things aren't less than themselves.
1164  } else if (cc == greater) {
1165  __ li(v0, Operand(LESS)); // Things aren't greater than themselves.
1166  } else {
1167  __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves.
1168  }
1169  __ Ret();
1170 
1171  if (cc != eq || !never_nan_nan) {
1172  // For less and greater we don't have to check for NaN since the result of
1173  // x < x is false regardless. For the others here is some code to check
1174  // for NaN.
1175  if (cc != lt && cc != gt) {
1176  __ bind(&heap_number);
1177  // It is a heap number, so return non-equal if it's NaN and equal if it's
1178  // not NaN.
1179 
1180  // The representation of NaN values has all exponent bits (52..62) set,
1181  // and not all mantissa bits (0..51) clear.
1182  // Read top bits of double representation (second word of value).
1184  // Test that exponent bits are all set.
1185  __ And(t3, t2, Operand(exp_mask_reg));
1186  // If all bits not set (ne cond), then not a NaN, objects are equal.
1187  __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
1188 
1189  // Shift out flag and all exponent bits, retaining only mantissa.
1191  // Or with all low-bits of mantissa.
1193  __ Or(v0, t3, Operand(t2));
1194  // For equal we already have the right value in v0: Return zero (equal)
1195  // if all bits in mantissa are zero (it's an Infinity) and non-zero if
1196  // not (it's a NaN). For <= and >= we need to load v0 with the failing
1197  // value if it's a NaN.
1198  if (cc != eq) {
1199  // All-zero means Infinity means equal.
1200  __ Ret(eq, v0, Operand(zero_reg));
1201  if (cc == le) {
1202  __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
1203  } else {
1204  __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
1205  }
1206  }
1207  __ Ret();
1208  }
1209  // No fall through here.
1210  }
1211 
1212  __ bind(&not_identical);
1213 }
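For reference, the NaN test performed above on the heap number's exponent and
mantissa words, written as host C++ over the 64-bit pattern (a sketch, not part
of the original file):

    #include <cstdint>
    #include <cstring>

    bool IsNaNBitsSketch(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      uint64_t exponent = (bits >> 52) & 0x7ff;              // bits 62..52
      uint64_t mantissa = bits & ((uint64_t{1} << 52) - 1);  // bits 51..0
      // NaN: every exponent bit set and at least one mantissa bit set
      // (a full exponent with an all-zero mantissa is an infinity instead).
      return exponent == 0x7ff && mantissa != 0;
    }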
1214 
1215 
1216 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
1217  Register lhs,
1218  Register rhs,
1219  Label* both_loaded_as_doubles,
1220  Label* slow,
1221  bool strict) {
1222  ASSERT((lhs.is(a0) && rhs.is(a1)) ||
1223  (lhs.is(a1) && rhs.is(a0)));
1224 
1225  Label lhs_is_smi;
1226  __ JumpIfSmi(lhs, &lhs_is_smi);
1227  // Rhs is a Smi.
1228  // Check whether the non-smi is a heap number.
1229  __ GetObjectType(lhs, t4, t4);
1230  if (strict) {
1231  // If lhs was not a number and rhs was a Smi then strict equality cannot
1232  // succeed. Return non-equal (lhs is already not zero).
1233  __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
1234  __ mov(v0, lhs);
1235  } else {
1236  // Smi compared non-strictly with a non-Smi non-heap-number. Call
1237  // the runtime.
1238  __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
1239  }
1240 
1241  // Rhs is a smi, lhs is a number.
1242  // Convert smi rhs to double.
1244  CpuFeatures::Scope scope(FPU);
1245  __ sra(at, rhs, kSmiTagSize);
1246  __ mtc1(at, f14);
1247  __ cvt_d_w(f14, f14);
1249  } else {
1250  // Load lhs to a double in a2, a3.
1251  __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
1253 
1254  // Write Smi from rhs to a1 and a0 in double format. t5 is scratch.
1255  __ mov(t6, rhs);
1256  ConvertToDoubleStub stub1(a1, a0, t6, t5);
1257  __ push(ra);
1258  __ Call(stub1.GetCode());
1259 
1260  __ pop(ra);
1261  }
1262 
1263  // We now have both loaded as doubles.
1264  __ jmp(both_loaded_as_doubles);
1265 
1266  __ bind(&lhs_is_smi);
1267  // Lhs is a Smi. Check whether the non-smi is a heap number.
1268  __ GetObjectType(rhs, t4, t4);
1269  if (strict) {
1270  // If lhs was not a number and rhs was a Smi then strict equality cannot
1271  // succeed. Return non-equal.
1272  __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
1273  __ li(v0, Operand(1));
1274  } else {
1275  // Smi compared non-strictly with a non-Smi non-heap-number. Call
1276  // the runtime.
1277  __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
1278  }
1279 
1280  // Lhs is a smi, rhs is a number.
1281  // Convert smi lhs to double.
1283  CpuFeatures::Scope scope(FPU);
1284  __ sra(at, lhs, kSmiTagSize);
1285  __ mtc1(at, f12);
1286  __ cvt_d_w(f12, f12);
1288  } else {
1289  // Convert lhs to a double format. t5 is scratch.
1290  __ mov(t6, lhs);
1291  ConvertToDoubleStub stub2(a3, a2, t6, t5);
1292  __ push(ra);
1293  __ Call(stub2.GetCode());
1294  __ pop(ra);
1295  // Load rhs to a double in a1, a0.
1296  if (rhs.is(a0)) {
1297  __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1299  } else {
1301  __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1302  }
1303  }
1304  // Fall through to both_loaded_as_doubles.
1305 }
1306 
1307 
1308 void EmitNanCheck(MacroAssembler* masm, Condition cc) {
1311  CpuFeatures::Scope scope(FPU);
1312  // Lhs and rhs are already loaded to f12 and f14 register pairs.
1313  __ Move(t0, t1, f14);
1314  __ Move(t2, t3, f12);
1315  } else {
1316  // Lhs and rhs are already loaded to GP registers.
1317  __ mov(t0, a0); // a0 has LS 32 bits of rhs.
1318  __ mov(t1, a1); // a1 has MS 32 bits of rhs.
1319  __ mov(t2, a2); // a2 has LS 32 bits of lhs.
1320  __ mov(t3, a3); // a3 has MS 32 bits of lhs.
1321  }
1322  Register rhs_exponent = exp_first ? t0 : t1;
1323  Register lhs_exponent = exp_first ? t2 : t3;
1324  Register rhs_mantissa = exp_first ? t1 : t0;
1325  Register lhs_mantissa = exp_first ? t3 : t2;
1326  Label one_is_nan, neither_is_nan;
1327  Label lhs_not_nan_exp_mask_is_loaded;
1328 
1329  Register exp_mask_reg = t4;
1330  __ li(exp_mask_reg, HeapNumber::kExponentMask);
1331  __ and_(t5, lhs_exponent, exp_mask_reg);
1332  __ Branch(&lhs_not_nan_exp_mask_is_loaded, ne, t5, Operand(exp_mask_reg));
1333 
1334  __ sll(t5, lhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
1335  __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
1336 
1337  __ Branch(&one_is_nan, ne, lhs_mantissa, Operand(zero_reg));
1338 
1339  __ li(exp_mask_reg, HeapNumber::kExponentMask);
1340  __ bind(&lhs_not_nan_exp_mask_is_loaded);
1341  __ and_(t5, rhs_exponent, exp_mask_reg);
1342 
1343  __ Branch(&neither_is_nan, ne, t5, Operand(exp_mask_reg));
1344 
1345  __ sll(t5, rhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
1346  __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
1347 
1348  __ Branch(&neither_is_nan, eq, rhs_mantissa, Operand(zero_reg));
1349 
1350  __ bind(&one_is_nan);
1351  // NaN comparisons always fail.
1352  // Load whatever we need in v0 to make the comparison fail.
1353 
1354  if (cc == lt || cc == le) {
1355  __ li(v0, Operand(GREATER));
1356  } else {
1357  __ li(v0, Operand(LESS));
1358  }
1359  __ Ret();
1360 
1361  __ bind(&neither_is_nan);
1362 }
1363 
1364 
1365 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
1366  // f12 and f14 have the two doubles. Neither is a NaN.
1367  // Call a native function to do a comparison between two non-NaNs.
1368  // Call C routine that may not cause GC or other trouble.
1369  // We make the call and return manually because we need the argument slots
1370  // to be freed.
1371 
1372  Label return_result_not_equal, return_result_equal;
1373  if (cc == eq) {
1374  // Doubles are not equal unless they have the same bit pattern.
1375  // Exception: 0 and -0.
1378  CpuFeatures::Scope scope(FPU);
1379  // Lhs and rhs are already loaded to f12 and f14 register pairs.
1380  __ Move(t0, t1, f14);
1381  __ Move(t2, t3, f12);
1382  } else {
1383  // Lhs and rhs are already loaded to GP registers.
1384  __ mov(t0, a0); // a0 has LS 32 bits of rhs.
1385  __ mov(t1, a1); // a1 has MS 32 bits of rhs.
1386  __ mov(t2, a2); // a2 has LS 32 bits of lhs.
1387  __ mov(t3, a3); // a3 has MS 32 bits of lhs.
1388  }
1389  Register rhs_exponent = exp_first ? t0 : t1;
1390  Register lhs_exponent = exp_first ? t2 : t3;
1391  Register rhs_mantissa = exp_first ? t1 : t0;
1392  Register lhs_mantissa = exp_first ? t3 : t2;
1393 
1394  __ xor_(v0, rhs_mantissa, lhs_mantissa);
1395  __ Branch(&return_result_not_equal, ne, v0, Operand(zero_reg));
1396 
1397  __ subu(v0, rhs_exponent, lhs_exponent);
1398  __ Branch(&return_result_equal, eq, v0, Operand(zero_reg));
1399  // 0, -0 case.
1400  __ sll(rhs_exponent, rhs_exponent, kSmiTagSize);
1401  __ sll(lhs_exponent, lhs_exponent, kSmiTagSize);
1402  __ or_(t4, rhs_exponent, lhs_exponent);
1403  __ or_(t4, t4, rhs_mantissa);
1404 
1405  __ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg));
1406 
1407  __ bind(&return_result_equal);
1408 
1409  __ li(v0, Operand(EQUAL));
1410  __ Ret();
1411  }
1412 
1413  __ bind(&return_result_not_equal);
1414 
1415  if (!CpuFeatures::IsSupported(FPU)) {
1416  __ push(ra);
1417  __ PrepareCallCFunction(0, 2, t4);
1418  if (!IsMipsSoftFloatABI) {
1419  // We are not using MIPS FPU instructions, and parameters for the runtime
1420  // function call are prepared in a0-a3 registers, but the function we are
1421  // calling is compiled with the hard-float flag and expects the hard-float ABI
1422  // (parameters in f12/f14 registers). We need to copy parameters from
1423  // a0-a3 registers to f12/f14 register pairs.
1424  __ Move(f12, a0, a1);
1425  __ Move(f14, a2, a3);
1426  }
1427 
1428  AllowExternalCallThatCantCauseGC scope(masm);
1429  __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
1430  0, 2);
1431  __ pop(ra); // Because this function returns int, result is in v0.
1432  __ Ret();
1433  } else {
1434  CpuFeatures::Scope scope(FPU);
1435  Label equal, less_than;
1436  __ BranchF(&equal, NULL, eq, f12, f14);
1437  __ BranchF(&less_than, NULL, lt, f12, f14);
1438 
1439  // Not equal, not less, not NaN, must be greater.
1440 
1441  __ li(v0, Operand(GREATER));
1442  __ Ret();
1443 
1444  __ bind(&equal);
1445  __ li(v0, Operand(EQUAL));
1446  __ Ret();
1447 
1448  __ bind(&less_than);
1449  __ li(v0, Operand(LESS));
1450  __ Ret();
1451  }
1452 }
1453 
1454 
1455 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
1456  Register lhs,
1457  Register rhs) {
1458  // If either operand is a JS object or an oddball value, then they are
1459  // not equal since their pointers are different.
1460  // There is no test for undetectability in strict equality.
1462  Label first_non_object;
1463  // Get the type of the first operand into a2 and compare it with
1464  // FIRST_SPEC_OBJECT_TYPE.
1465  __ GetObjectType(lhs, a2, a2);
1466  __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
1467 
1468  // Return non-zero.
1469  Label return_not_equal;
1470  __ bind(&return_not_equal);
1471  __ Ret(USE_DELAY_SLOT);
1472  __ li(v0, Operand(1));
1473 
1474  __ bind(&first_non_object);
1475  // Check for oddballs: true, false, null, undefined.
1476  __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
1477 
1478  __ GetObjectType(rhs, a3, a3);
1479  __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
1480 
1481  // Check for oddballs: true, false, null, undefined.
1482  __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
1483 
1484  // Now that we have the types we might as well check for symbol-symbol.
1485  // Ensure that no non-strings have the symbol bit set.
1487  STATIC_ASSERT(kSymbolTag != 0);
1488  __ And(t2, a2, Operand(a3));
1489  __ And(t0, t2, Operand(kIsSymbolMask));
1490  __ Branch(&return_not_equal, ne, t0, Operand(zero_reg));
1491 }
1492 
1493 
1494 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
1495  Register lhs,
1496  Register rhs,
1497  Label* both_loaded_as_doubles,
1498  Label* not_heap_numbers,
1499  Label* slow) {
1500  __ GetObjectType(lhs, a3, a2);
1501  __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
1503  // If first was a heap number & second wasn't, go to slow case.
1504  __ Branch(slow, ne, a3, Operand(a2));
1505 
1506  // Both are heap numbers. Load them up then jump to the code we have
1507  // for that.
1509  CpuFeatures::Scope scope(FPU);
1512  } else {
1514  __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
1515  if (rhs.is(a0)) {
1516  __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1518  } else {
1520  __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1521  }
1522  }
1523  __ jmp(both_loaded_as_doubles);
1524 }
1525 
1526 
1527 // Fast negative check for symbol-to-symbol equality.
1528 static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
1529  Register lhs,
1530  Register rhs,
1531  Label* possible_strings,
1532  Label* not_both_strings) {
1533  ASSERT((lhs.is(a0) && rhs.is(a1)) ||
1534  (lhs.is(a1) && rhs.is(a0)));
1535 
1536  // a2 is object type of lhs.
1537  // Ensure that no non-strings have the symbol bit set.
1538  Label object_test;
1539  STATIC_ASSERT(kSymbolTag != 0);
1540  __ And(at, a2, Operand(kIsNotStringMask));
1541  __ Branch(&object_test, ne, at, Operand(zero_reg));
1542  __ And(at, a2, Operand(kIsSymbolMask));
1543  __ Branch(possible_strings, eq, at, Operand(zero_reg));
1544  __ GetObjectType(rhs, a3, a3);
1545  __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
1546  __ And(at, a3, Operand(kIsSymbolMask));
1547  __ Branch(possible_strings, eq, at, Operand(zero_reg));
1548 
1549  // Both are symbols. We already checked they weren't the same pointer
1550  // so they are not equal.
1551  __ Ret(USE_DELAY_SLOT);
1552  __ li(v0, Operand(1)); // Non-zero indicates not equal.
1553 
1554  __ bind(&object_test);
1555  __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
1556  __ GetObjectType(rhs, a2, a3);
1557  __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
1558 
1559  // If both objects are undetectable, they are equal. Otherwise, they
1560  // are not equal, since they are different objects and an object is not
1561  // equal to undefined.
1563  __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
1564  __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
1565  __ and_(a0, a2, a3);
1566  __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
1567  __ Ret(USE_DELAY_SLOT);
1568  __ xori(v0, a0, 1 << Map::kIsUndetectable);
1569 }
1570 
1571 
1572 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
1573  Register object,
1574  Register result,
1575  Register scratch1,
1576  Register scratch2,
1577  Register scratch3,
1578  bool object_is_smi,
1579  Label* not_found) {
1580  // Use of registers. Register result is used as a temporary.
1581  Register number_string_cache = result;
1582  Register mask = scratch3;
1583 
1584  // Load the number string cache.
1585  __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
1586 
1587  // Make the hash mask from the length of the number string cache. It
1588  // contains two elements (number and string) for each cache entry.
1589  __ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
1590  // Divide length by two (length is a smi).
1591  __ sra(mask, mask, kSmiTagSize + 1);
1592  __ Addu(mask, mask, -1); // Make mask.
1593 
1594  // Calculate the entry in the number string cache. The hash value in the
1595  // number string cache for smis is just the smi value, and the hash for
1596  // doubles is the xor of the upper and lower words. See
1597  // Heap::GetNumberStringCache.
1598  Isolate* isolate = masm->isolate();
1599  Label is_smi;
1600  Label load_result_from_cache;
1601  if (!object_is_smi) {
1602  __ JumpIfSmi(object, &is_smi);
1604  CpuFeatures::Scope scope(FPU);
1605  __ CheckMap(object,
1606  scratch1,
1607  Heap::kHeapNumberMapRootIndex,
1608  not_found,
1610 
1611  STATIC_ASSERT(8 == kDoubleSize);
1612  __ Addu(scratch1,
1613  object,
1615  __ lw(scratch2, MemOperand(scratch1, kPointerSize));
1616  __ lw(scratch1, MemOperand(scratch1, 0));
1617  __ Xor(scratch1, scratch1, Operand(scratch2));
1618  __ And(scratch1, scratch1, Operand(mask));
1619 
1620  // Calculate address of entry in string cache: each entry consists
1621  // of two pointer sized fields.
1622  __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
1623  __ Addu(scratch1, number_string_cache, scratch1);
1624 
1625  Register probe = mask;
1626  __ lw(probe,
1628  __ JumpIfSmi(probe, not_found);
1631  __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
1632  __ Branch(not_found);
1633  } else {
1634  // Note that there is no cache check for the non-FPU case, even though
1635  // it seems there could be; it may be a tiny optimization for non-FPU
1636  // cores.
1637  __ Branch(not_found);
1638  }
1639  }
1640 
1641  __ bind(&is_smi);
1642  Register scratch = scratch1;
1643  __ sra(scratch, object, 1); // Shift away the tag.
1644  __ And(scratch, mask, Operand(scratch));
1645 
1646  // Calculate address of entry in string cache: each entry consists
1647  // of two pointer sized fields.
1648  __ sll(scratch, scratch, kPointerSizeLog2 + 1);
1649  __ Addu(scratch, number_string_cache, scratch);
1650 
1651  // Check if the entry is the smi we are looking for.
1652  Register probe = mask;
1653  __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
1654  __ Branch(not_found, ne, object, Operand(probe));
1655 
1656  // Get the result from the cache.
1657  __ bind(&load_result_from_cache);
1658  __ lw(result,
1659  FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
1660 
1661  __ IncrementCounter(isolate->counters()->number_to_string_native(),
1662  1,
1663  scratch1,
1664  scratch2);
1665 }
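The hashing scheme described in the comments above can be restated in host C++.
The sketch below is illustrative only (not part of the original file); it
assumes, as the masking implies, that the number of cache entries is a power of
two.

    #include <cstdint>
    #include <cstring>

    // Returns the slot index of the cache entry to probe (each entry is a
    // (number, string) pair occupying two consecutive slots).
    int NumberStringCacheSlotSketch(bool is_smi, int32_t untagged_smi,
                                    double heap_number, int cache_length_in_slots) {
      int mask = cache_length_in_slots / 2 - 1;  // entry-count mask, as above
      uint32_t hash;
      if (is_smi) {
        hash = static_cast<uint32_t>(untagged_smi);  // smis hash to their own value
      } else {
        uint64_t bits;
        std::memcpy(&bits, &heap_number, sizeof bits);
        hash = static_cast<uint32_t>(bits) ^         // xor of the two 32-bit halves
               static_cast<uint32_t>(bits >> 32);
      }
      return static_cast<int>(hash & static_cast<uint32_t>(mask)) * 2;
    }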
1666 
1667 
1668 void NumberToStringStub::Generate(MacroAssembler* masm) {
1669  Label runtime;
1670 
1671  __ lw(a1, MemOperand(sp, 0));
1672 
1673  // Generate code to lookup number in the number string cache.
1674  GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime);
1675  __ DropAndRet(1);
1676 
1677  __ bind(&runtime);
1678  // Handle number to string in the runtime system if not found in the cache.
1679  __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
1680 }
1681 
1682 
1683 // On entry lhs_ (lhs) and rhs_ (rhs) are the things to be compared.
1684 // On exit, v0 is 0, positive, or negative (smi) to indicate the result
1685 // of the comparison.
1686 void CompareStub::Generate(MacroAssembler* masm) {
1687  Label slow; // Call builtin.
1688  Label not_smis, both_loaded_as_doubles;
1689 
1690 
1691  if (include_smi_compare_) {
1692  Label not_two_smis, smi_done;
1693  __ Or(a2, a1, a0);
1694  __ JumpIfNotSmi(a2, &not_two_smis);
1695  __ sra(a1, a1, 1);
1696  __ sra(a0, a0, 1);
1697  __ Ret(USE_DELAY_SLOT);
1698  __ subu(v0, a1, a0);
1699  __ bind(&not_two_smis);
1700  } else if (FLAG_debug_code) {
1701  __ Or(a2, a1, a0);
1702  __ And(a2, a2, kSmiTagMask);
1703  __ Assert(ne, "CompareStub: unexpected smi operands.",
1704  a2, Operand(zero_reg));
1705  }
1706 
1707 
1708  // NOTICE! This code is only reached after a smi-fast-case check, so
1709  // it is certain that at least one operand isn't a smi.
1710 
1711  // Handle the case where the objects are identical. Either returns the answer
1712  // or goes to slow. Only falls through if the objects were not identical.
1713  EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
1714 
1715  // If either is a Smi (we know that not both are), then they can only
1716  // be strictly equal if the other is a HeapNumber.
1717  STATIC_ASSERT(kSmiTag == 0);
1718  ASSERT_EQ(0, Smi::FromInt(0));
1719  __ And(t2, lhs_, Operand(rhs_));
1720  __ JumpIfNotSmi(t2, &not_smis, t0);
1721  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
1722  // 1) Return the answer.
1723  // 2) Go to slow.
1724  // 3) Fall through to both_loaded_as_doubles.
1725  // 4) Jump to rhs_not_nan.
1726  // In cases 3 and 4 we have found out we were dealing with a number-number
1727  // comparison and the numbers have been loaded into f12 and f14 as doubles,
1728  // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
1729  EmitSmiNonsmiComparison(masm, lhs_, rhs_,
1730  &both_loaded_as_doubles, &slow, strict_);
1731 
1732  __ bind(&both_loaded_as_doubles);
1733  // f12, f14 are the double representations of the left hand side
1734  // and the right hand side if we have FPU. Otherwise a2, a3 represent
1735  // left hand side and a0, a1 represent right hand side.
1736 
1737  Isolate* isolate = masm->isolate();
1738  if (CpuFeatures::IsSupported(FPU)) {
1739  CpuFeatures::Scope scope(FPU);
1740  Label nan;
1741  __ li(t0, Operand(LESS));
1742  __ li(t1, Operand(GREATER));
1743  __ li(t2, Operand(EQUAL));
1744 
1745  // Check if either rhs or lhs is NaN.
1746  __ BranchF(NULL, &nan, eq, f12, f14);
1747 
1748  // Check if LESS condition is satisfied. If true, move conditionally
1749  // result to v0.
1750  __ c(OLT, D, f12, f14);
1751  __ Movt(v0, t0);
1752  // Use the previous check to conditionally store the opposite condition
1753  // (GREATER) to v0. If rhs is equal to lhs, this will be corrected by the
1754  // next check.
1755  __ Movf(v0, t1);
1756  // Check if EQUAL condition is satisfied. If true, move conditionally
1757  // result to v0.
1758  __ c(EQ, D, f12, f14);
1759  __ Movt(v0, t2);
1760 
1761  __ Ret();
1762 
1763  __ bind(&nan);
1764  // NaN comparisons always fail.
1765  // Load whatever we need in v0 to make the comparison fail.
1766  if (cc_ == lt || cc_ == le) {
1767  __ li(v0, Operand(GREATER));
1768  } else {
1769  __ li(v0, Operand(LESS));
1770  }
1771  __ Ret();
1772  } else {
1773  // Checks for NaN in the doubles we have loaded. Can return the answer or
1774  // fall through if neither is a NaN. Also binds rhs_not_nan.
1775  EmitNanCheck(masm, cc_);
1776 
1777  // Compares two doubles that are not NaNs. Returns the answer.
1778  // Never falls through.
1779  EmitTwoNonNanDoubleComparison(masm, cc_);
1780  }
1781 
1782  __ bind(&not_smis);
1783  // At this point we know we are dealing with two different objects,
1784  // and neither of them is a Smi. The objects are in lhs_ and rhs_.
1785  if (strict_) {
1786  // This returns non-equal for some object types, or falls through if it
1787  // was not lucky.
1788  EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
1789  }
1790 
1791  Label check_for_symbols;
1792  Label flat_string_check;
1793  // Check for heap-number-heap-number comparison. Can jump to slow case,
1794  // or load both doubles and jump to the code that handles
1795  // that case. If the inputs are not doubles then jumps to check_for_symbols.
1796  // In this case a2 will contain the type of lhs_.
1797  EmitCheckForTwoHeapNumbers(masm,
1798  lhs_,
1799  rhs_,
1800  &both_loaded_as_doubles,
1801  &check_for_symbols,
1802  &flat_string_check);
1803 
1804  __ bind(&check_for_symbols);
1805  if (cc_ == eq && !strict_) {
1806  // Returns an answer for two symbols or two detectable objects.
1807  // Otherwise jumps to string case or not both strings case.
1808  // Assumes that a2 is the type of lhs_ on entry.
1809  EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
1810  }
1811 
1812  // Check for both being sequential ASCII strings, and inline if that is the
1813  // case.
1814  __ bind(&flat_string_check);
1815 
1816  __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, a2, a3, &slow);
1817 
1818  __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
1819  if (cc_ == eq) {
1820  StringCompareStub::GenerateFlatAsciiStringEquals(masm,
1821  lhs_,
1822  rhs_,
1823  a2,
1824  a3,
1825  t0);
1826  } else {
1827  StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
1828  lhs_,
1829  rhs_,
1830  a2,
1831  a3,
1832  t0,
1833  t1);
1834  }
1835  // Never falls through to here.
1836 
1837  __ bind(&slow);
1838  // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
1839  // a1 (rhs) second.
1840  __ Push(lhs_, rhs_);
1841  // Figure out which native to call and setup the arguments.
1842  Builtins::JavaScript native;
1843  if (cc_ == eq) {
1844  native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
1845  } else {
1846  native = Builtins::COMPARE;
1847  int ncr; // NaN compare result.
1848  if (cc_ == lt || cc_ == le) {
1849  ncr = GREATER;
1850  } else {
1851  ASSERT(cc_ == gt || cc_ == ge); // Remaining cases.
1852  ncr = LESS;
1853  }
1854  __ li(a0, Operand(Smi::FromInt(ncr)));
1855  __ push(a0);
1856  }
1857 
1858  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1859  // tagged as a small integer.
1860  __ InvokeBuiltin(native, JUMP_FUNCTION);
1861 }
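// Sketch of the NaN fallback used by the stub above: when either operand is
// NaN every ordered comparison has to evaluate to false, so the stub loads
// GREATER for < and <= and LESS for > and >=. The enums below are
// illustrative stand-ins, not the real V8 token/condition types.
enum ComparisonOutcome { kLessThan = -1, kEqualTo = 0, kGreaterThan = 1 };
enum ComparisonOp { kLt, kLe, kGt, kGe };

ComparisonOutcome NanComparisonResult(ComparisonOp cc) {
  // GREATER makes "a < b" and "a <= b" false; LESS makes "a > b" and
  // "a >= b" false, matching the li(v0, ...) selection in the nan block.
  return (cc == kLt || cc == kLe) ? kGreaterThan : kLessThan;
}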
1862 
1863 
1864 // The stub expects its argument in the tos_ register and returns its result in
1865 // it, too: zero for false, and a non-zero value for true.
1866 void ToBooleanStub::Generate(MacroAssembler* masm) {
1867  // This stub uses FPU instructions.
1868  CpuFeatures::Scope scope(FPU);
1869 
1870  Label patch;
1871  const Register map = t5.is(tos_) ? t3 : t5;
1872 
1873  // undefined -> false.
1874  CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
1875 
1876  // Boolean -> its value.
1877  CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
1878  CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
1879 
1880  // 'null' -> false.
1881  CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
1882 
1883  if (types_.Contains(SMI)) {
1884  // Smis: 0 -> false, all other -> true
1885  __ And(at, tos_, kSmiTagMask);
1886  // tos_ contains the correct return value already
1887  __ Ret(eq, at, Operand(zero_reg));
1888  } else if (types_.NeedsMap()) {
1889  // If we need a map later and have a Smi -> patch.
1890  __ JumpIfSmi(tos_, &patch);
1891  }
1892 
1893  if (types_.NeedsMap()) {
1894  __ lw(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
1895 
1896  if (types_.CanBeUndetectable()) {
1897  __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
1898  __ And(at, at, Operand(1 << Map::kIsUndetectable));
1899  // Undetectable -> false.
1900  __ Movn(tos_, zero_reg, at);
1901  __ Ret(ne, at, Operand(zero_reg));
1902  }
1903  }
1904 
1905  if (types_.Contains(SPEC_OBJECT)) {
1906  // Spec object -> true.
1907  __ GetObjectType(tos_, map, at);
1908  // tos_ contains the correct non-zero return value already.
1909  __ Ret(ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
1910  }
1911 
1912  if (types_.Contains(STRING)) {
1913  // String value -> false iff empty.
1914  __ GetObjectType(tos_, map, at);
1915  Label skip;
1916  __ Branch(&skip, ge, at, Operand(FIRST_NONSTRING_TYPE));
1917  __ Ret(USE_DELAY_SLOT); // the string length is OK as the return value
1918  __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
1919  __ bind(&skip);
1920  }
1921 
1922  if (types_.Contains(HEAP_NUMBER)) {
1923  // Heap number -> false iff +0, -0, or NaN.
1924  Label not_heap_number;
1925  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
1926  __ Branch(&not_heap_number, ne, map, Operand(at));
1927  Label zero_or_nan, number;
1928  __ ldc1(f2, FieldMemOperand(tos_, HeapNumber::kValueOffset));
1929  __ BranchF(&number, &zero_or_nan, ne, f2, kDoubleRegZero);
1930  // "tos_" is a register and contains a non-zero value by default.
1931  // Hence we only need to overwrite "tos_" with zero to return false for
1932  // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
1933  __ bind(&zero_or_nan);
1934  __ mov(tos_, zero_reg);
1935  __ bind(&number);
1936  __ Ret();
1937  __ bind(&not_heap_number);
1938  }
1939 
1940  __ bind(&patch);
1941  GenerateTypeTransition(masm);
1942 }
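// The stub above is a type-specialized form of the ToBoolean rules:
// undefined, null, false, +0, -0, NaN and the empty string are falsy,
// everything else is truthy. A plain C++ restatement of those rules; the
// Value struct is purely illustrative, V8 works on tagged values instead.
#include <cmath>
#include <string>

struct Value {
  enum Kind { kUndefined, kNull, kBoolean, kString, kNumber, kObject } kind;
  bool boolean_value;
  std::string string_value;
  double number_value;
};

bool ToBoolean(const Value& v) {
  switch (v.kind) {
    case Value::kUndefined:
    case Value::kNull:    return false;
    case Value::kBoolean: return v.boolean_value;
    case Value::kString:  return !v.string_value.empty();  // false iff empty
    case Value::kNumber:  // +0, -0 and NaN are falsy
      return v.number_value != 0 && !std::isnan(v.number_value);
    case Value::kObject:  return true;  // undetectable objects excepted
  }
  return true;
}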
1943 
1944 
1945 void ToBooleanStub::CheckOddball(MacroAssembler* masm,
1946  Type type,
1947  Heap::RootListIndex value,
1948  bool result) {
1949  if (types_.Contains(type)) {
1950  // If we see an expected oddball, return its ToBoolean value tos_.
1951  __ LoadRoot(at, value);
1952  __ Subu(at, at, tos_); // This is a check for equality for the movz below.
1953  // The value of a root is never NULL, so we can avoid loading a non-null
1954  // value into tos_ when we want to return 'true'.
1955  if (!result) {
1956  __ Movz(tos_, zero_reg, at);
1957  }
1958  __ Ret(eq, at, Operand(zero_reg));
1959  }
1960 }
1961 
1962 
1963 void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
1964  __ Move(a3, tos_);
1965  __ li(a2, Operand(Smi::FromInt(tos_.code())));
1966  __ li(a1, Operand(Smi::FromInt(types_.ToByte())));
1967  __ Push(a3, a2, a1);
1968  // Patch the caller to an appropriate specialized stub and return the
1969  // operation result to the caller of the stub.
1970  __ TailCallExternalReference(
1971  ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
1972  3,
1973  1);
1974 }
1975 
1976 
1977 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
1978  // We don't allow a GC during a store buffer overflow so there is no need to
1979  // store the registers in any particular way, but we do have to store and
1980  // restore them.
1981  __ MultiPush(kJSCallerSaved | ra.bit());
1982  if (save_doubles_ == kSaveFPRegs) {
1983  CpuFeatures::Scope scope(FPU);
1984  __ MultiPushFPU(kCallerSavedFPU);
1985  }
1986  const int argument_count = 1;
1987  const int fp_argument_count = 0;
1988  const Register scratch = a1;
1989 
1990  AllowExternalCallThatCantCauseGC scope(masm);
1991  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
1992  __ li(a0, Operand(ExternalReference::isolate_address()));
1993  __ CallCFunction(
1994  ExternalReference::store_buffer_overflow_function(masm->isolate()),
1995  argument_count);
1996  if (save_doubles_ == kSaveFPRegs) {
1997  CpuFeatures::Scope scope(FPU);
1998  __ MultiPopFPU(kCallerSavedFPU);
1999  }
2000 
2001  __ MultiPop(kJSCallerSaved | ra.bit());
2002  __ Ret();
2003 }
2004 
2005 
2006 void UnaryOpStub::PrintName(StringStream* stream) {
2007  const char* op_name = Token::Name(op_);
2008  const char* overwrite_name = NULL; // Make g++ happy.
2009  switch (mode_) {
2010  case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
2011  case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
2012  }
2013  stream->Add("UnaryOpStub_%s_%s_%s",
2014  op_name,
2015  overwrite_name,
2016  UnaryOpIC::GetName(operand_type_));
2017 }
2018 
2019 
2020 // TODO(svenpanne): Use virtual functions instead of switch.
2021 void UnaryOpStub::Generate(MacroAssembler* masm) {
2022  switch (operand_type_) {
2023  case UnaryOpIC::UNINITIALIZED:
2024  GenerateTypeTransition(masm);
2025  break;
2026  case UnaryOpIC::SMI:
2027  GenerateSmiStub(masm);
2028  break;
2029  case UnaryOpIC::HEAP_NUMBER:
2030  GenerateHeapNumberStub(masm);
2031  break;
2032  case UnaryOpIC::GENERIC:
2033  GenerateGenericStub(masm);
2034  break;
2035  }
2036 }
2037 
2038 
2039 void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2040  // Argument is in a0 and v0 at this point, so we can overwrite a0.
2041  __ li(a2, Operand(Smi::FromInt(op_)));
2042  __ li(a1, Operand(Smi::FromInt(mode_)));
2043  __ li(a0, Operand(Smi::FromInt(operand_type_)));
2044  __ Push(v0, a2, a1, a0);
2045 
2046  __ TailCallExternalReference(
2047  ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
2048 }
2049 
2050 
2051 // TODO(svenpanne): Use virtual functions instead of switch.
2052 void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2053  switch (op_) {
2054  case Token::SUB:
2055  GenerateSmiStubSub(masm);
2056  break;
2057  case Token::BIT_NOT:
2058  GenerateSmiStubBitNot(masm);
2059  break;
2060  default:
2061  UNREACHABLE();
2062  }
2063 }
2064 
2065 
2066 void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
2067  Label non_smi, slow;
2068  GenerateSmiCodeSub(masm, &non_smi, &slow);
2069  __ bind(&non_smi);
2070  __ bind(&slow);
2071  GenerateTypeTransition(masm);
2072 }
2073 
2074 
2075 void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
2076  Label non_smi;
2077  GenerateSmiCodeBitNot(masm, &non_smi);
2078  __ bind(&non_smi);
2079  GenerateTypeTransition(masm);
2080 }
2081 
2082 
2083 void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
2084  Label* non_smi,
2085  Label* slow) {
2086  __ JumpIfNotSmi(a0, non_smi);
2087 
2088  // The result of negating zero or the smallest negative smi is not a smi.
2089  __ And(t0, a0, ~0x80000000);
2090  __ Branch(slow, eq, t0, Operand(zero_reg));
2091 
2092  // Return '0 - value'.
2093  __ Ret(USE_DELAY_SLOT);
2094  __ subu(v0, zero_reg, a0);
2095 }
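// The "And(t0, a0, ~0x80000000); Branch(slow, eq, ...)" filter above sends
// exactly two tagged inputs to the slow path: 0 (whose negation is -0, not
// a smi) and 0x80000000, the tagged form of the most negative smi, whose
// negation overflows the smi range. Masking off the sign bit and testing
// for zero catches both at once. Illustrative check; the helper name is
// not part of V8.
#include <stdint.h>
#include <assert.h>

bool NegationStaysSmi(int32_t tagged_smi) {
  return (static_cast<uint32_t>(tagged_smi) & 0x7fffffffu) != 0;
}

void NegationStaysSmiCheck() {
  assert(!NegationStaysSmi(0));           // -0 needs a heap number
  assert(!NegationStaysSmi(INT32_MIN));   // tagged smi minimum overflows
  assert(NegationStaysSmi(42 << 1));      // an ordinary smi negates fine
}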
2096 
2097 
2098 void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
2099  Label* non_smi) {
2100  __ JumpIfNotSmi(a0, non_smi);
2101 
2102  // Flip bits and revert inverted smi-tag.
2103  __ Neg(v0, a0);
2104  __ And(v0, v0, ~kSmiTagMask);
2105  __ Ret();
2106 }
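// Identity behind the two instructions above: a smi is tagged as n << 1
// with tag bit 0, so flipping every bit yields ((~n) << 1) | 1, and
// clearing the (now inverted) tag bit produces the correctly tagged smi
// for ~n. Illustrative check, not V8 code.
#include <stdint.h>
#include <assert.h>

int32_t SmiBitNot(int32_t tagged_smi) {
  return ~tagged_smi & ~1;  // flip bits, then clear the inverted tag bit
}

void SmiBitNotCheck() {
  assert(SmiBitNot(84) == -86);   // tagged 42 -> tagged ~42 == -43
  assert(SmiBitNot(-14) == 12);   // tagged -7 -> tagged ~(-7) == 6
}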
2107 
2108 
2109 // TODO(svenpanne): Use virtual functions instead of switch.
2110 void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
2111  switch (op_) {
2112  case Token::SUB:
2113  GenerateHeapNumberStubSub(masm);
2114  break;
2115  case Token::BIT_NOT:
2116  GenerateHeapNumberStubBitNot(masm);
2117  break;
2118  default:
2119  UNREACHABLE();
2120  }
2121 }
2122 
2123 
2124 void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
2125  Label non_smi, slow, call_builtin;
2126  GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
2127  __ bind(&non_smi);
2128  GenerateHeapNumberCodeSub(masm, &slow);
2129  __ bind(&slow);
2130  GenerateTypeTransition(masm);
2131  __ bind(&call_builtin);
2132  GenerateGenericCodeFallback(masm);
2133 }
2134 
2135 
2136 void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
2137  Label non_smi, slow;
2138  GenerateSmiCodeBitNot(masm, &non_smi);
2139  __ bind(&non_smi);
2140  GenerateHeapNumberCodeBitNot(masm, &slow);
2141  __ bind(&slow);
2142  GenerateTypeTransition(masm);
2143 }
2144 
2145 
2146 void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
2147  Label* slow) {
2148  EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
2149  // a0 is a heap number. Get a new heap number in a1.
2150  if (mode_ == UNARY_OVERWRITE) {
2151  __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
2152  __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
2153  __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
2154  } else {
2155  Label slow_allocate_heapnumber, heapnumber_allocated;
2156  __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber);
2157  __ jmp(&heapnumber_allocated);
2158 
2159  __ bind(&slow_allocate_heapnumber);
2160  {
2161  FrameScope scope(masm, StackFrame::INTERNAL);
2162  __ push(a0);
2163  __ CallRuntime(Runtime::kNumberAlloc, 0);
2164  __ mov(a1, v0);
2165  __ pop(a0);
2166  }
2167 
2168  __ bind(&heapnumber_allocated);
2169  __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
2170  __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
2171  __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
2172  __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign.
2173  __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset));
2174  __ mov(v0, a1);
2175  }
2176  __ Ret();
2177 }
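// Negating a heap number above never touches the FPU: xoring the word that
// holds the IEEE-754 sign bit with HeapNumber::kSignMask flips the sign, and
// it works for NaN and +/-0 as well. Equivalent standalone sketch operating
// on the whole 64-bit pattern (illustrative, not V8 code):
#include <stdint.h>
#include <string.h>

double FlipDoubleSign(double v) {
  uint64_t bits;
  memcpy(&bits, &v, sizeof(bits));
  bits ^= 0x8000000000000000ull;   // the sign bit lives in the exponent word
  memcpy(&v, &bits, sizeof(v));
  return v;                        // FlipDoubleSign(1.5) == -1.5, 0.0 -> -0.0
}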
2178 
2179 
2180 void UnaryOpStub::GenerateHeapNumberCodeBitNot(
2181  MacroAssembler* masm,
2182  Label* slow) {
2183  Label impossible;
2184 
2185  EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
2186  // Convert the heap number in a0 to an untagged integer in a1.
2187  __ ConvertToInt32(a0, a1, a2, a3, f0, slow);
2188 
2189  // Do the bitwise operation and check if the result fits in a smi.
2190  Label try_float;
2191  __ Neg(a1, a1);
2192  __ Addu(a2, a1, Operand(0x40000000));
2193  __ Branch(&try_float, lt, a2, Operand(zero_reg));
2194 
2195  // Tag the result as a smi and we're done.
2196  __ SmiTag(v0, a1);
2197  __ Ret();
2198 
2199  // Try to store the result in a heap number.
2200  __ bind(&try_float);
2201  if (mode_ == UNARY_NO_OVERWRITE) {
2202  Label slow_allocate_heapnumber, heapnumber_allocated;
2203  // Allocate a new heap number without zapping v0, which we need if it fails.
2204  __ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber);
2205  __ jmp(&heapnumber_allocated);
2206 
2207  __ bind(&slow_allocate_heapnumber);
2208  {
2209  FrameScope scope(masm, StackFrame::INTERNAL);
2210  __ push(v0); // Push the heap number, not the untagged int32.
2211  __ CallRuntime(Runtime::kNumberAlloc, 0);
2212  __ mov(a2, v0); // Move the new heap number into a2.
2213  // Get the heap number into v0, now that the new heap number is in a2.
2214  __ pop(v0);
2215  }
2216 
2217  // Convert the heap number in v0 to an untagged integer in a1.
2218  // This can't go to the slow case because it's the same number we
2219  // already converted once before.
2220  __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible);
2221  // Negate the result.
2222  __ Xor(a1, a1, -1);
2223 
2224  __ bind(&heapnumber_allocated);
2225  __ mov(v0, a2); // Move newly allocated heap number to v0.
2226  }
2227 
2228  if (CpuFeatures::IsSupported(FPU)) {
2229  // Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
2230  CpuFeatures::Scope scope(FPU);
2231  __ mtc1(a1, f0);
2232  __ cvt_d_w(f0, f0);
2233  __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
2234  __ Ret();
2235  } else {
2236  // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
2237  // have to set up a frame.
2238  WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3);
2239  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2240  }
2241 
2242  __ bind(&impossible);
2243  if (FLAG_debug_code) {
2244  __ stop("Incorrect assumption in bit-not stub");
2245  }
2246 }
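// The "Addu(..., 0x40000000); branch if negative" pattern used above (and
// again in the binary-op stubs below) tests whether a 32-bit integer fits in
// the 31-bit smi range [-2^30, 2^30 - 1]: adding 2^30 maps exactly that range
// onto the non-negative 32-bit values. Illustrative version; the helper name
// is not V8 API.
#include <stdint.h>

bool FitsInSmi(int32_t value) {
  // Unsigned arithmetic mirrors the Addu wraparound; the sum has the sign
  // bit clear precisely when value is inside the smi range, which is what
  // the "branch if negative" in the generated code checks.
  return (static_cast<uint32_t>(value) + 0x40000000u) < 0x80000000u;
}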
2247 
2248 
2249 // TODO(svenpanne): Use virtual functions instead of switch.
2250 void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
2251  switch (op_) {
2252  case Token::SUB:
2253  GenerateGenericStubSub(masm);
2254  break;
2255  case Token::BIT_NOT:
2256  GenerateGenericStubBitNot(masm);
2257  break;
2258  default:
2259  UNREACHABLE();
2260  }
2261 }
2262 
2263 
2264 void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
2265  Label non_smi, slow;
2266  GenerateSmiCodeSub(masm, &non_smi, &slow);
2267  __ bind(&non_smi);
2268  GenerateHeapNumberCodeSub(masm, &slow);
2269  __ bind(&slow);
2270  GenerateGenericCodeFallback(masm);
2271 }
2272 
2273 
2274 void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
2275  Label non_smi, slow;
2276  GenerateSmiCodeBitNot(masm, &non_smi);
2277  __ bind(&non_smi);
2278  GenerateHeapNumberCodeBitNot(masm, &slow);
2279  __ bind(&slow);
2280  GenerateGenericCodeFallback(masm);
2281 }
2282 
2283 
2284 void UnaryOpStub::GenerateGenericCodeFallback(
2285  MacroAssembler* masm) {
2286  // Handle the slow case by jumping to the JavaScript builtin.
2287  __ push(a0);
2288  switch (op_) {
2289  case Token::SUB:
2290  __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
2291  break;
2292  case Token::BIT_NOT:
2293  __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
2294  break;
2295  default:
2296  UNREACHABLE();
2297  }
2298 }
2299 
2300 
2301 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2302  Label get_result;
2303 
2304  __ Push(a1, a0);
2305 
2306  __ li(a2, Operand(Smi::FromInt(MinorKey())));
2307  __ li(a1, Operand(Smi::FromInt(op_)));
2308  __ li(a0, Operand(Smi::FromInt(operands_type_)));
2309  __ Push(a2, a1, a0);
2310 
2311  __ TailCallExternalReference(
2312  ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
2313  masm->isolate()),
2314  5,
2315  1);
2316 }
2317 
2318 
2319 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
2320  MacroAssembler* masm) {
2321  UNIMPLEMENTED();
2322 }
2323 
2324 
2325 void BinaryOpStub::Generate(MacroAssembler* masm) {
2326  // Explicitly allow generation of nested stubs. It is safe here because
2327  // generation code does not use any raw pointers.
2328  AllowStubCallsScope allow_stub_calls(masm, true);
2329  switch (operands_type_) {
2330  case BinaryOpIC::UNINITIALIZED:
2331  GenerateTypeTransition(masm);
2332  break;
2333  case BinaryOpIC::SMI:
2334  GenerateSmiStub(masm);
2335  break;
2336  case BinaryOpIC::INT32:
2337  GenerateInt32Stub(masm);
2338  break;
2339  case BinaryOpIC::HEAP_NUMBER:
2340  GenerateHeapNumberStub(masm);
2341  break;
2342  case BinaryOpIC::ODDBALL:
2343  GenerateOddballStub(masm);
2344  break;
2345  case BinaryOpIC::BOTH_STRING:
2346  GenerateBothStringStub(masm);
2347  break;
2348  case BinaryOpIC::STRING:
2349  GenerateStringStub(masm);
2350  break;
2351  case BinaryOpIC::GENERIC:
2352  GenerateGeneric(masm);
2353  break;
2354  default:
2355  UNREACHABLE();
2356  }
2357 }
2358 
2359 
2360 void BinaryOpStub::PrintName(StringStream* stream) {
2361  const char* op_name = Token::Name(op_);
2362  const char* overwrite_name;
2363  switch (mode_) {
2364  case NO_OVERWRITE: overwrite_name = "Alloc"; break;
2365  case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
2366  case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
2367  default: overwrite_name = "UnknownOverwrite"; break;
2368  }
2369  stream->Add("BinaryOpStub_%s_%s_%s",
2370  op_name,
2371  overwrite_name,
2372  BinaryOpIC::GetName(operands_type_));
2373 }
2374 
2375 
2376 
2377 void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
2378  Register left = a1;
2379  Register right = a0;
2380 
2381  Register scratch1 = t0;
2382  Register scratch2 = t1;
2383 
2384  ASSERT(right.is(a0));
2385  STATIC_ASSERT(kSmiTag == 0);
2386 
2387  Label not_smi_result;
2388  switch (op_) {
2389  case Token::ADD:
2390  __ AdduAndCheckForOverflow(v0, left, right, scratch1);
2391  __ RetOnNoOverflow(scratch1);
2392  // No need to revert anything - right and left are intact.
2393  break;
2394  case Token::SUB:
2395  __ SubuAndCheckForOverflow(v0, left, right, scratch1);
2396  __ RetOnNoOverflow(scratch1);
2397  // No need to revert anything - right and left are intact.
2398  break;
2399  case Token::MUL: {
2400  // Remove the tag from one of the operands. This way the multiplication
2401  // result will be a smi if it fits in the smi range.
2402  __ SmiUntag(scratch1, right);
2403  // Do multiplication.
2404  // lo = lower 32 bits of scratch1 * left.
2405  // hi = higher 32 bits of scratch1 * left.
2406  __ Mult(left, scratch1);
2407  // Check for overflowing the smi range - no overflow if the higher 33 bits
2408  // of the result are identical.
2409  __ mflo(scratch1);
2410  __ mfhi(scratch2);
2411  __ sra(scratch1, scratch1, 31);
2412  __ Branch(&not_smi_result, ne, scratch1, Operand(scratch2));
2413  // Go slow on zero result to handle -0.
2414  __ mflo(v0);
2415  __ Ret(ne, v0, Operand(zero_reg));
2416  // We need -0 if we were multiplying a negative number with 0 to get 0.
2417  // We know one of them was zero.
2418  __ Addu(scratch2, right, left);
2419  Label skip;
2420  // ARM uses the 'pl' condition, which is 'ge'.
2421  // Negating it results in 'lt'.
2422  __ Branch(&skip, lt, scratch2, Operand(zero_reg));
2423  ASSERT(Smi::FromInt(0) == 0);
2424  __ Ret(USE_DELAY_SLOT);
2425  __ mov(v0, zero_reg); // Return smi 0 if the non-zero one was positive.
2426  __ bind(&skip);
2427  // We fall through here if we multiplied a negative number with 0, because
2428  // that would mean we should produce -0.
2429  }
2430  break;
2431  case Token::DIV: {
2432  Label done;
2433  __ SmiUntag(scratch2, right);
2434  __ SmiUntag(scratch1, left);
2435  __ Div(scratch1, scratch2);
2436  // A minor optimization: div may be calculated asynchronously, so we check
2437  // for division by zero before getting the result.
2438  __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
2439  // If the result is 0, we need to make sure the divisor (right) is
2440  // positive, otherwise it is a -0 case.
2441  // Quotient is in 'lo', remainder is in 'hi'.
2442  // Check for no remainder first.
2443  __ mfhi(scratch1);
2444  __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
2445  __ mflo(scratch1);
2446  __ Branch(&done, ne, scratch1, Operand(zero_reg));
2447  __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2448  __ bind(&done);
2449  // Check that the signed result fits in a Smi.
2450  __ Addu(scratch2, scratch1, Operand(0x40000000));
2451  __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2452  __ SmiTag(v0, scratch1);
2453  __ Ret();
2454  }
2455  break;
2456  case Token::MOD: {
2457  Label done;
2458  __ SmiUntag(scratch2, right);
2459  __ SmiUntag(scratch1, left);
2460  __ Div(scratch1, scratch2);
2461  // A minor optimization: div may be calculated asynchronously, so we check
2462  // for division by 0 before calling mfhi.
2463  // Check for zero on the right hand side.
2464  __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
2465  // If the result is 0, we need to make sure the dividend (left) is
2466  // positive (or 0), otherwise it is a -0 case.
2467  // Remainder is in 'hi'.
2468  __ mfhi(scratch2);
2469  __ Branch(&done, ne, scratch2, Operand(zero_reg));
2470  __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
2471  __ bind(&done);
2472  // Check that the signed result fits in a Smi.
2473  __ Addu(scratch1, scratch2, Operand(0x40000000));
2474  __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
2475  __ SmiTag(v0, scratch2);
2476  __ Ret();
2477  }
2478  break;
2479  case Token::BIT_OR:
2480  __ Ret(USE_DELAY_SLOT);
2481  __ or_(v0, left, right);
2482  break;
2483  case Token::BIT_AND:
2484  __ Ret(USE_DELAY_SLOT);
2485  __ and_(v0, left, right);
2486  break;
2487  case Token::BIT_XOR:
2488  __ Ret(USE_DELAY_SLOT);
2489  __ xor_(v0, left, right);
2490  break;
2491  case Token::SAR:
2492  // Remove tags from right operand.
2493  __ GetLeastBitsFromSmi(scratch1, right, 5);
2494  __ srav(scratch1, left, scratch1);
2495  // Smi tag result.
2496  __ And(v0, scratch1, ~kSmiTagMask);
2497  __ Ret();
2498  break;
2499  case Token::SHR:
2500  // Remove tags from operands. We can't do this on a 31 bit number
2501  // because then the 0s get shifted into bit 30 instead of bit 31.
2502  __ SmiUntag(scratch1, left);
2503  __ GetLeastBitsFromSmi(scratch2, right, 5);
2504  __ srlv(v0, scratch1, scratch2);
2505  // Unsigned shift is not allowed to produce a negative number, so
2506  // check the sign bit and the sign bit after Smi tagging.
2507  __ And(scratch1, v0, Operand(0xc0000000));
2508  __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
2509  // Smi tag result.
2510  __ SmiTag(v0);
2511  __ Ret();
2512  break;
2513  case Token::SHL:
2514  // Remove tags from operands.
2515  __ SmiUntag(scratch1, left);
2516  __ GetLeastBitsFromSmi(scratch2, right, 5);
2517  __ sllv(scratch1, scratch1, scratch2);
2518  // Check that the signed result fits in a Smi.
2519  __ Addu(scratch2, scratch1, Operand(0x40000000));
2520  __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2521  __ SmiTag(v0, scratch1);
2522  __ Ret();
2523  break;
2524  default:
2525  UNREACHABLE();
2526  }
2527  __ bind(&not_smi_result);
2528 }
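// Sketch of the MUL fast path above: the stub keeps one operand tagged, forms
// the full 64-bit product with mult, and accepts it only if the high word
// equals the sign extension of the low word (i.e. the product fits in 32
// bits). A zero product additionally inspects the operand signs so that
// "-n * 0" bails out and can produce -0. The version below works on untagged
// values for clarity and is illustrative, not V8 code.
#include <stdint.h>

bool SmiTimesSmi(int32_t left, int32_t right, int32_t* result) {
  int64_t product = static_cast<int64_t>(left) * right;  // like mult: hi/lo
  int32_t lo = static_cast<int32_t>(product);
  int32_t hi = static_cast<int32_t>(static_cast<uint64_t>(product) >> 32);
  int32_t lo_sign = (lo < 0) ? -1 : 0;        // sign extension of the low word
  if (hi != lo_sign) return false;            // product overflows 32 bits
  if (lo == 0 && (left < 0 || right < 0)) return false;  // would need -0
  *result = lo;
  return true;
}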
2529 
2530 
2531 void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2532  bool smi_operands,
2533  Label* not_numbers,
2534  Label* gc_required) {
2535  Register left = a1;
2536  Register right = a0;
2537  Register scratch1 = t3;
2538  Register scratch2 = t5;
2539  Register scratch3 = t0;
2540 
2541  ASSERT(smi_operands || (not_numbers != NULL));
2542  if (smi_operands && FLAG_debug_code) {
2543  __ AbortIfNotSmi(left);
2544  __ AbortIfNotSmi(right);
2545  }
2546 
2547  Register heap_number_map = t2;
2548  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2549 
2550  switch (op_) {
2551  case Token::ADD:
2552  case Token::SUB:
2553  case Token::MUL:
2554  case Token::DIV:
2555  case Token::MOD: {
2556  // Load left and right operands into f12 and f14 or a0/a1 and a2/a3
2557  // depending on whether FPU is available or not.
2558  FloatingPointHelper::Destination destination =
2559  CpuFeatures::IsSupported(FPU) &&
2560  op_ != Token::MOD ?
2561  FloatingPointHelper::kFPURegisters :
2562  FloatingPointHelper::kCoreRegisters;
2563 
2564  // Allocate new heap number for result.
2565  Register result = s0;
2566  GenerateHeapResultAllocation(
2567  masm, result, heap_number_map, scratch1, scratch2, gc_required);
2568 
2569  // Load the operands.
2570  if (smi_operands) {
2571  FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
2572  } else {
2573  FloatingPointHelper::LoadOperands(masm,
2574  destination,
2575  heap_number_map,
2576  scratch1,
2577  scratch2,
2578  not_numbers);
2579  }
2580 
2581  // Calculate the result.
2582  if (destination == FloatingPointHelper::kFPURegisters) {
2583  // Using FPU registers:
2584  // f12: Left value.
2585  // f14: Right value.
2586  CpuFeatures::Scope scope(FPU);
2587  switch (op_) {
2588  case Token::ADD:
2589  __ add_d(f10, f12, f14);
2590  break;
2591  case Token::SUB:
2592  __ sub_d(f10, f12, f14);
2593  break;
2594  case Token::MUL:
2595  __ mul_d(f10, f12, f14);
2596  break;
2597  case Token::DIV:
2598  __ div_d(f10, f12, f14);
2599  break;
2600  default:
2601  UNREACHABLE();
2602  }
2603 
2604  // ARM uses a workaround here because of the unaligned HeapNumber
2605  // kValueOffset. On MIPS this workaround is built into sdc1 so
2606  // there's no point in generating even more instructions.
2607  __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
2608  __ Ret(USE_DELAY_SLOT);
2609  __ mov(v0, result);
2610  } else {
2611  // Call the C function to handle the double operation.
2612  FloatingPointHelper::CallCCodeForDoubleOperation(masm,
2613  op_,
2614  result,
2615  scratch1);
2616  if (FLAG_debug_code) {
2617  __ stop("Unreachable code.");
2618  }
2619  }
2620  break;
2621  }
2622  case Token::BIT_OR:
2623  case Token::BIT_XOR:
2624  case Token::BIT_AND:
2625  case Token::SAR:
2626  case Token::SHR:
2627  case Token::SHL: {
2628  if (smi_operands) {
2629  __ SmiUntag(a3, left);
2630  __ SmiUntag(a2, right);
2631  } else {
2632  // Convert operands to 32-bit integers. Right in a2 and left in a3.
2633  FloatingPointHelper::ConvertNumberToInt32(masm,
2634  left,
2635  a3,
2636  heap_number_map,
2637  scratch1,
2638  scratch2,
2639  scratch3,
2640  f0,
2641  not_numbers);
2642  FloatingPointHelper::ConvertNumberToInt32(masm,
2643  right,
2644  a2,
2645  heap_number_map,
2646  scratch1,
2647  scratch2,
2648  scratch3,
2649  f0,
2650  not_numbers);
2651  }
2652  Label result_not_a_smi;
2653  switch (op_) {
2654  case Token::BIT_OR:
2655  __ Or(a2, a3, Operand(a2));
2656  break;
2657  case Token::BIT_XOR:
2658  __ Xor(a2, a3, Operand(a2));
2659  break;
2660  case Token::BIT_AND:
2661  __ And(a2, a3, Operand(a2));
2662  break;
2663  case Token::SAR:
2664  // Use only the 5 least significant bits of the shift count.
2665  __ GetLeastBitsFromInt32(a2, a2, 5);
2666  __ srav(a2, a3, a2);
2667  break;
2668  case Token::SHR:
2669  // Use only the 5 least significant bits of the shift count.
2670  __ GetLeastBitsFromInt32(a2, a2, 5);
2671  __ srlv(a2, a3, a2);
2672  // SHR is special because it is required to produce a positive answer.
2673  // The code below for writing into heap numbers isn't capable of
2674  // writing the register as an unsigned int so we go to slow case if we
2675  // hit this case.
2676  if (CpuFeatures::IsSupported(FPU)) {
2677  __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
2678  } else {
2679  __ Branch(not_numbers, lt, a2, Operand(zero_reg));
2680  }
2681  break;
2682  case Token::SHL:
2683  // Use only the 5 least significant bits of the shift count.
2684  __ GetLeastBitsFromInt32(a2, a2, 5);
2685  __ sllv(a2, a3, a2);
2686  break;
2687  default:
2688  UNREACHABLE();
2689  }
2690  // Check that the *signed* result fits in a smi.
2691  __ Addu(a3, a2, Operand(0x40000000));
2692  __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
2693  __ SmiTag(v0, a2);
2694  __ Ret();
2695 
2696  // Allocate new heap number for result.
2697  __ bind(&result_not_a_smi);
2698  Register result = t1;
2699  if (smi_operands) {
2700  __ AllocateHeapNumber(
2701  result, scratch1, scratch2, heap_number_map, gc_required);
2702  } else {
2703  GenerateHeapResultAllocation(
2704  masm, result, heap_number_map, scratch1, scratch2, gc_required);
2705  }
2706 
2707  // a2: Answer as signed int32.
2708  // t1: Heap number to write answer into.
2709 
2710  // Nothing can go wrong now, so move the heap number to v0, which is the
2711  // result.
2712  __ mov(v0, t1);
2713 
2714  if (CpuFeatures::IsSupported(FPU)) {
2715  // Convert the int32 in a2 to the heap number in a0. As
2716  // mentioned above SHR needs to always produce a positive result.
2717  CpuFeatures::Scope scope(FPU);
2718  __ mtc1(a2, f0);
2719  if (op_ == Token::SHR) {
2720  __ Cvt_d_uw(f0, f0, f22);
2721  } else {
2722  __ cvt_d_w(f0, f0);
2723  }
2724  // ARM uses a workaround here because of the unaligned HeapNumber
2725  // kValueOffset. On MIPS this workaround is built into sdc1 so
2726  // there's no point in generating even more instructions.
2727  __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
2728  __ Ret();
2729  } else {
2730  // Tail call that writes the int32 in a2 to the heap number in v0, using
2731  // a3 and a0 as scratch. v0 is preserved and returned.
2732  WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
2733  __ TailCallStub(&stub);
2734  }
2735  break;
2736  }
2737  default:
2738  UNREACHABLE();
2739  }
2740 }
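// Why SHR is singled out above: an unsigned shift with a count of 0 can leave
// a value with the top bit set, i.e. in [2^31, 2^32), which has no signed
// 32-bit (or smi) representation, so the stub has to box it as a double
// instead. Illustrative version of that decision; not V8 API.
#include <stdint.h>

double JsUnsignedShiftRight(uint32_t left, uint32_t shift_count,
                            bool* fits_in_int32) {
  uint32_t result = left >> (shift_count & 0x1f);  // JS keeps the low 5 bits
  *fits_in_int32 = result < 0x80000000u;           // false only for >= 2^31
  return static_cast<double>(result);              // 0x80000000u -> 2147483648.0
}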
2741 
2742 
2743 // Generate the smi code. If the operation on smis is successful a return is
2744 // generated. If the result is not a smi and heap number allocation is not
2745 // requested, the code falls through. If number allocation is requested but a
2746 // heap number cannot be allocated, the code jumps to the label gc_required.
2747 void BinaryOpStub::GenerateSmiCode(
2748  MacroAssembler* masm,
2749  Label* use_runtime,
2750  Label* gc_required,
2751  SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
2752  Label not_smis;
2753 
2754  Register left = a1;
2755  Register right = a0;
2756  Register scratch1 = t3;
2757 
2758  // Perform combined smi check on both operands.
2759  __ Or(scratch1, left, Operand(right));
2760  STATIC_ASSERT(kSmiTag == 0);
2761  __ JumpIfNotSmi(scratch1, &not_smis);
2762 
2763  // If the smi-smi operation results in a smi, a return is generated.
2764  GenerateSmiSmiOperation(masm);
2765 
2766  // If heap number results are possible, generate the result in an allocated
2767  // heap number.
2768  if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
2769  GenerateFPOperation(masm, true, use_runtime, gc_required);
2770  }
2771  __ bind(&not_smis);
2772 }
2773 
2774 
2775 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2776  Label not_smis, call_runtime;
2777 
2778  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
2779  result_type_ == BinaryOpIC::SMI) {
2780  // Only allow smi results.
2781  GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
2782  } else {
2783  // Allow heap number result and don't make a transition if a heap number
2784  // cannot be allocated.
2785  GenerateSmiCode(masm,
2786  &call_runtime,
2787  &call_runtime,
2788  ALLOW_HEAPNUMBER_RESULTS);
2789  }
2790 
2791  // Code falls through if the result is not returned as either a smi or heap
2792  // number.
2793  GenerateTypeTransition(masm);
2794 
2795  __ bind(&call_runtime);
2796  GenerateCallRuntime(masm);
2797 }
2798 
2799 
2800 void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
2801  ASSERT(operands_type_ == BinaryOpIC::STRING);
2802  // Try to add arguments as strings, otherwise, transition to the generic
2803  // BinaryOpIC type.
2804  GenerateAddStrings(masm);
2805  GenerateTypeTransition(masm);
2806 }
2807 
2808 
2809 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
2810  Label call_runtime;
2811  ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
2812  ASSERT(op_ == Token::ADD);
2813  // If both arguments are strings, call the string add stub.
2814  // Otherwise, do a transition.
2815 
2816  // Registers containing left and right operands respectively.
2817  Register left = a1;
2818  Register right = a0;
2819 
2820  // Test if left operand is a string.
2821  __ JumpIfSmi(left, &call_runtime);
2822  __ GetObjectType(left, a2, a2);
2823  __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
2824 
2825  // Test if right operand is a string.
2826  __ JumpIfSmi(right, &call_runtime);
2827  __ GetObjectType(right, a2, a2);
2828  __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
2829 
2830  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
2831  GenerateRegisterArgsPush(masm);
2832  __ TailCallStub(&string_add_stub);
2833 
2834  __ bind(&call_runtime);
2835  GenerateTypeTransition(masm);
2836 }
2837 
2838 
2839 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2840  ASSERT(operands_type_ == BinaryOpIC::INT32);
2841 
2842  Register left = a1;
2843  Register right = a0;
2844  Register scratch1 = t3;
2845  Register scratch2 = t5;
2846  FPURegister double_scratch = f0;
2847  FPURegister single_scratch = f6;
2848 
2849  Register heap_number_result = no_reg;
2850  Register heap_number_map = t2;
2851  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2852 
2853  Label call_runtime;
2854  // Labels for type transition, used for wrong input or output types.
2855  // Both labels are currently bound to the same position. We use two
2856  // different labels to differentiate the cause leading to the type transition.
2857  Label transition;
2858 
2859  // Smi-smi fast case.
2860  Label skip;
2861  __ Or(scratch1, left, right);
2862  __ JumpIfNotSmi(scratch1, &skip);
2863  GenerateSmiSmiOperation(masm);
2864  // Fall through if the result is not a smi.
2865  __ bind(&skip);
2866 
2867  switch (op_) {
2868  case Token::ADD:
2869  case Token::SUB:
2870  case Token::MUL:
2871  case Token::DIV:
2872  case Token::MOD: {
2873  // Load both operands and check that they are 32-bit integer.
2874  // Jump to type transition if they are not. The registers a0 and a1 (right
2875  // and left) are preserved for the runtime call.
2876  FloatingPointHelper::Destination destination =
2877  (CpuFeatures::IsSupported(FPU) && op_ != Token::MOD)
2878  ? FloatingPointHelper::kFPURegisters
2879  : FloatingPointHelper::kCoreRegisters;
2880 
2881  FloatingPointHelper::LoadNumberAsInt32Double(masm,
2882  right,
2883  destination,
2884  f14,
2885  a2,
2886  a3,
2887  heap_number_map,
2888  scratch1,
2889  scratch2,
2890  f2,
2891  &transition);
2892  FloatingPointHelper::LoadNumberAsInt32Double(masm,
2893  left,
2894  destination,
2895  f12,
2896  t0,
2897  t1,
2898  heap_number_map,
2899  scratch1,
2900  scratch2,
2901  f2,
2902  &transition);
2903 
2904  if (destination == FloatingPointHelper::kFPURegisters) {
2905  CpuFeatures::Scope scope(FPU);
2906  Label return_heap_number;
2907  switch (op_) {
2908  case Token::ADD:
2909  __ add_d(f10, f12, f14);
2910  break;
2911  case Token::SUB:
2912  __ sub_d(f10, f12, f14);
2913  break;
2914  case Token::MUL:
2915  __ mul_d(f10, f12, f14);
2916  break;
2917  case Token::DIV:
2918  __ div_d(f10, f12, f14);
2919  break;
2920  default:
2921  UNREACHABLE();
2922  }
2923 
2924  if (op_ != Token::DIV) {
2925  // These operations produce an integer result.
2926  // Try to return a smi if we can.
2927  // Otherwise return a heap number if allowed, or jump to type
2928  // transition.
2929 
2930  Register except_flag = scratch2;
2931  __ EmitFPUTruncate(kRoundToZero,
2932  single_scratch,
2933  f10,
2934  scratch1,
2935  except_flag);
2936 
2937  if (result_type_ <= BinaryOpIC::INT32) {
2938  // If except_flag != 0, result does not fit in a 32-bit integer.
2939  __ Branch(&transition, ne, except_flag, Operand(zero_reg));
2940  }
2941 
2942  // Check if the result fits in a smi.
2943  __ mfc1(scratch1, single_scratch);
2944  __ Addu(scratch2, scratch1, Operand(0x40000000));
2945  // If not, try to return a heap number.
2946  __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
2947  // Check for minus zero. Return heap number for minus zero.
2948  Label not_zero;
2949  __ Branch(&not_zero, ne, scratch1, Operand(zero_reg));
2950  __ mfc1(scratch2, f11);
2951  __ And(scratch2, scratch2, HeapNumber::kSignMask);
2952  __ Branch(&return_heap_number, ne, scratch2, Operand(zero_reg));
2953  __ bind(&not_zero);
2954 
2955  // Tag the result and return.
2956  __ SmiTag(v0, scratch1);
2957  __ Ret();
2958  } else {
2959  // DIV just falls through to allocating a heap number.
2960  }
2961 
2962  __ bind(&return_heap_number);
2963  // Return a heap number, or fall through to type transition or runtime
2964  // call if we can't.
2965  if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
2966  : BinaryOpIC::INT32)) {
2967  // We are using FPU registers so s0 is available.
2968  heap_number_result = s0;
2969  GenerateHeapResultAllocation(masm,
2970  heap_number_result,
2971  heap_number_map,
2972  scratch1,
2973  scratch2,
2974  &call_runtime);
2975  __ mov(v0, heap_number_result);
2976  __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset));
2977  __ Ret();
2978  }
2979 
2980  // A DIV operation expecting an integer result falls through
2981  // to type transition.
2982 
2983  } else {
2984  // We preserved a0 and a1 to be able to call runtime.
2985  // Save the left value on the stack.
2986  __ Push(t1, t0);
2987 
2988  Label pop_and_call_runtime;
2989 
2990  // Allocate a heap number to store the result.
2991  heap_number_result = s0;
2992  GenerateHeapResultAllocation(masm,
2993  heap_number_result,
2994  heap_number_map,
2995  scratch1,
2996  scratch2,
2997  &pop_and_call_runtime);
2998 
2999  // Load the left value from the value saved on the stack.
3000  __ Pop(a1, a0);
3001 
3002  // Call the C function to handle the double operation.
3003  FloatingPointHelper::CallCCodeForDoubleOperation(
3004  masm, op_, heap_number_result, scratch1);
3005  if (FLAG_debug_code) {
3006  __ stop("Unreachable code.");
3007  }
3008 
3009  __ bind(&pop_and_call_runtime);
3010  __ Drop(2);
3011  __ Branch(&call_runtime);
3012  }
3013 
3014  break;
3015  }
3016 
3017  case Token::BIT_OR:
3018  case Token::BIT_XOR:
3019  case Token::BIT_AND:
3020  case Token::SAR:
3021  case Token::SHR:
3022  case Token::SHL: {
3023  Label return_heap_number;
3024  Register scratch3 = t1;
3025  // Convert operands to 32-bit integers. Right in a2 and left in a3. The
3026  // registers a0 and a1 (right and left) are preserved for the runtime
3027  // call.
3028  FloatingPointHelper::ConvertNumberToInt32(masm,
3029  left,
3030  a3,
3031  heap_number_map,
3032  scratch1,
3033  scratch2,
3034  scratch3,
3035  f0,
3036  &transition);
3037  FloatingPointHelper::ConvertNumberToInt32(masm,
3038  right,
3039  a2,
3040  heap_number_map,
3041  scratch1,
3042  scratch2,
3043  scratch3,
3044  f0,
3045  &transition);
3046 
3047  // The ECMA-262 standard specifies that, for shift operations, only the
3048  // 5 least significant bits of the shift value should be used.
3049  switch (op_) {
3050  case Token::BIT_OR:
3051  __ Or(a2, a3, Operand(a2));
3052  break;
3053  case Token::BIT_XOR:
3054  __ Xor(a2, a3, Operand(a2));
3055  break;
3056  case Token::BIT_AND:
3057  __ And(a2, a3, Operand(a2));
3058  break;
3059  case Token::SAR:
3060  __ And(a2, a2, Operand(0x1f));
3061  __ srav(a2, a3, a2);
3062  break;
3063  case Token::SHR:
3064  __ And(a2, a2, Operand(0x1f));
3065  __ srlv(a2, a3, a2);
3066  // SHR is special because it is required to produce a positive answer.
3067  // We only get a negative result if the shift value (a2) is 0.
3068  // This result cannot be represented as a signed 32-bit integer, so try
3069  // to return a heap number if we can.
3070  // The non FPU code does not support this special case, so jump to
3071  // runtime if we don't support it.
3072  if (CpuFeatures::IsSupported(FPU)) {
3073  __ Branch((result_type_ <= BinaryOpIC::INT32)
3074  ? &transition
3075  : &return_heap_number,
3076  lt,
3077  a2,
3078  Operand(zero_reg));
3079  } else {
3080  __ Branch((result_type_ <= BinaryOpIC::INT32)
3081  ? &transition
3082  : &call_runtime,
3083  lt,
3084  a2,
3085  Operand(zero_reg));
3086  }
3087  break;
3088  case Token::SHL:
3089  __ And(a2, a2, Operand(0x1f));
3090  __ sllv(a2, a3, a2);
3091  break;
3092  default:
3093  UNREACHABLE();
3094  }
3095 
3096  // Check if the result fits in a smi.
3097  __ Addu(scratch1, a2, Operand(0x40000000));
3098  // If not, try to return a heap number. (We know the result is an int32.)
3099  __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
3100  // Tag the result and return.
3101  __ SmiTag(v0, a2);
3102  __ Ret();
3103 
3104  __ bind(&return_heap_number);
3105  heap_number_result = t1;
3106  GenerateHeapResultAllocation(masm,
3107  heap_number_result,
3108  heap_number_map,
3109  scratch1,
3110  scratch2,
3111  &call_runtime);
3112 
3113  if (CpuFeatures::IsSupported(FPU)) {
3114  CpuFeatures::Scope scope(FPU);
3115 
3116  if (op_ != Token::SHR) {
3117  // Convert the result to a floating point value.
3118  __ mtc1(a2, double_scratch);
3119  __ cvt_d_w(double_scratch, double_scratch);
3120  } else {
3121  // The result must be interpreted as an unsigned 32-bit integer.
3122  __ mtc1(a2, double_scratch);
3123  __ Cvt_d_uw(double_scratch, double_scratch, single_scratch);
3124  }
3125 
3126  // Store the result.
3127  __ mov(v0, heap_number_result);
3128  __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset));
3129  __ Ret();
3130  } else {
3131  // Tail call that writes the int32 in a2 to the heap number in v0, using
3132  // a3 and a0 as scratch. v0 is preserved and returned.
3133  __ mov(v0, t1);
3134  WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
3135  __ TailCallStub(&stub);
3136  }
3137 
3138  break;
3139  }
3140 
3141  default:
3142  UNREACHABLE();
3143  }
3144 
3145  // We never expect DIV to yield an integer result, so we always generate
3146  // type transition code for DIV operations expecting an integer result: the
3147  // code will fall through to this type transition.
3148  if (transition.is_linked() ||
3149  ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
3150  __ bind(&transition);
3151  GenerateTypeTransition(masm);
3152  }
3153 
3154  __ bind(&call_runtime);
3155  GenerateCallRuntime(masm);
3156 }
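// The "check for minus zero" sequence in the stub above distinguishes -0.0
// from +0.0 by inspecting the sign bit of the double's upper word, because
// the truncated integer is 0 in both cases and a smi cannot encode -0.
// Standalone sketch (illustrative, not V8 code):
#include <stdint.h>
#include <string.h>

bool IsMinusZero(double v) {
  uint64_t bits;
  memcpy(&bits, &v, sizeof(bits));
  return bits == 0x8000000000000000ull;  // sign bit set, all other bits zero
}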
3157 
3158 
3159 void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
3160  Label call_runtime;
3161 
3162  if (op_ == Token::ADD) {
3163  // Handle string addition here, because it is the only operation
3164  // that does not do a ToNumber conversion on the operands.
3165  GenerateAddStrings(masm);
3166  }
3167 
3168  // Convert oddball arguments to numbers.
3169  Label check, done;
3170  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
3171  __ Branch(&check, ne, a1, Operand(t0));
3172  if (Token::IsBitOp(op_)) {
3173  __ li(a1, Operand(Smi::FromInt(0)));
3174  } else {
3175  __ LoadRoot(a1, Heap::kNanValueRootIndex);
3176  }
3177  __ jmp(&done);
3178  __ bind(&check);
3179  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
3180  __ Branch(&done, ne, a0, Operand(t0));
3181  if (Token::IsBitOp(op_)) {
3182  __ li(a0, Operand(Smi::FromInt(0)));
3183  } else {
3184  __ LoadRoot(a0, Heap::kNanValueRootIndex);
3185  }
3186  __ bind(&done);
3187 
3188  GenerateHeapNumberStub(masm);
3189 }
3190 
3191 
3192 void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
3193  Label call_runtime;
3194  GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
3195 
3196  __ bind(&call_runtime);
3197  GenerateCallRuntime(masm);
3198 }
3199 
3200 
3201 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
3202  Label call_runtime, call_string_add_or_runtime;
3203 
3204  GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
3205 
3206  GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
3207 
3208  __ bind(&call_string_add_or_runtime);
3209  if (op_ == Token::ADD) {
3210  GenerateAddStrings(masm);
3211  }
3212 
3213  __ bind(&call_runtime);
3214  GenerateCallRuntime(masm);
3215 }
3216 
3217 
3218 void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
3219  ASSERT(op_ == Token::ADD);
3220  Label left_not_string, call_runtime;
3221 
3222  Register left = a1;
3223  Register right = a0;
3224 
3225  // Check if left argument is a string.
3226  __ JumpIfSmi(left, &left_not_string);
3227  __ GetObjectType(left, a2, a2);
3228  __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE));
3229 
3230  StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
3231  GenerateRegisterArgsPush(masm);
3232  __ TailCallStub(&string_add_left_stub);
3233 
3234  // Left operand is not a string, test right.
3235  __ bind(&left_not_string);
3236  __ JumpIfSmi(right, &call_runtime);
3237  __ GetObjectType(right, a2, a2);
3238  __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
3239 
3240  StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
3241  GenerateRegisterArgsPush(masm);
3242  __ TailCallStub(&string_add_right_stub);
3243 
3244  // At least one argument is not a string.
3245  __ bind(&call_runtime);
3246 }
3247 
3248 
3249 void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
3250  GenerateRegisterArgsPush(masm);
3251  switch (op_) {
3252  case Token::ADD:
3253  __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
3254  break;
3255  case Token::SUB:
3256  __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
3257  break;
3258  case Token::MUL:
3259  __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
3260  break;
3261  case Token::DIV:
3262  __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
3263  break;
3264  case Token::MOD:
3265  __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
3266  break;
3267  case Token::BIT_OR:
3268  __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
3269  break;
3270  case Token::BIT_AND:
3271  __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
3272  break;
3273  case Token::BIT_XOR:
3274  __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
3275  break;
3276  case Token::SAR:
3277  __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
3278  break;
3279  case Token::SHR:
3280  __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
3281  break;
3282  case Token::SHL:
3283  __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
3284  break;
3285  default:
3286  UNREACHABLE();
3287  }
3288 }
3289 
3290 
3291 void BinaryOpStub::GenerateHeapResultAllocation(
3292  MacroAssembler* masm,
3293  Register result,
3294  Register heap_number_map,
3295  Register scratch1,
3296  Register scratch2,
3297  Label* gc_required) {
3298 
3299  // The code below will clobber result if allocation fails. To keep both
3300  // arguments intact for the runtime call, result cannot be either of them.
3301  ASSERT(!result.is(a0) && !result.is(a1));
3302 
3303  if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
3304  Label skip_allocation, allocated;
3305  Register overwritable_operand = mode_ == OVERWRITE_LEFT ? a1 : a0;
3306  // If the overwritable operand is already an object, we skip the
3307  // allocation of a heap number.
3308  __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
3309  // Allocate a heap number for the result.
3310  __ AllocateHeapNumber(
3311  result, scratch1, scratch2, heap_number_map, gc_required);
3312  __ Branch(&allocated);
3313  __ bind(&skip_allocation);
3314  // Use object holding the overwritable operand for result.
3315  __ mov(result, overwritable_operand);
3316  __ bind(&allocated);
3317  } else {
3318  ASSERT(mode_ == NO_OVERWRITE);
3319  __ AllocateHeapNumber(
3320  result, scratch1, scratch2, heap_number_map, gc_required);
3321  }
3322 }
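// Decision mirrored by the allocation helper above: if the stub may overwrite
// one operand and that operand is already a heap object, its storage is
// reused for the result; otherwise a fresh heap number is allocated (bailing
// out to gc_required on failure). Plain C++ restatement with illustrative
// types, not V8's:
enum ResultOverwriteMode { kNoOverwrite, kOverwriteLeft, kOverwriteRight };

struct TaggedOperand { bool is_smi; void* heap_object; };

void* PickResultSlot(ResultOverwriteMode mode,
                     TaggedOperand left,
                     TaggedOperand right,
                     void* (*allocate_heap_number)()) {
  if (mode == kOverwriteLeft && !left.is_smi) return left.heap_object;
  if (mode == kOverwriteRight && !right.is_smi) return right.heap_object;
  return allocate_heap_number();  // may fail and fall back to the runtime
}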
3323 
3324 
3325 void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
3326  __ Push(a1, a0);
3327 }
3328 
3329 
3330 
3331 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
3332  // Untagged case: double input in f4, double result goes
3333  // into f4.
3334  // Tagged case: tagged input on top of stack and in a0,
3335  // tagged result (heap number) goes into v0.
3336 
3337  Label input_not_smi;
3338  Label loaded;
3339  Label calculate;
3340  Label invalid_cache;
3341  const Register scratch0 = t5;
3342  const Register scratch1 = t3;
3343  const Register cache_entry = a0;
3344  const bool tagged = (argument_type_ == TAGGED);
3345 
3346  if (CpuFeatures::IsSupported(FPU)) {
3347  CpuFeatures::Scope scope(FPU);
3348 
3349  if (tagged) {
3350  // Argument is a number and is on stack and in a0.
3351  // Load argument and check if it is a smi.
3352  __ JumpIfNotSmi(a0, &input_not_smi);
3353 
3354  // Input is a smi. Convert to double and load the low and high words
3355  // of the double into a2, a3.
3356  __ sra(t0, a0, kSmiTagSize);
3357  __ mtc1(t0, f4);
3358  __ cvt_d_w(f4, f4);
3359  __ Move(a2, a3, f4);
3360  __ Branch(&loaded);
3361 
3362  __ bind(&input_not_smi);
3363  // Check if input is a HeapNumber.
3364  __ CheckMap(a0,
3365  a1,
3366  Heap::kHeapNumberMapRootIndex,
3367  &calculate,
3368  DONT_DO_SMI_CHECK);
3369  // Input is a HeapNumber. Store the
3370  // low and high words into a2, a3.
3371  __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
3372  __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
3373  } else {
3374  // Input is untagged double in f4. Output goes to f4.
3375  __ Move(a2, a3, f4);
3376  }
3377  __ bind(&loaded);
3378  // a2 = low 32 bits of double value.
3379  // a3 = high 32 bits of double value.
3380  // Compute hash (the shifts are arithmetic):
3381  // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
3382  __ Xor(a1, a2, a3);
3383  __ sra(t0, a1, 16);
3384  __ Xor(a1, a1, t0);
3385  __ sra(t0, a1, 8);
3386  __ Xor(a1, a1, t0);
3387  ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
3388  __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
3389 
3390  // a2 = low 32 bits of double value.
3391  // a3 = high 32 bits of double value.
3392  // a1 = TranscendentalCache::hash(double value).
3393  __ li(cache_entry, Operand(
3394  ExternalReference::transcendental_cache_array_address(
3395  masm->isolate())));
3396  // a0 points to cache array.
3397  __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
3398  Isolate::Current()->transcendental_cache()->caches_[0])));
3399  // a0 points to the cache for the type type_.
3400  // If NULL, the cache hasn't been initialized yet, so go through runtime.
3401  __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
3402 
3403 #ifdef DEBUG
3404  // Check that the layout of cache elements matches expectations.
3405  { TranscendentalCache::SubCache::Element test_elem[2];
3406  char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
3407  char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
3408  char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
3409  char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
3410  char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
3411  CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
3412  CHECK_EQ(0, elem_in0 - elem_start);
3413  CHECK_EQ(kIntSize, elem_in1 - elem_start);
3414  CHECK_EQ(2 * kIntSize, elem_out - elem_start);
3415  }
3416 #endif
3417 
3418  // Find the address of the a1'th entry in the cache, i.e., &a0[a1*12].
3419  __ sll(t0, a1, 1);
3420  __ Addu(a1, a1, t0);
3421  __ sll(t0, a1, 2);
3422  __ Addu(cache_entry, cache_entry, t0);
3423 
3424  // Check if cache matches: Double value is stored in uint32_t[2] array.
3425  __ lw(t0, MemOperand(cache_entry, 0));
3426  __ lw(t1, MemOperand(cache_entry, 4));
3427  __ lw(t2, MemOperand(cache_entry, 8));
3428  __ Branch(&calculate, ne, a2, Operand(t0));
3429  __ Branch(&calculate, ne, a3, Operand(t1));
3430  // Cache hit. Load result, cleanup and return.
3431  Counters* counters = masm->isolate()->counters();
3432  __ IncrementCounter(
3433  counters->transcendental_cache_hit(), 1, scratch0, scratch1);
3434  if (tagged) {
3435  // Pop input value from stack and load result into v0.
3436  __ Drop(1);
3437  __ mov(v0, t2);
3438  } else {
3439  // Load result into f4.
3440  __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
3441  }
3442  __ Ret();
3443  } // if (CpuFeatures::IsSupported(FPU))
3444 
3445  __ bind(&calculate);
3446  Counters* counters = masm->isolate()->counters();
3447  __ IncrementCounter(
3448  counters->transcendental_cache_miss(), 1, scratch0, scratch1);
3449  if (tagged) {
3450  __ bind(&invalid_cache);
3451  __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
3452  masm->isolate()),
3453  1,
3454  1);
3455  } else {
3456  ASSERT(CpuFeatures::IsSupported(FPU));
3457  CpuFeatures::Scope scope(FPU);
3458 
3459  Label no_update;
3460  Label skip_cache;
3461 
3462  // Call C function to calculate the result and update the cache.
3463  // Register a0 holds precalculated cache entry address; preserve
3464  // it on the stack and pop it into register cache_entry after the
3465  // call.
3466  __ Push(cache_entry, a2, a3);
3467  GenerateCallCFunction(masm, scratch0);
3468  __ GetCFunctionDoubleResult(f4);
3469 
3470  // Try to update the cache. If we cannot allocate a
3471  // heap number, we return the result without updating.
3472  __ Pop(cache_entry, a2, a3);
3473  __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3474  __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
3475  __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
3476 
3477  __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize));
3478  __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
3479  __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
3480 
3481  __ Ret(USE_DELAY_SLOT);
3482  __ mov(v0, cache_entry);
3483 
3484  __ bind(&invalid_cache);
3485  // The cache is invalid. Call runtime which will recreate the
3486  // cache.
3487  __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3488  __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
3489  __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
3490  {
3491  FrameScope scope(masm, StackFrame::INTERNAL);
3492  __ push(a0);
3493  __ CallRuntime(RuntimeFunction(), 1);
3494  }
3495  __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
3496  __ Ret();
3497 
3498  __ bind(&skip_cache);
3499  // Call C function to calculate the result and answer directly
3500  // without updating the cache.
3501  GenerateCallCFunction(masm, scratch0);
3502  __ GetCFunctionDoubleResult(f4);
3503  __ bind(&no_update);
3504 
3505  // We return the value in f4 without adding it to the cache, but
3506  // we cause a scavenging GC so that future allocations will succeed.
3507  {
3508  FrameScope scope(masm, StackFrame::INTERNAL);
3509 
3510  // Allocate an aligned object larger than a HeapNumber.
3511  ASSERT(4 * kPointerSize >= HeapNumber::kSize);
3512  __ li(scratch0, Operand(4 * kPointerSize));
3513  __ push(scratch0);
3514  __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
3515  }
3516  __ Ret();
3517  }
3518 }
3519 
3520 
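// A minimal C++ sketch of the hash and cache lookup that the start of
// TranscendentalCacheStub::Generate() above performs, assuming a 32-bit,
// little-endian target (so an Element is the 12 bytes checked in the DEBUG
// block) and assuming the hash seed is the xor of the two double words
// (a2 ^ a3) computed just before the shifts shown above. Element, kCacheSize
// and lookup() are illustrative names, not the V8 declarations.
#include <stdint.h>
#include <string.h>

struct Element { uint32_t in[2]; void* output; };   // 12 bytes on 32-bit
static const int kCacheSize = 512;                   // must be a power of two

static void* lookup(Element* cache, double input) {
  uint32_t lo, hi;                                   // a2 and a3 in the stub
  memcpy(&lo, reinterpret_cast<char*>(&input) + 0, 4);
  memcpy(&hi, reinterpret_cast<char*>(&input) + 4, 4);
  uint32_t hash = lo ^ hi;
  hash ^= static_cast<uint32_t>(static_cast<int32_t>(hash) >> 16);  // sra/Xor
  hash ^= static_cast<uint32_t>(static_cast<int32_t>(hash) >> 8);   // sra/Xor
  hash &= kCacheSize - 1;                            // And with kCacheSize - 1
  Element* e = &cache[hash];                         // base + hash * 12
  if (e->in[0] == lo && e->in[1] == hi) return e->output;  // cache hit
  return 0;                                          // miss: the &calculate path
}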
3521 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
3522  Register scratch) {
3523  __ push(ra);
3524  __ PrepareCallCFunction(2, scratch);
3525  if (IsMipsSoftFloatABI) {
3526  __ Move(a0, a1, f4);
3527  } else {
3528  __ mov_d(f12, f4);
3529  }
3530  AllowExternalCallThatCantCauseGC scope(masm);
3531  Isolate* isolate = masm->isolate();
3532  switch (type_) {
3534  __ CallCFunction(
3535  ExternalReference::math_sin_double_function(isolate),
3536  0, 1);
3537  break;
3539  __ CallCFunction(
3540  ExternalReference::math_cos_double_function(isolate),
3541  0, 1);
3542  break;
3544  __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
3545  0, 1);
3546  break;
3548  __ CallCFunction(
3549  ExternalReference::math_log_double_function(isolate),
3550  0, 1);
3551  break;
3552  default:
3553  UNIMPLEMENTED();
3554  break;
3555  }
3556  __ pop(ra);
3557 }
3558 
3559 
3560 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
3561  switch (type_) {
3562  // Add more cases when necessary.
3563  case TranscendentalCache::SIN: return Runtime::kMath_sin;
3564  case TranscendentalCache::COS: return Runtime::kMath_cos;
3565  case TranscendentalCache::TAN: return Runtime::kMath_tan;
3566  case TranscendentalCache::LOG: return Runtime::kMath_log;
3567  default:
3568  UNIMPLEMENTED();
3569  return Runtime::kAbort;
3570  }
3571 }
3572 
3573 
3574 void StackCheckStub::Generate(MacroAssembler* masm) {
3575  __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
3576 }
3577 
3578 
3579 void InterruptStub::Generate(MacroAssembler* masm) {
3580  __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
3581 }
3582 
3583 
3584 void MathPowStub::Generate(MacroAssembler* masm) {
3585  CpuFeatures::Scope fpu_scope(FPU);
3586  const Register base = a1;
3587  const Register exponent = a2;
3588  const Register heapnumbermap = t1;
3589  const Register heapnumber = v0;
3590  const DoubleRegister double_base = f2;
3591  const DoubleRegister double_exponent = f4;
3592  const DoubleRegister double_result = f0;
3593  const DoubleRegister double_scratch = f6;
3594  const FPURegister single_scratch = f8;
3595  const Register scratch = t5;
3596  const Register scratch2 = t3;
3597 
3598  Label call_runtime, done, int_exponent;
3599  if (exponent_type_ == ON_STACK) {
3600  Label base_is_smi, unpack_exponent;
3601  // The exponent and base are supplied as arguments on the stack.
3602  // This can only happen if the stub is called from non-optimized code.
3603  // Load input parameters from stack to double registers.
3604  __ lw(base, MemOperand(sp, 1 * kPointerSize));
3605  __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
3606 
3607  __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
3608 
3609  __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
3610  __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
3611  __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
3612 
3613  __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
3614  __ jmp(&unpack_exponent);
3615 
3616  __ bind(&base_is_smi);
3617  __ mtc1(scratch, single_scratch);
3618  __ cvt_d_w(double_base, single_scratch);
3619  __ bind(&unpack_exponent);
3620 
3621  __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
3622 
3623  __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
3624  __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
3625  __ ldc1(double_exponent,
3626  FieldMemOperand(exponent, HeapNumber::kValueOffset));
3627  } else if (exponent_type_ == TAGGED) {
3628  // Base is already in double_base.
3629  __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
3630 
3631  __ ldc1(double_exponent,
3632  FieldMemOperand(exponent, HeapNumber::kValueOffset));
3633  }
3634 
3635  if (exponent_type_ != INTEGER) {
3636  Label int_exponent_convert;
3637  // Detect integer exponents stored as double.
3638  __ EmitFPUTruncate(kRoundToMinusInf,
3639  single_scratch,
3640  double_exponent,
3641  scratch,
3642  scratch2,
3644  // scratch2 == 0 means there was no conversion error.
3645  __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
3646 
3647  if (exponent_type_ == ON_STACK) {
3648  // Detect square root case. Crankshaft detects constant +/-0.5 at
3649  // compile time and uses DoMathPowHalf instead. We then skip this check
3650  // for non-constant cases of +/-0.5 as these hardly occur.
3651  Label not_plus_half;
3652 
3653  // Test for 0.5.
3654  __ Move(double_scratch, 0.5);
3655  __ BranchF(USE_DELAY_SLOT,
3656  &not_plus_half,
3657  NULL,
3658  ne,
3659  double_exponent,
3660  double_scratch);
3661  // double_scratch can be overwritten in the delay slot.
3662  // Calculates square root of base. Check for the special case of
3663  // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
3664  __ Move(double_scratch, -V8_INFINITY);
3665  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
3666  __ neg_d(double_result, double_scratch);
3667 
3668  // Add +0 to convert -0 to +0.
3669  __ add_d(double_scratch, double_base, kDoubleRegZero);
3670  __ sqrt_d(double_result, double_scratch);
3671  __ jmp(&done);
3672 
3673  __ bind(&not_plus_half);
3674  __ Move(double_scratch, -0.5);
3675  __ BranchF(USE_DELAY_SLOT,
3676  &call_runtime,
3677  NULL,
3678  ne,
3679  double_exponent,
3680  double_scratch);
3681  // double_scratch can be overwritten in the delay slot.
3682  // Calculates square root of base. Check for the special case of
3683  // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
3684  __ Move(double_scratch, -V8_INFINITY);
3685  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
3686  __ Move(double_result, kDoubleRegZero);
3687 
3688  // Add +0 to convert -0 to +0.
3689  __ add_d(double_scratch, double_base, kDoubleRegZero);
3690  __ Move(double_result, 1);
3691  __ sqrt_d(double_scratch, double_scratch);
3692  __ div_d(double_result, double_result, double_scratch);
3693  __ jmp(&done);
3694  }
3695 
3696  __ push(ra);
3697  {
3698  AllowExternalCallThatCantCauseGC scope(masm);
3699  __ PrepareCallCFunction(0, 2, scratch);
3700  __ SetCallCDoubleArguments(double_base, double_exponent);
3701  __ CallCFunction(
3702  ExternalReference::power_double_double_function(masm->isolate()),
3703  0, 2);
3704  }
3705  __ pop(ra);
3706  __ GetCFunctionDoubleResult(double_result);
3707  __ jmp(&done);
3708 
3709  __ bind(&int_exponent_convert);
3710  __ mfc1(scratch, single_scratch);
3711  }
3712 
3713  // Calculate power with integer exponent.
3714  __ bind(&int_exponent);
3715 
3716  // Get two copies of exponent in the registers scratch and exponent.
3717  if (exponent_type_ == INTEGER) {
3718  __ mov(scratch, exponent);
3719  } else {
3720  // Exponent has previously been stored into scratch as untagged integer.
3721  __ mov(exponent, scratch);
3722  }
3723 
3724  __ mov_d(double_scratch, double_base); // Back up base.
3725  __ Move(double_result, 1.0);
3726 
3727  // Get absolute value of exponent.
3728  Label positive_exponent;
3729  __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
3730  __ Subu(scratch, zero_reg, scratch);
3731  __ bind(&positive_exponent);
3732 
3733  Label while_true, no_carry, loop_end;
3734  __ bind(&while_true);
3735 
3736  __ And(scratch2, scratch, 1);
3737 
3738  __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
3739  __ mul_d(double_result, double_result, double_scratch);
3740  __ bind(&no_carry);
3741 
3742  __ sra(scratch, scratch, 1);
3743 
3744  __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
3745  __ mul_d(double_scratch, double_scratch, double_scratch);
3746 
3747  __ Branch(&while_true);
3748 
3749  __ bind(&loop_end);
3750 
3751  __ Branch(&done, ge, exponent, Operand(zero_reg));
3752  __ Move(double_scratch, 1.0);
3753  __ div_d(double_result, double_scratch, double_result);
3754  // Test whether result is zero. Bail out to check for subnormal result.
3755  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
3756  __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
3757 
3758  // double_exponent may not contain the exponent value if the input was a
3759  // smi. We set it with the exponent value before bailing out.
3760  __ mtc1(exponent, single_scratch);
3761  __ cvt_d_w(double_exponent, single_scratch);
3762 
3763  // Returning or bailing out.
3764  Counters* counters = masm->isolate()->counters();
3765  if (exponent_type_ == ON_STACK) {
3766  // The arguments are still on the stack.
3767  __ bind(&call_runtime);
3768  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
3769 
3770  // The stub is called from non-optimized code, which expects the result
3771  // as a heap number in v0.
3772  __ bind(&done);
3773  __ AllocateHeapNumber(
3774  heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
3775  __ sdc1(double_result,
3776  FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
3777  ASSERT(heapnumber.is(v0));
3778  __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
3779  __ DropAndRet(2);
3780  } else {
3781  __ push(ra);
3782  {
3783  AllowExternalCallThatCantCauseGC scope(masm);
3784  __ PrepareCallCFunction(0, 2, scratch);
3785  __ SetCallCDoubleArguments(double_base, double_exponent);
3786  __ CallCFunction(
3787  ExternalReference::power_double_double_function(masm->isolate()),
3788  0, 2);
3789  }
3790  __ pop(ra);
3791  __ GetCFunctionDoubleResult(double_result);
3792 
3793  __ bind(&done);
3794  __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
3795  __ Ret();
3796  }
3797 }
3798 
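// A small sketch, in plain C++, of the +/-0.5 exponent fast path in
// MathPowStub::Generate() above, assuming IEEE-754 sqrt() semantics;
// pow_half() is an illustrative name, not part of the stub.
#include <cmath>

static double pow_half(double base, bool negative_half) {
  // ECMA 15.8.2.13: Math.pow(-Infinity, 0.5) is +Infinity and
  // Math.pow(-Infinity, -0.5) is +0, but sqrt(-Infinity) would be NaN,
  // so -Infinity is checked before taking the square root.
  if (base == -INFINITY) return negative_half ? 0.0 : INFINITY;
  // Adding +0 turns a -0 base into +0, so the result cannot be -0.
  double root = std::sqrt(base + 0.0);
  return negative_half ? 1.0 / root : root;
}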
3799 
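// A sketch of the integer-exponent loop above (square-and-multiply on the
// absolute exponent, then invert for negative exponents). power_int() is an
// illustrative name; the stub additionally bails out to the C pow() when the
// final division underflows to zero, because of subnormal results.
static double power_int(double base, int exponent) {
  double result = 1.0;                      // double_result
  double running = base;                    // double_scratch backs up the base
  unsigned e = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                            : static_cast<unsigned>(exponent);
  while (e != 0) {
    if (e & 1) result *= running;           // low bit set: multiply in
    e >>= 1;
    if (e == 0) break;                      // matches Branch(&loop_end, ...)
    running *= running;                     // square for the next bit
  }
  if (exponent < 0) result = 1.0 / result;  // x^-y == (1/x)^y, modulo subnormals
  return result;
}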
3800 bool CEntryStub::NeedsImmovableCode() {
3801  return true;
3802 }
3803 
3804 
3805 bool CEntryStub::IsPregenerated() {
3806  return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
3807  result_size_ == 1;
3808 }
3809 
3810 
3811 void CodeStub::GenerateStubsAheadOfTime() {
3816 }
3817 
3818 
3819 void CodeStub::GenerateFPStubs() {
3820  CEntryStub save_doubles(1, kSaveFPRegs);
3821  Handle<Code> code = save_doubles.GetCode();
3822  code->set_is_pregenerated(true);
3823  StoreBufferOverflowStub stub(kSaveFPRegs);
3824  stub.GetCode()->set_is_pregenerated(true);
3825  code->GetIsolate()->set_fp_stubs_generated(true);
3826 }
3827 
3828 
3829 void CEntryStub::GenerateAheadOfTime() {
3830  CEntryStub stub(1, kDontSaveFPRegs);
3831  Handle<Code> code = stub.GetCode();
3832  code->set_is_pregenerated(true);
3833 }
3834 
3835 
3836 void CEntryStub::GenerateCore(MacroAssembler* masm,
3837  Label* throw_normal_exception,
3838  Label* throw_termination_exception,
3839  Label* throw_out_of_memory_exception,
3840  bool do_gc,
3841  bool always_allocate) {
3842  // v0: result parameter for PerformGC, if any
3843  // s0: number of arguments including receiver (C callee-saved)
3844  // s1: pointer to the first argument (C callee-saved)
3845  // s2: pointer to builtin function (C callee-saved)
3846 
3847  Isolate* isolate = masm->isolate();
3848 
3849  if (do_gc) {
3850  // Move result passed in v0 into a0 to call PerformGC.
3851  __ mov(a0, v0);
3852  __ PrepareCallCFunction(1, 0, a1);
3853  __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1, 0);
3854  }
3855 
3856  ExternalReference scope_depth =
3857  ExternalReference::heap_always_allocate_scope_depth(isolate);
3858  if (always_allocate) {
3859  __ li(a0, Operand(scope_depth));
3860  __ lw(a1, MemOperand(a0));
3861  __ Addu(a1, a1, Operand(1));
3862  __ sw(a1, MemOperand(a0));
3863  }
3864 
3865  // Prepare arguments for C routine.
3866  // a0 = argc
3867  __ mov(a0, s0);
3868  // a1 = argv (set in the delay slot after find_ra below).
3869 
3870  // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
3871  // also need to reserve the 4 argument slots on the stack.
3872 
3873  __ AssertStackIsAligned();
3874 
3875  __ li(a2, Operand(ExternalReference::isolate_address()));
3876 
3877  // To let the GC traverse the return address of the exit frames, we need to
3878  // know where the return address is. The CEntryStub is unmovable, so
3879  // we can store the address on the stack to be able to find it again and
3880  // we never have to restore it, because it will not change.
3881  { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
3882  // This branch-and-link sequence is needed to find the current PC on mips,
3883  // saved to the ra register.
3884  // Use masm-> here instead of the double-underscore macro since extra
3885  // coverage code can interfere with the proper calculation of ra.
3886  Label find_ra;
3887  masm->bal(&find_ra); // bal exposes branch delay slot.
3888  masm->mov(a1, s1);
3889  masm->bind(&find_ra);
3890 
3891  // Adjust the value in ra to point to the correct return location, 2nd
3892  // instruction past the real call into C code (the jalr(t9)), and push it.
3893  // This is the return address of the exit frame.
3894  const int kNumInstructionsToJump = 5;
3895  masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
3896  masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
3897  // Stack space reservation moved to the branch delay slot below.
3898  // Stack is still aligned.
3899 
3900  // Call the C routine.
3901  masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
3902  masm->jalr(t9);
3903  // Set up sp in the delay slot.
3904  masm->addiu(sp, sp, -kCArgsSlotsSize);
3905  // Make sure the stored 'ra' points to this position.
3906  ASSERT_EQ(kNumInstructionsToJump,
3907  masm->InstructionsGeneratedSince(&find_ra));
3908  }
3909 
3910  if (always_allocate) {
3911  // It's okay to clobber a2 and a3 here. v0 & v1 contain result.
3912  __ li(a2, Operand(scope_depth));
3913  __ lw(a3, MemOperand(a2));
3914  __ Subu(a3, a3, Operand(1));
3915  __ sw(a3, MemOperand(a2));
3916  }
3917 
3918  // Check for failure result.
3919  Label failure_returned;
3920  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
3921  __ addiu(a2, v0, 1);
3922  __ andi(t0, a2, kFailureTagMask);
3923  __ Branch(USE_DELAY_SLOT, &failure_returned, eq, t0, Operand(zero_reg));
3924  // Restore stack (remove arg slots) in branch delay slot.
3925  __ addiu(sp, sp, kCArgsSlotsSize);
3926 
3927 
3928  // Exit C frame and return.
3929  // v0:v1: result
3930  // sp: stack pointer
3931  // fp: frame pointer
3932  __ LeaveExitFrame(save_doubles_, s0, true);
3933 
3934  // Check if we should retry or throw exception.
3935  Label retry;
3936  __ bind(&failure_returned);
3938  __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
3939  __ Branch(&retry, eq, t0, Operand(zero_reg));
3940 
3941  // Special handling of out of memory exceptions.
3942  Failure* out_of_memory = Failure::OutOfMemoryException();
3943  __ Branch(USE_DELAY_SLOT,
3944  throw_out_of_memory_exception,
3945  eq,
3946  v0,
3947  Operand(reinterpret_cast<int32_t>(out_of_memory)));
3948  // If we throw the OOM exception, the value of a3 doesn't matter.
3949  // Any instruction can be in the delay slot that's not a jump.
3950 
3951  // Retrieve the pending exception and clear the variable.
3952  __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
3953  __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
3954  isolate)));
3955  __ lw(v0, MemOperand(t0));
3956  __ sw(a3, MemOperand(t0));
3957 
3958  // Special handling of termination exceptions which are uncatchable
3959  // by javascript code.
3960  __ LoadRoot(t0, Heap::kTerminationExceptionRootIndex);
3961  __ Branch(throw_termination_exception, eq, v0, Operand(t0));
3962 
3963  // Handle normal exception.
3964  __ jmp(throw_normal_exception);
3965 
3966  __ bind(&retry);
3967  // Last failure (v0) will be moved to (a0) as a parameter when retrying.
3968 }
3969 
3970 
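// A sketch of the failure check emitted right after the C call above,
// assuming the failure tagging of this V8 generation (two low tag bits set,
// kFailureTag == 3); is_failure() is an illustrative helper, not V8 API.
#include <stdint.h>

static const int kFailureTagSizeBits = 2;
static const intptr_t kFailureMask = (1 << kFailureTagSizeBits) - 1;  // 0x3

static bool is_failure(intptr_t result) {
  // Failure objects have both low tag bits set, so adding 1 clears them;
  // this mirrors the addiu/andi/Branch sequence in GenerateCore().
  return ((result + 1) & kFailureMask) == 0;
}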
3971 void CEntryStub::Generate(MacroAssembler* masm) {
3972  // Called from JavaScript; parameters are on stack as if calling JS function
3973  // s0: number of arguments including receiver
3974  // s1: size of arguments excluding receiver
3975  // s2: pointer to builtin function
3976  // fp: frame pointer (restored after C call)
3977  // sp: stack pointer (restored as callee's sp after C call)
3978  // cp: current context (C callee-saved)
3979 
3980  // NOTE: Invocations of builtins may return failure objects
3981  // instead of a proper result. The builtin entry handles
3982  // this by performing a garbage collection and retrying the
3983  // builtin once.
3984 
3985  // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
3986  // The reason for this is that these arguments would need to be saved anyway
3987  // so it's faster to set them up directly.
3988  // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
3989 
3990  // Compute the argv pointer in a callee-saved register.
3991  __ Addu(s1, sp, s1);
3992 
3993  // Enter the exit frame that transitions from JavaScript to C++.
3994  FrameScope scope(masm, StackFrame::MANUAL);
3995  __ EnterExitFrame(save_doubles_);
3996 
3997  // s0: number of arguments (C callee-saved)
3998  // s1: pointer to first argument (C callee-saved)
3999  // s2: pointer to builtin function (C callee-saved)
4000 
4001  Label throw_normal_exception;
4002  Label throw_termination_exception;
4003  Label throw_out_of_memory_exception;
4004 
4005  // Call into the runtime system.
4006  GenerateCore(masm,
4007  &throw_normal_exception,
4008  &throw_termination_exception,
4009  &throw_out_of_memory_exception,
4010  false,
4011  false);
4012 
4013  // Do space-specific GC and retry runtime call.
4014  GenerateCore(masm,
4015  &throw_normal_exception,
4016  &throw_termination_exception,
4017  &throw_out_of_memory_exception,
4018  true,
4019  false);
4020 
4021  // Do full GC and retry runtime call one final time.
4022  Failure* failure = Failure::InternalError();
4023  __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
4024  GenerateCore(masm,
4025  &throw_normal_exception,
4026  &throw_termination_exception,
4027  &throw_out_of_memory_exception,
4028  true,
4029  true);
4030 
4031  __ bind(&throw_out_of_memory_exception);
4032  // Set external caught exception to false.
4033  Isolate* isolate = masm->isolate();
4034  ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
4035  isolate);
4036  __ li(a0, Operand(false, RelocInfo::NONE));
4037  __ li(a2, Operand(external_caught));
4038  __ sw(a0, MemOperand(a2));
4039 
4040  // Set pending exception and v0 to out of memory exception.
4041  Failure* out_of_memory = Failure::OutOfMemoryException();
4042  __ li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
4043  __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
4044  isolate)));
4045  __ sw(v0, MemOperand(a2));
4046  // Fall through to the next label.
4047 
4048  __ bind(&throw_termination_exception);
4049  __ ThrowUncatchable(v0);
4050 
4051  __ bind(&throw_normal_exception);
4052  __ Throw(v0);
4053 }
4054 
4055 
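// A sketch of the retry policy that the three GenerateCore() calls above lay
// down in the generated code: call the builtin, and on a retry-after-GC
// failure collect garbage and try again, ending with a full GC in an
// always-allocate scope. The function-pointer parameters are illustrative
// stand-ins, not the V8 API.
struct BuiltinResult { void* value; bool retry_after_gc; };

static BuiltinResult call_with_gc_retries(
    BuiltinResult (*run_builtin)(),
    void (*collect_garbage)(bool full_gc, bool always_allocate)) {
  BuiltinResult r = run_builtin();          // attempt 1: no GC
  if (!r.retry_after_gc) return r;
  collect_garbage(false, false);            // attempt 2: GC the failing space
  r = run_builtin();
  if (!r.retry_after_gc) return r;
  collect_garbage(true, true);              // attempt 3: full GC, always allocate
  return run_builtin();                     // still failing => thrown as OOM
}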
4056 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
4057  Label invoke, handler_entry, exit;
4058  Isolate* isolate = masm->isolate();
4059 
4060  // Registers:
4061  // a0: entry address
4062  // a1: function
4063  // a2: receiver
4064  // a3: argc
4065  //
4066  // Stack:
4067  // 4 args slots
4068  // args
4069 
4070  // Save callee saved registers on the stack.
4071  __ MultiPush(kCalleeSaved | ra.bit());
4072 
4073  if (CpuFeatures::IsSupported(FPU)) {
4074  CpuFeatures::Scope scope(FPU);
4075  // Save callee-saved FPU registers.
4076  __ MultiPushFPU(kCalleeSavedFPU);
4077  // Set up the reserved register for 0.0.
4078  __ Move(kDoubleRegZero, 0.0);
4079  }
4080 
4081 
4082  // Load argv in s0 register.
4083  int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
4084  if (CpuFeatures::IsSupported(FPU)) {
4085  offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
4086  }
4087 
4088  __ InitializeRootRegister();
4089  __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
4090 
4091  // We build an EntryFrame.
4092  __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
4093  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
4094  __ li(t2, Operand(Smi::FromInt(marker)));
4095  __ li(t1, Operand(Smi::FromInt(marker)));
4096  __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
4097  isolate)));
4098  __ lw(t0, MemOperand(t0));
4099  __ Push(t3, t2, t1, t0);
4100  // Set up frame pointer for the frame to be pushed.
4102 
4103  // Registers:
4104  // a0: entry_address
4105  // a1: function
4106  // a2: receiver_pointer
4107  // a3: argc
4108  // s0: argv
4109  //
4110  // Stack:
4111  // caller fp |
4112  // function slot | entry frame
4113  // context slot |
4114  // bad fp (0xff...f) |
4115  // callee saved registers + ra
4116  // 4 args slots
4117  // args
4118 
4119  // If this is the outermost JS call, set js_entry_sp value.
4120  Label non_outermost_js;
4121  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
4122  __ li(t1, Operand(ExternalReference(js_entry_sp)));
4123  __ lw(t2, MemOperand(t1));
4124  __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
4125  __ sw(fp, MemOperand(t1));
4126  __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
4127  Label cont;
4128  __ b(&cont);
4129  __ nop(); // Branch delay slot nop.
4130  __ bind(&non_outermost_js);
4131  __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
4132  __ bind(&cont);
4133  __ push(t0);
4134 
4135  // Jump to a faked try block that does the invoke, with a faked catch
4136  // block that sets the pending exception.
4137  __ jmp(&invoke);
4138  __ bind(&handler_entry);
4139  handler_offset_ = handler_entry.pos();
4140  // Caught exception: Store result (exception) in the pending exception
4141  // field in the JSEnv and return a failure sentinel. Coming in here the
4142  // fp will be invalid because the PushTryHandler below sets it to 0 to
4143  // signal the existence of the JSEntry frame.
4144  __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
4145  isolate)));
4146  __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
4147  __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
4148  __ b(&exit); // b exposes branch delay slot.
4149  __ nop(); // Branch delay slot nop.
4150 
4151  // Invoke: Link this frame into the handler chain. There's only one
4152  // handler block in this code object, so its index is 0.
4153  __ bind(&invoke);
4154  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
4155  // If an exception not caught by another handler occurs, this handler
4156  // returns control to the code after the bal(&invoke) above, which
4157  // restores all kCalleeSaved registers (including cp and fp) to their
4158  // saved values before returning a failure to C.
4159 
4160  // Clear any pending exceptions.
4161  __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
4162  __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
4163  isolate)));
4164  __ sw(t1, MemOperand(t0));
4165 
4166  // Invoke the function by calling through JS entry trampoline builtin.
4167  // Notice that we cannot store a reference to the trampoline code directly in
4168  // this stub, because runtime stubs are not traversed when doing GC.
4169 
4170  // Registers:
4171  // a0: entry_address
4172  // a1: function
4173  // a2: receiver_pointer
4174  // a3: argc
4175  // s0: argv
4176  //
4177  // Stack:
4178  // handler frame
4179  // entry frame
4180  // callee saved registers + ra
4181  // 4 args slots
4182  // args
4183 
4184  if (is_construct) {
4185  ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
4186  isolate);
4187  __ li(t0, Operand(construct_entry));
4188  } else {
4189  ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
4190  __ li(t0, Operand(entry));
4191  }
4192  __ lw(t9, MemOperand(t0)); // Deref address.
4193 
4194  // Call JSEntryTrampoline.
4195  __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
4196  __ Call(t9);
4197 
4198  // Unlink this frame from the handler chain.
4199  __ PopTryHandler();
4200 
4201  __ bind(&exit); // v0 holds result
4202  // Check if the current stack frame is marked as the outermost JS frame.
4203  Label non_outermost_js_2;
4204  __ pop(t1);
4205  __ Branch(&non_outermost_js_2,
4206  ne,
4207  t1,
4208  Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
4209  __ li(t1, Operand(ExternalReference(js_entry_sp)));
4210  __ sw(zero_reg, MemOperand(t1));
4211  __ bind(&non_outermost_js_2);
4212 
4213  // Restore the top frame descriptors from the stack.
4214  __ pop(t1);
4215  __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
4216  isolate)));
4217  __ sw(t1, MemOperand(t0));
4218 
4219  // Reset the stack to the callee saved registers.
4221 
4222  if (CpuFeatures::IsSupported(FPU)) {
4223  CpuFeatures::Scope scope(FPU);
4224  // Restore callee-saved fpu registers.
4225  __ MultiPopFPU(kCalleeSavedFPU);
4226  }
4227 
4228  // Restore callee saved registers from the stack.
4229  __ MultiPop(kCalleeSaved | ra.bit());
4230  // Return.
4231  __ Jump(ra);
4232 }
4233 
4234 
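// A sketch of the js_entry_sp bookkeeping above: only the outermost entry
// into JavaScript records its frame pointer, and only the frame that pushed
// the OUTERMOST marker clears it again on the way out. The names below are
// illustrative, not the V8 declarations.
#include <stdint.h>

enum EntryMarker { kInnerJSEntryFrame, kOutermostJSEntryFrame };

static EntryMarker enter_js(uintptr_t** js_entry_sp, uintptr_t* fp) {
  if (*js_entry_sp == 0) {            // outermost call into JS
    *js_entry_sp = fp;
    return kOutermostJSEntryFrame;
  }
  return kInnerJSEntryFrame;          // nested call: leave js_entry_sp alone
}

static void leave_js(uintptr_t** js_entry_sp, EntryMarker pushed_marker) {
  if (pushed_marker == kOutermostJSEntryFrame) *js_entry_sp = 0;
}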
4235 // Uses registers a0 to t0.
4236 // Expected input (depending on whether args are in registers or on the stack):
4237 // * object: a0 or at sp + 1 * kPointerSize.
4238 // * function: a1 or at sp.
4239 //
4240 // An inlined call site may have been generated before calling this stub.
4241 // In this case the offset to the inline site to patch is passed on the stack,
4242 // in the safepoint slot for register t0.
4243 void InstanceofStub::Generate(MacroAssembler* masm) {
4244  // Call site inlining and patching implies arguments in registers.
4245  ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
4246  // ReturnTrueFalse is only implemented for inlined call sites.
4247  ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
4248 
4249  // Fixed register usage throughout the stub:
4250  const Register object = a0; // Object (lhs).
4251  Register map = a3; // Map of the object.
4252  const Register function = a1; // Function (rhs).
4253  const Register prototype = t0; // Prototype of the function.
4254  const Register inline_site = t5;
4255  const Register scratch = a2;
4256 
4257  const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize;
4258 
4259  Label slow, loop, is_instance, is_not_instance, not_js_object;
4260 
4261  if (!HasArgsInRegisters()) {
4262  __ lw(object, MemOperand(sp, 1 * kPointerSize));
4263  __ lw(function, MemOperand(sp, 0));
4264  }
4265 
4266  // Check that the left hand is a JS object and load map.
4267  __ JumpIfSmi(object, &not_js_object);
4268  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
4269 
4270  // If there is a call site cache don't look in the global cache, but do the
4271  // real lookup and update the call site cache.
4272  if (!HasCallSiteInlineCheck()) {
4273  Label miss;
4274  __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
4275  __ Branch(&miss, ne, function, Operand(at));
4276  __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
4277  __ Branch(&miss, ne, map, Operand(at));
4278  __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
4279  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4280 
4281  __ bind(&miss);
4282  }
4283 
4284  // Get the prototype of the function.
4285  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
4286 
4287  // Check that the function prototype is a JS object.
4288  __ JumpIfSmi(prototype, &slow);
4289  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
4290 
4291  // Update the global instanceof or call site inlined cache with the current
4292  // map and function. The cached answer will be set when it is known below.
4293  if (!HasCallSiteInlineCheck()) {
4294  __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
4295  __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
4296  } else {
4297  ASSERT(HasArgsInRegisters());
4298  // Patch the (relocated) inlined map check.
4299 
4300  // The offset was stored in t0 safepoint slot.
4301  // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
4302  __ LoadFromSafepointRegisterSlot(scratch, t0);
4303  __ Subu(inline_site, ra, scratch);
4304  // Get the map location in scratch and patch it.
4305  __ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch.
4307  }
4308 
4309  // Register mapping: a3 is object map and t0 is function prototype.
4310  // Get prototype of object into a2.
4311  __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
4312 
4313  // We don't need map any more. Use it as a scratch register.
4314  Register scratch2 = map;
4315  map = no_reg;
4316 
4317  // Loop through the prototype chain looking for the function prototype.
4318  __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
4319  __ bind(&loop);
4320  __ Branch(&is_instance, eq, scratch, Operand(prototype));
4321  __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
4322  __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
4323  __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
4324  __ Branch(&loop);
4325 
4326  __ bind(&is_instance);
4327  ASSERT(Smi::FromInt(0) == 0);
4328  if (!HasCallSiteInlineCheck()) {
4329  __ mov(v0, zero_reg);
4330  __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
4331  } else {
4332  // Patch the call site to return true.
4333  __ LoadRoot(v0, Heap::kTrueValueRootIndex);
4334  __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
4335  // Get the boolean result location in scratch and patch it.
4336  __ PatchRelocatedValue(inline_site, scratch, v0);
4337 
4338  if (!ReturnTrueFalseObject()) {
4339  ASSERT_EQ(Smi::FromInt(0), 0);
4340  __ mov(v0, zero_reg);
4341  }
4342  }
4343  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4344 
4345  __ bind(&is_not_instance);
4346  if (!HasCallSiteInlineCheck()) {
4347  __ li(v0, Operand(Smi::FromInt(1)));
4348  __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
4349  } else {
4350  // Patch the call site to return false.
4351  __ LoadRoot(v0, Heap::kFalseValueRootIndex);
4352  __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
4353  // Get the boolean result location in scratch and patch it.
4354  __ PatchRelocatedValue(inline_site, scratch, v0);
4355 
4356  if (!ReturnTrueFalseObject()) {
4357  __ li(v0, Operand(Smi::FromInt(1)));
4358  }
4359  }
4360 
4361  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4362 
4363  Label object_not_null, object_not_null_or_smi;
4364  __ bind(&not_js_object);
4365  // Before null, smi and string value checks, check that the rhs is a function
4366  // as for a non-function rhs an exception needs to be thrown.
4367  __ JumpIfSmi(function, &slow);
4368  __ GetObjectType(function, scratch2, scratch);
4369  __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
4370 
4371  // Null is not instance of anything.
4372  __ Branch(&object_not_null,
4373  ne,
4374  scratch,
4375  Operand(masm->isolate()->factory()->null_value()));
4376  __ li(v0, Operand(Smi::FromInt(1)));
4377  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4378 
4379  __ bind(&object_not_null);
4380  // Smi values are not instances of anything.
4381  __ JumpIfNotSmi(object, &object_not_null_or_smi);
4382  __ li(v0, Operand(Smi::FromInt(1)));
4383  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4384 
4385  __ bind(&object_not_null_or_smi);
4386  // String values are not instances of anything.
4387  __ IsObjectJSStringType(object, scratch, &slow);
4388  __ li(v0, Operand(Smi::FromInt(1)));
4389  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4390 
4391  // Slow-case. Tail call builtin.
4392  __ bind(&slow);
4393  if (!ReturnTrueFalseObject()) {
4394  if (HasArgsInRegisters()) {
4395  __ Push(a0, a1);
4396  }
4397  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
4398  } else {
4399  {
4400  FrameScope scope(masm, StackFrame::INTERNAL);
4401  __ Push(a0, a1);
4402  __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
4403  }
4404  __ mov(a0, v0);
4405  __ LoadRoot(v0, Heap::kTrueValueRootIndex);
4406  __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
4407  __ LoadRoot(v0, Heap::kFalseValueRootIndex);
4408  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4409  }
4410 }
4411 
4412 
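// A sketch of the prototype-chain walk between &loop, &is_instance and
// &is_not_instance above. HeapObject and prototype here are illustrative
// stand-ins for the map->prototype loads performed by the stub.
struct HeapObject { HeapObject* prototype; };

static bool is_instance_of(HeapObject* object_prototype,
                           HeapObject* function_prototype,
                           HeapObject* null_value) {
  for (HeapObject* p = object_prototype; ; p = p->prototype) {
    if (p == function_prototype) return true;   // &is_instance
    if (p == null_value) return false;          // &is_not_instance
  }
}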
4413 Register InstanceofStub::left() { return a0; }
4414 
4415 
4416 Register InstanceofStub::right() { return a1; }
4417 
4418 
4419 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
4420  // The displacement is the offset of the last parameter (if any)
4421  // relative to the frame pointer.
4422  const int kDisplacement =
4424 
4425  // Check that the key is a smi.
4426  Label slow;
4427  __ JumpIfNotSmi(a1, &slow);
4428 
4429  // Check if the calling frame is an arguments adaptor frame.
4430  Label adaptor;
4433  __ Branch(&adaptor,
4434  eq,
4435  a3,
4437 
4438  // Check index (a1) against formal parameters count limit passed in
4439  // through register a0. Use unsigned comparison to get negative
4440  // check for free.
4441  __ Branch(&slow, hs, a1, Operand(a0));
4442 
4443  // Read the argument from the stack and return it.
4444  __ subu(a3, a0, a1);
4445  __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
4446  __ Addu(a3, fp, Operand(t3));
4447  __ lw(v0, MemOperand(a3, kDisplacement));
4448  __ Ret();
4449 
4450  // Arguments adaptor case: Check index (a1) against actual arguments
4451  // limit found in the arguments adaptor frame. Use unsigned
4452  // comparison to get negative check for free.
4453  __ bind(&adaptor);
4455  __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
4456 
4457  // Read the argument from the adaptor frame and return it.
4458  __ subu(a3, a0, a1);
4459  __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
4460  __ Addu(a3, a2, Operand(t3));
4461  __ lw(v0, MemOperand(a3, kDisplacement));
4462  __ Ret();
4463 
4464  // Slow-case: Handle non-smi or out-of-bounds access to arguments
4465  // by calling the runtime system.
4466  __ bind(&slow);
4467  __ push(a1);
4468  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
4469 }
4470 
4471 
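// A sketch of the fast path in GenerateReadElement() above: the key must be
// a smi below the (possibly adaptor) argument count, and the argument is
// then read at frame_base + (count - key) words plus a fixed displacement.
// The parameter names are illustrative; count and key are shown untagged.
#include <stdint.h>

static const int kPointerSizeBytes = 4;  // 32-bit MIPS

static bool argument_address(uintptr_t frame_base,   // fp or adaptor frame
                             uint32_t count,         // formal/actual arg count
                             uint32_t key,           // requested index
                             int displacement,       // kDisplacement in the stub
                             uintptr_t* out_addr) {
  if (key >= count) return false;  // unsigned compare also rejects negatives
  *out_addr = frame_base + (count - key) * kPointerSizeBytes + displacement;
  return true;
}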
4472 void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
4473  // sp[0] : number of parameters
4474  // sp[4] : receiver displacement
4475  // sp[8] : function
4476  // Check if the calling frame is an arguments adaptor frame.
4477  Label runtime;
4480  __ Branch(&runtime,
4481  ne,
4482  a2,
4484 
4485  // Patch the arguments.length and the parameters pointer in the current frame.
4487  __ sw(a2, MemOperand(sp, 0 * kPointerSize));
4488  __ sll(t3, a2, 1);
4489  __ Addu(a3, a3, Operand(t3));
4491  __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4492 
4493  __ bind(&runtime);
4494  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4495 }
4496 
4497 
4498 void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
4499  // Stack layout:
4500  // sp[0] : number of parameters (tagged)
4501  // sp[4] : address of receiver argument
4502  // sp[8] : function
4503  // Registers used over whole function:
4504  // t2 : allocated object (tagged)
4505  // t5 : mapped parameter count (tagged)
4506 
4507  __ lw(a1, MemOperand(sp, 0 * kPointerSize));
4508  // a1 = parameter count (tagged)
4509 
4510  // Check if the calling frame is an arguments adaptor frame.
4511  Label runtime;
4512  Label adaptor_frame, try_allocate;
4515  __ Branch(&adaptor_frame,
4516  eq,
4517  a2,
4519 
4520  // No adaptor, parameter count = argument count.
4521  __ mov(a2, a1);
4522  __ b(&try_allocate);
4523  __ nop(); // Branch delay slot nop.
4524 
4525  // We have an adaptor frame. Patch the parameters pointer.
4526  __ bind(&adaptor_frame);
4528  __ sll(t6, a2, 1);
4529  __ Addu(a3, a3, Operand(t6));
4530  __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
4531  __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4532 
4533  // a1 = parameter count (tagged)
4534  // a2 = argument count (tagged)
4535  // Compute the mapped parameter count = min(a1, a2) in a1.
4536  Label skip_min;
4537  __ Branch(&skip_min, lt, a1, Operand(a2));
4538  __ mov(a1, a2);
4539  __ bind(&skip_min);
4540 
4541  __ bind(&try_allocate);
4542 
4543  // Compute the sizes of backing store, parameter map, and arguments object.
4544  // 1. Parameter map, has 2 extra words containing context and backing store.
4545  const int kParameterMapHeaderSize =
4547  // If there are no mapped parameters, we do not need the parameter_map.
4548  Label param_map_size;
4549  ASSERT_EQ(0, Smi::FromInt(0));
4550  __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
4551  __ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
4552  __ sll(t5, a1, 1);
4553  __ addiu(t5, t5, kParameterMapHeaderSize);
4554  __ bind(&param_map_size);
4555 
4556  // 2. Backing store.
4557  __ sll(t6, a2, 1);
4558  __ Addu(t5, t5, Operand(t6));
4559  __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
4560 
4561  // 3. Arguments object.
4562  __ Addu(t5, t5, Operand(Heap::kArgumentsObjectSize));
4563 
4564  // Do the allocation of all three objects in one go.
4565  __ AllocateInNewSpace(t5, v0, a3, t0, &runtime, TAG_OBJECT);
4566 
4567  // v0 = address of new object(s) (tagged)
4568  // a2 = argument count (tagged)
4569  // Get the arguments boilerplate from the current (global) context into t0.
4570  const int kNormalOffset =
4572  const int kAliasedOffset =
4574 
4577  Label skip2_ne, skip2_eq;
4578  __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
4579  __ lw(t0, MemOperand(t0, kNormalOffset));
4580  __ bind(&skip2_ne);
4581 
4582  __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
4583  __ lw(t0, MemOperand(t0, kAliasedOffset));
4584  __ bind(&skip2_eq);
4585 
4586  // v0 = address of new object (tagged)
4587  // a1 = mapped parameter count (tagged)
4588  // a2 = argument count (tagged)
4589  // t0 = address of boilerplate object (tagged)
4590  // Copy the JS object part.
4591  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
4592  __ lw(a3, FieldMemOperand(t0, i));
4593  __ sw(a3, FieldMemOperand(v0, i));
4594  }
4595 
4596  // Set up the callee in-object property.
4598  __ lw(a3, MemOperand(sp, 2 * kPointerSize));
4599  const int kCalleeOffset = JSObject::kHeaderSize +
4601  __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
4602 
4603  // Use the length (smi tagged) and set that as an in-object property too.
4605  const int kLengthOffset = JSObject::kHeaderSize +
4607  __ sw(a2, FieldMemOperand(v0, kLengthOffset));
4608 
4609  // Set up the elements pointer in the allocated arguments object.
4610  // If we allocated a parameter map, t0 will point there, otherwise
4611  // it will point to the backing store.
4612  __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize));
4614 
4615  // v0 = address of new object (tagged)
4616  // a1 = mapped parameter count (tagged)
4617  // a2 = argument count (tagged)
4618  // t0 = address of parameter map or backing store (tagged)
4619  // Initialize parameter map. If there are no mapped arguments, we're done.
4620  Label skip_parameter_map;
4621  Label skip3;
4622  __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
4623  // Move backing store address to a3, because it is
4624  // expected there when filling in the unmapped arguments.
4625  __ mov(a3, t0);
4626  __ bind(&skip3);
4627 
4628  __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
4629 
4630  __ LoadRoot(t2, Heap::kNonStrictArgumentsElementsMapRootIndex);
4632  __ Addu(t2, a1, Operand(Smi::FromInt(2)));
4634  __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
4635  __ sll(t6, a1, 1);
4636  __ Addu(t2, t0, Operand(t6));
4637  __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
4638  __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
4639 
4640  // Copy the parameter slots and the holes in the arguments.
4641  // We need to fill in mapped_parameter_count slots. They index the context,
4642  // where parameters are stored in reverse order, at
4643  // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
4644  // The mapped parameters thus need to get indices
4645  // MIN_CONTEXT_SLOTS+parameter_count-1 ..
4646  // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
4647  // We loop from right to left.
4648  Label parameters_loop, parameters_test;
4649  __ mov(t2, a1);
4650  __ lw(t5, MemOperand(sp, 0 * kPointerSize));
4651  __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
4652  __ Subu(t5, t5, Operand(a1));
4653  __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
4654  __ sll(t6, t2, 1);
4655  __ Addu(a3, t0, Operand(t6));
4656  __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
4657 
4658  // t2 = loop variable (tagged)
4659  // a1 = mapping index (tagged)
4660  // a3 = address of backing store (tagged)
4661  // t0 = address of parameter map (tagged)
4662  // t1 = temporary scratch (among others, for address calculation)
4663  // t3 = the hole value
4664  __ jmp(&parameters_test);
4665 
4666  __ bind(&parameters_loop);
4667  __ Subu(t2, t2, Operand(Smi::FromInt(1)));
4668  __ sll(t1, t2, 1);
4669  __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
4670  __ Addu(t6, t0, t1);
4671  __ sw(t5, MemOperand(t6));
4672  __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
4673  __ Addu(t6, a3, t1);
4674  __ sw(t3, MemOperand(t6));
4675  __ Addu(t5, t5, Operand(Smi::FromInt(1)));
4676  __ bind(&parameters_test);
4677  __ Branch(&parameters_loop, ne, t2, Operand(Smi::FromInt(0)));
4678 
4679  __ bind(&skip_parameter_map);
4680  // a2 = argument count (tagged)
4681  // a3 = address of backing store (tagged)
4682  // t1 = scratch
4683  // Copy arguments header and remaining slots (if there are any).
4684  __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
4687 
4688  Label arguments_loop, arguments_test;
4689  __ mov(t5, a1);
4690  __ lw(t0, MemOperand(sp, 1 * kPointerSize));
4691  __ sll(t6, t5, 1);
4692  __ Subu(t0, t0, Operand(t6));
4693  __ jmp(&arguments_test);
4694 
4695  __ bind(&arguments_loop);
4696  __ Subu(t0, t0, Operand(kPointerSize));
4697  __ lw(t2, MemOperand(t0, 0));
4698  __ sll(t6, t5, 1);
4699  __ Addu(t1, a3, Operand(t6));
4701  __ Addu(t5, t5, Operand(Smi::FromInt(1)));
4702 
4703  __ bind(&arguments_test);
4704  __ Branch(&arguments_loop, lt, t5, Operand(a2));
4705 
4706  // Return and remove the on-stack parameters.
4707  __ DropAndRet(3);
4708 
4709  // Do the runtime call to allocate the arguments object.
4710  // a2 = argument count (tagged)
4711  __ bind(&runtime);
4712  __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
4713  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4714 }
4715 
4716 
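// A sketch of the size computation at &try_allocate above, written with
// untagged integers. The header constants are illustrative placeholders for
// the V8 constants used by the stub (FixedArray::kHeaderSize,
// kParameterMapHeaderSize, Heap::kArgumentsObjectSize) on a 32-bit heap.
static const int kPtr = 4;
static const int kFixedArrayHeader = 2 * kPtr;                       // map + length
static const int kParameterMapHeader = kFixedArrayHeader + 2 * kPtr; // + context, backing store
static const int kArgumentsObject = 5 * kPtr;                        // JSObject header + callee + length

static int arguments_allocation_size(int mapped_count, int argument_count) {
  int size = 0;
  if (mapped_count > 0) {  // the parameter map is only needed when mapping
    size += mapped_count * kPtr + kParameterMapHeader;
  }
  size += argument_count * kPtr + kFixedArrayHeader;  // backing store
  size += kArgumentsObject;                           // the arguments object itself
  return size;
}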
4717 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
4718  // sp[0] : number of parameters
4719  // sp[4] : receiver displacement
4720  // sp[8] : function
4721  // Check if the calling frame is an arguments adaptor frame.
4722  Label adaptor_frame, try_allocate, runtime;
4725  __ Branch(&adaptor_frame,
4726  eq,
4727  a3,
4729 
4730  // Get the length from the frame.
4731  __ lw(a1, MemOperand(sp, 0));
4732  __ Branch(&try_allocate);
4733 
4734  // Patch the arguments.length and the parameters pointer.
4735  __ bind(&adaptor_frame);
4737  __ sw(a1, MemOperand(sp, 0));
4738  __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
4739  __ Addu(a3, a2, Operand(at));
4740 
4741  __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
4742  __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4743 
4744  // Try the new space allocation. Start out with computing the size
4745  // of the arguments object and the elements array in words.
4746  Label add_arguments_object;
4747  __ bind(&try_allocate);
4748  __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
4749  __ srl(a1, a1, kSmiTagSize);
4750 
4751  __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
4752  __ bind(&add_arguments_object);
4753  __ Addu(a1, a1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
4754 
4755  // Do the allocation of both objects in one go.
4756  __ AllocateInNewSpace(a1,
4757  v0,
4758  a2,
4759  a3,
4760  &runtime,
4761  static_cast<AllocationFlags>(TAG_OBJECT |
4762  SIZE_IN_WORDS));
4763 
4764  // Get the arguments boilerplate from the current (global) context.
4767  __ lw(t0, MemOperand(t0, Context::SlotOffset(
4769 
4770  // Copy the JS object part.
4771  __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
4772 
4773  // Get the length (smi tagged) and set that as an in-object property too.
4775  __ lw(a1, MemOperand(sp, 0 * kPointerSize));
4776  __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
4777  Heap::kArgumentsLengthIndex * kPointerSize));
4778 
4779  Label done;
4780  __ Branch(&done, eq, a1, Operand(zero_reg));
4781 
4782  // Get the parameters pointer from the stack.
4783  __ lw(a2, MemOperand(sp, 1 * kPointerSize));
4784 
4785  // Set up the elements pointer in the allocated arguments object and
4786  // initialize the header in the elements fixed array.
4787  __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict));
4789  __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
4792  // Untag the length for the loop.
4793  __ srl(a1, a1, kSmiTagSize);
4794 
4795  // Copy the fixed array slots.
4796  Label loop;
4797  // Set up t0 to point to the first array slot.
4798  __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4799  __ bind(&loop);
4800  // Pre-decrement a2 with kPointerSize on each iteration.
4801  // Pre-decrement in order to skip receiver.
4802  __ Addu(a2, a2, Operand(-kPointerSize));
4803  __ lw(a3, MemOperand(a2));
4804  // Post-increment t0 with kPointerSize on each iteration.
4805  __ sw(a3, MemOperand(t0));
4806  __ Addu(t0, t0, Operand(kPointerSize));
4807  __ Subu(a1, a1, Operand(1));
4808  __ Branch(&loop, ne, a1, Operand(zero_reg));
4809 
4810  // Return and remove the on-stack parameters.
4811  __ bind(&done);
4812  __ DropAndRet(3);
4813 
4814  // Do the runtime call to allocate the arguments object.
4815  __ bind(&runtime);
4816  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
4817 }
4818 
4819 
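// A sketch of the copy loop at the end of GenerateNewStrict() above: the
// source pointer is pre-decremented before each read, which skips the
// receiver slot (as the stub's comment notes), while the destination walks
// forward through the new elements array. Plain pointers stand in for
// tagged values; the names are illustrative.
static void copy_arguments(void** params,   // a2: parameters pointer from the stack
                           void** dest,     // t0: first slot of the elements array
                           int count) {     // a1: untagged arguments length
  while (count != 0) {
    params -= 1;          // pre-decrement the source pointer
    *dest = *params;      // copy one tagged argument
    dest += 1;            // post-increment the destination
    count -= 1;
  }
}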
4820 void RegExpExecStub::Generate(MacroAssembler* masm) {
4821  // Just jump directly to runtime if native RegExp is not selected at compile
4822 // time, or if the regexp entry in generated code is turned off by a runtime
4823 // switch or at compilation.
4824 #ifdef V8_INTERPRETED_REGEXP
4825  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4826 #else // V8_INTERPRETED_REGEXP
4827 
4828  // Stack frame on entry.
4829  // sp[0]: last_match_info (expected JSArray)
4830  // sp[4]: previous index
4831  // sp[8]: subject string
4832  // sp[12]: JSRegExp object
4833 
4834  const int kLastMatchInfoOffset = 0 * kPointerSize;
4835  const int kPreviousIndexOffset = 1 * kPointerSize;
4836  const int kSubjectOffset = 2 * kPointerSize;
4837  const int kJSRegExpOffset = 3 * kPointerSize;
4838 
4839  Isolate* isolate = masm->isolate();
4840 
4841  Label runtime, invoke_regexp;
4842 
4843  // Allocation of registers for this function. These are in callee save
4844  // registers and will be preserved by the call to the native RegExp code, as
4845  // this code is called using the normal C calling convention. When calling
4846  // directly from generated code the native RegExp code will not do a GC and
4847  // therefore the contents of these registers are safe to use after the call.
4848  // MIPS - using s0..s2, since we are not using CEntry Stub.
4849  Register subject = s0;
4850  Register regexp_data = s1;
4851  Register last_match_info_elements = s2;
4852 
4853  // Ensure that a RegExp stack is allocated.
4854  ExternalReference address_of_regexp_stack_memory_address =
4855  ExternalReference::address_of_regexp_stack_memory_address(
4856  isolate);
4857  ExternalReference address_of_regexp_stack_memory_size =
4858  ExternalReference::address_of_regexp_stack_memory_size(isolate);
4859  __ li(a0, Operand(address_of_regexp_stack_memory_size));
4860  __ lw(a0, MemOperand(a0, 0));
4861  __ Branch(&runtime, eq, a0, Operand(zero_reg));
4862 
4863  // Check that the first argument is a JSRegExp object.
4864  __ lw(a0, MemOperand(sp, kJSRegExpOffset));
4865  STATIC_ASSERT(kSmiTag == 0);
4866  __ JumpIfSmi(a0, &runtime);
4867  __ GetObjectType(a0, a1, a1);
4868  __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
4869 
4870  // Check that the RegExp has been compiled (data contains a fixed array).
4871  __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
4872  if (FLAG_debug_code) {
4873  __ And(t0, regexp_data, Operand(kSmiTagMask));
4874  __ Check(nz,
4875  "Unexpected type for RegExp data, FixedArray expected",
4876  t0,
4877  Operand(zero_reg));
4878  __ GetObjectType(regexp_data, a0, a0);
4879  __ Check(eq,
4880  "Unexpected type for RegExp data, FixedArray expected",
4881  a0,
4882  Operand(FIXED_ARRAY_TYPE));
4883  }
4884 
4885  // regexp_data: RegExp data (FixedArray)
4886  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
4887  __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
4888  __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
4889 
4890  // regexp_data: RegExp data (FixedArray)
4891  // Check that the number of captures fit in the static offsets vector buffer.
4892  __ lw(a2,
4894  // Calculate number of capture registers (number_of_captures + 1) * 2. This
4895  // uses the assumption that smis are 2 * their untagged value.
4896  STATIC_ASSERT(kSmiTag == 0);
4898  __ Addu(a2, a2, Operand(2)); // a2 was a smi.
4899  // Check that the static offsets vector buffer is large enough.
4900  __ Branch(&runtime, hi, a2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
4901 
4902  // a2: Number of capture registers
4903  // regexp_data: RegExp data (FixedArray)
4904  // Check that the second argument is a string.
4905  __ lw(subject, MemOperand(sp, kSubjectOffset));
4906  __ JumpIfSmi(subject, &runtime);
4907  __ GetObjectType(subject, a0, a0);
4908  __ And(a0, a0, Operand(kIsNotStringMask));
4909  STATIC_ASSERT(kStringTag == 0);
4910  __ Branch(&runtime, ne, a0, Operand(zero_reg));
4911 
4912  // Get the length of the string to a3.
4913  __ lw(a3, FieldMemOperand(subject, String::kLengthOffset));
4914 
4915  // a2: Number of capture registers
4916  // a3: Length of subject string as a smi
4917  // subject: Subject string
4918  // regexp_data: RegExp data (FixedArray)
4919  // Check that the third argument is a positive smi less than the subject
4920  // string length. A negative value will be greater (unsigned comparison).
4921  __ lw(a0, MemOperand(sp, kPreviousIndexOffset));
4922  __ JumpIfNotSmi(a0, &runtime);
4923  __ Branch(&runtime, ls, a3, Operand(a0));
4924 
4925  // a2: Number of capture registers
4926  // subject: Subject string
4927  // regexp_data: RegExp data (FixedArray)
4928  // Check that the fourth object is a JSArray object.
4929  __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
4930  __ JumpIfSmi(a0, &runtime);
4931  __ GetObjectType(a0, a1, a1);
4932  __ Branch(&runtime, ne, a1, Operand(JS_ARRAY_TYPE));
4933  // Check that the JSArray is in fast case.
4934  __ lw(last_match_info_elements,
4936  __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
4937  __ Branch(&runtime, ne, a0, Operand(
4938  isolate->factory()->fixed_array_map()));
4939  // Check that the last match info has space for the capture registers and the
4940  // additional information.
4941  __ lw(a0,
4942  FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
4943  __ Addu(a2, a2, Operand(RegExpImpl::kLastMatchOverhead));
4944  __ sra(at, a0, kSmiTagSize); // Untag length for comparison.
4945  __ Branch(&runtime, gt, a2, Operand(at));
4946 
4947  // Reset offset for possibly sliced string.
4948  __ mov(t0, zero_reg);
4949  // subject: Subject string
4950  // regexp_data: RegExp data (FixedArray)
4951  // Check the representation and encoding of the subject string.
4952  Label seq_string;
4953  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
4955  // First check for flat string. None of the following string type tests will
4956  // succeed if subject is not a string or a short external string.
4957  __ And(a1,
4958  a0,
4959  Operand(kIsNotStringMask |
4963  __ Branch(&seq_string, eq, a1, Operand(zero_reg));
4964 
4965  // subject: Subject string
4966  // a0: instance type if Subject string
4967  // regexp_data: RegExp data (FixedArray)
4968  // a1: whether subject is a string and if yes, its string representation
4969  // Check for flat cons string or sliced string.
4970  // A flat cons string is a cons string where the second part is the empty
4971  // string. In that case the subject string is just the first part of the cons
4972  // string. Also in this case the first part of the cons string is known to be
4973  // a sequential string or an external string.
4974  // In the case of a sliced string its offset has to be taken into account.
4975  Label cons_string, external_string, check_encoding;
4980  __ Branch(&cons_string, lt, a1, Operand(kExternalStringTag));
4981  __ Branch(&external_string, eq, a1, Operand(kExternalStringTag));
4982 
4983  // Catch non-string subject or short external string.
4985  __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
4986  __ Branch(&runtime, ne, at, Operand(zero_reg));
4987 
4988  // String is sliced.
4990  __ sra(t0, t0, kSmiTagSize);
4991  __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
4992  // t0: offset of sliced string, untagged.
4993  __ jmp(&check_encoding);
4994  // String is a cons string, check whether it is flat.
4995  __ bind(&cons_string);
4996  __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
4997  __ LoadRoot(a1, Heap::kEmptyStringRootIndex);
4998  __ Branch(&runtime, ne, a0, Operand(a1));
4999  __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
5000  // Is first part of cons or parent of slice a flat string?
5001  __ bind(&check_encoding);
5002  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
5005  __ And(at, a0, Operand(kStringRepresentationMask));
5006  __ Branch(&external_string, ne, at, Operand(zero_reg));
5007 
5008  __ bind(&seq_string);
5009  // subject: Subject string
5010  // regexp_data: RegExp data (FixedArray)
5011  // a0: Instance type of subject string
5015  // Find the code object based on the assumptions above.
5016  __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII.
5017  __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
5018  __ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
5019  __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
5020  __ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
5021 
5022  // Check that the irregexp code has been generated for the actual string
5023  // encoding. If it has, the field contains a code object otherwise it contains
5024  // a smi (code flushing support).
5025  __ JumpIfSmi(t9, &runtime);
5026 
5027  // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
5028  // t9: code
5029  // subject: Subject string
5030  // regexp_data: RegExp data (FixedArray)
5031  // Load used arguments before starting to push arguments for call to native
5032  // RegExp code to avoid handling changing stack height.
5033  __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
5034  __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
5035 
5036  // a1: previous index
5037  // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
5038  // t9: code
5039  // subject: Subject string
5040  // regexp_data: RegExp data (FixedArray)
5041  // All checks done. Now push arguments for native regexp code.
5042  __ IncrementCounter(isolate->counters()->regexp_entry_native(),
5043  1, a0, a2);
5044 
5045  // Isolates: note we add an additional parameter here (isolate pointer).
5046  const int kRegExpExecuteArguments = 9;
5047  const int kParameterRegisters = 4;
5048  __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
5049 
5050  // Stack pointer now points to cell where return address is to be written.
5051  // Arguments are before that on the stack or in registers, meaning we
5052  // treat the return address as argument 5. Thus every argument after that
5053  // needs to be shifted back by 1. Since DirectCEntryStub will handle
5054  // allocating space for the c argument slots, we don't need to calculate
5055  // that into the argument positions on the stack. This is how the stack will
5056  // look (sp meaning the value of sp at this moment):
5057  // [sp + 5] - Argument 9
5058  // [sp + 4] - Argument 8
5059  // [sp + 3] - Argument 7
5060  // [sp + 2] - Argument 6
5061  // [sp + 1] - Argument 5
5062  // [sp + 0] - saved ra
5063 
5064  // Argument 9: Pass current isolate address.
5065  // CFunctionArgumentOperand handles MIPS stack argument slots.
5066  __ li(a0, Operand(ExternalReference::isolate_address()));
5067  __ sw(a0, MemOperand(sp, 5 * kPointerSize));
5068 
5069  // Argument 8: Indicate that this is a direct call from JavaScript.
5070  __ li(a0, Operand(1));
5071  __ sw(a0, MemOperand(sp, 4 * kPointerSize));
5072 
5073  // Argument 7: Start (high end) of backtracking stack memory area.
5074  __ li(a0, Operand(address_of_regexp_stack_memory_address));
5075  __ lw(a0, MemOperand(a0, 0));
5076  __ li(a2, Operand(address_of_regexp_stack_memory_size));
5077  __ lw(a2, MemOperand(a2, 0));
5078  __ addu(a0, a0, a2);
5079  __ sw(a0, MemOperand(sp, 3 * kPointerSize));
5080 
5081  // Argument 6: Set the number of capture registers to zero to force global
5082  // regexps to behave as non-global. This does not affect non-global regexps.
5083  __ mov(a0, zero_reg);
5084  __ sw(a0, MemOperand(sp, 2 * kPointerSize));
5085 
5086  // Argument 5: static offsets vector buffer.
5087  __ li(a0, Operand(
5088  ExternalReference::address_of_static_offsets_vector(isolate)));
5089  __ sw(a0, MemOperand(sp, 1 * kPointerSize));
5090 
5091  // For arguments 4 and 3 get string length, calculate start of string data
5092  // and calculate the shift of the index (0 for ASCII and 1 for two byte).
5093  __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
5094  __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
5095  // Load the length from the original subject string from the previous stack
5096  // frame. Therefore we have to use fp, which points exactly to two pointer
5097  // sizes below the previous sp. (Because creating a new stack frame pushes
5098  // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
5099  __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
5100  // If slice offset is not 0, load the length from the original sliced string.
5101  // Argument 4, a3: End of string data
5102  // Argument 3, a2: Start of string data
5103  // Prepare start and end index of the input.
5104  __ sllv(t1, t0, a3);
5105  __ addu(t0, t2, t1);
5106  __ sllv(t1, a1, a3);
5107  __ addu(a2, t0, t1);
5108 
5109  __ lw(t2, FieldMemOperand(subject, String::kLengthOffset));
5110  __ sra(t2, t2, kSmiTagSize);
5111  __ sllv(t1, t2, a3);
5112  __ addu(a3, t0, t1);
5113  // Argument 2 (a1): Previous index.
5114  // Already there
5115 
5116  // Argument 1 (a0): Subject string.
5117  __ mov(a0, subject);
5118 
5119  // Locate the code entry and call it.
5120  __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
5121  DirectCEntryStub stub;
5122  stub.GenerateCall(masm, t9);
5123 
5124  __ LeaveExitFrame(false, no_reg);
5125 
5126  // v0: result
5127  // subject: subject string (callee saved)
5128  // regexp_data: RegExp data (callee saved)
5129  // last_match_info_elements: Last match info elements (callee saved)
5130 
5131  // Check the result.
5132 
5133  Label success;
5134  __ Branch(&success, eq, v0, Operand(1));
5135  // We expect exactly one result since we force the called regexp to behave
5136  // as non-global.
5137  Label failure;
5138  __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
5139  // If not exception it can only be retry. Handle that in the runtime system.
5140  __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
5141  // Result must now be exception. If there is no pending exception already, a
5142  // stack overflow (on the backtrack stack) was detected in RegExp code but
5143  // the exception has not been created yet. Handle that in the runtime system.
5144  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
5145  __ li(a1, Operand(isolate->factory()->the_hole_value()));
5146  __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
5147  isolate)));
5148  __ lw(v0, MemOperand(a2, 0));
5149  __ Branch(&runtime, eq, v0, Operand(a1));
5150 
5151  __ sw(a1, MemOperand(a2, 0)); // Clear pending exception.
5152 
5153  // Check if the exception is a termination. If so, throw as uncatchable.
5154  __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
5155  Label termination_exception;
5156  __ Branch(&termination_exception, eq, v0, Operand(a0));
5157 
5158  __ Throw(v0);
5159 
5160  __ bind(&termination_exception);
5161  __ ThrowUncatchable(v0);
5162 
5163  __ bind(&failure);
5164  // For failure and exception return null.
5165  __ li(v0, Operand(isolate->factory()->null_value()));
5166  __ DropAndRet(4);
5167 
5168  // Process the result from the native regexp code.
5169  __ bind(&success);
5170  __ lw(a1,
5172  // Calculate number of capture registers (number_of_captures + 1) * 2.
5173  STATIC_ASSERT(kSmiTag == 0);
5175  __ Addu(a1, a1, Operand(2)); // a1 was a smi.
5176 
5177  // a1: number of capture registers
5178  // subject: subject string
5179  // Store the capture count.
5180  __ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi.
5181  __ sw(a2, FieldMemOperand(last_match_info_elements,
5183  // Store last subject and last input.
5184  __ sw(subject,
5185  FieldMemOperand(last_match_info_elements,
5187  __ mov(a2, subject);
5188  __ RecordWriteField(last_match_info_elements,
5190  a2,
5191  t3,
5193  kDontSaveFPRegs);
5194  __ sw(subject,
5195  FieldMemOperand(last_match_info_elements,
5197  __ RecordWriteField(last_match_info_elements,
5199  subject,
5200  t3,
5202  kDontSaveFPRegs);
5203 
5204  // Get the static offsets vector filled by the native regexp code.
5205  ExternalReference address_of_static_offsets_vector =
5206  ExternalReference::address_of_static_offsets_vector(isolate);
5207  __ li(a2, Operand(address_of_static_offsets_vector));
5208 
5209  // a1: number of capture registers
5210  // a2: offsets vector
5211  Label next_capture, done;
5212  // Capture register counter starts from number of capture registers and
5213  // counts down until wrapping after zero.
5214  __ Addu(a0,
5215  last_match_info_elements,
5217  __ bind(&next_capture);
5218  __ Subu(a1, a1, Operand(1));
5219  __ Branch(&done, lt, a1, Operand(zero_reg));
5220  // Read the value from the static offsets vector buffer.
5221  __ lw(a3, MemOperand(a2, 0));
5222  __ addiu(a2, a2, kPointerSize);
5223  // Store the smi value in the last match info.
5224  __ sll(a3, a3, kSmiTagSize); // Convert to Smi.
5225  __ sw(a3, MemOperand(a0, 0));
5226  __ Branch(&next_capture, USE_DELAY_SLOT);
5227  __ addiu(a0, a0, kPointerSize); // In branch delay slot.
5228 
5229  __ bind(&done);
5230 
5231  // Return last match info.
5232  __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
5233  __ DropAndRet(4);
5234 
5235  // External string. Short external strings have already been ruled out.
5236  // a0: scratch
5237  __ bind(&external_string);
5238  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
5240  if (FLAG_debug_code) {
5241  // Assert that we do not have a cons or slice (indirect strings) here.
5242  // Sequential strings have already been ruled out.
5243  __ And(at, a0, Operand(kIsIndirectStringMask));
5244  __ Assert(eq,
5245  "external string expected, but not found",
5246  at,
5247  Operand(zero_reg));
5248  }
5249  __ lw(subject,
5251  // Move the pointer so that offset-wise, it looks like a sequential string.
5253  __ Subu(subject,
5254  subject,
5256  __ jmp(&seq_string);
5257 
5258  // Do the runtime call to execute the regexp.
5259  __ bind(&runtime);
5260  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
5261 #endif // V8_INTERPRETED_REGEXP
5262 }
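// Illustrative sketch (not part of this file): the nine arguments that the
// stub above assembles for the call into generated irregexp code, collected
// from the "Argument N" comments. The struct name and field types are
// assumptions made for readability; the real call passes them in a0-a3 and in
// the stack slots shown above.
struct IrregexpCallArgsSketch {
  const void* subject;         // Argument 1: subject string.
  int previous_index;          // Argument 2: previous (start) index, untagged.
  const void* input_start;     // Argument 3: start of string data.
  const void* input_end;       // Argument 4: end of string data.
  int* static_offsets_vector;  // Argument 5: static offsets vector buffer.
  int capture_register_count;  // Argument 6: 0, forces non-global behavior.
  void* backtrack_stack_top;   // Argument 7: high end of backtrack stack area.
  int direct_call;             // Argument 8: 1 = direct call from JavaScript.
  void* isolate;               // Argument 9: current isolate address.
};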
5263 
5264 
5265 void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
5266  const int kMaxInlineLength = 100;
5267  Label slowcase;
5268  Label done;
5269  __ lw(a1, MemOperand(sp, kPointerSize * 2));
5270  STATIC_ASSERT(kSmiTag == 0);
5271  STATIC_ASSERT(kSmiTagSize == 1);
5272  __ JumpIfNotSmi(a1, &slowcase);
5273  __ Branch(&slowcase, hi, a1, Operand(Smi::FromInt(kMaxInlineLength)));
5274  // Smi-tagging is equivalent to multiplying by 2.
5275  // Allocate RegExpResult followed by FixedArray with size (in words) in a2.
5276  // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
5277  // Elements: [Map][Length][..elements..]
5278  // Size of JSArray with two in-object properties and the header of a
5279  // FixedArray.
5280  int objects_size =
5282  __ srl(t1, a1, kSmiTagSize + kSmiShiftSize);
5283  __ Addu(a2, t1, Operand(objects_size));
5284  __ AllocateInNewSpace(
5285  a2, // In: Size, in words.
5286  v0, // Out: Start of allocation (tagged).
5287  a3, // Scratch register.
5288  t0, // Scratch register.
5289  &slowcase,
5290  static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
5291  // v0: Start of allocated area, object-tagged.
5292  // a1: Number of elements in array, as smi.
5293  // t1: Number of elements, untagged.
5294 
5295  // Set JSArray map to global.regexp_result_map().
5296  // Set empty properties FixedArray.
5297  // Set elements to point to FixedArray allocated right after the JSArray.
5298  // Interleave operations for better latency.
5300  __ Addu(a3, v0, Operand(JSRegExpResult::kSize));
5301  __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
5307 
5308  // Set input, index and length fields from arguments.
5309  __ lw(a1, MemOperand(sp, kPointerSize * 0));
5310  __ lw(a2, MemOperand(sp, kPointerSize * 1));
5311  __ lw(t2, MemOperand(sp, kPointerSize * 2));
5315 
5316  // Fill out the elements FixedArray.
5317  // v0: JSArray, tagged.
5318  // a3: FixedArray, tagged.
5319  // t1: Number of elements in array, untagged.
5320 
5321  // Set map.
5322  __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map()));
5324  // Set FixedArray length.
5325  __ sll(t2, t1, kSmiTagSize);
5327  // Fill contents of fixed-array with the-hole.
5328  __ li(a2, Operand(masm->isolate()->factory()->the_hole_value()));
5329  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
5330  // Fill fixed array elements with hole.
5331  // v0: JSArray, tagged.
5332  // a2: the hole.
5333  // a3: Start of elements in FixedArray.
5334  // t1: Number of elements to fill.
5335  Label loop;
5336  __ sll(t1, t1, kPointerSizeLog2); // Convert num elements to num bytes.
5337  __ addu(t1, t1, a3); // Point past last element to store.
5338  __ bind(&loop);
5339  __ Branch(&done, ge, a3, Operand(t1)); // Break when a3 past end of elem.
5340  __ sw(a2, MemOperand(a3));
5341  __ Branch(&loop, USE_DELAY_SLOT);
5342  __ addiu(a3, a3, kPointerSize); // In branch delay slot.
5343 
5344  __ bind(&done);
5345  __ DropAndRet(3);
5346 
5347  __ bind(&slowcase);
5348  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
5349 }
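// Illustrative sketch (not part of this file): how the inline RegExp result
// allocation size is computed, per the layout comments above (JSArray with two
// in-object properties, then the FixedArray header, then one word per
// element). The parameters stand in for JSRegExpResult::kSize,
// FixedArray::kHeaderSize and kPointerSize; names here are assumptions.
static int RegExpResultSizeInWordsSketch(int num_elements,
                                         int js_regexp_result_size_bytes,
                                         int fixed_array_header_size_bytes,
                                         int pointer_size_bytes) {
  int header_words =
      (js_regexp_result_size_bytes + fixed_array_header_size_bytes) /
      pointer_size_bytes;
  return header_words + num_elements;
}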
5350 
5351 
5352 static void GenerateRecordCallTarget(MacroAssembler* masm) {
5353  // Cache the called function in a global property cell. Cache states
5354  // are uninitialized, monomorphic (indicated by a JSFunction), and
5355  // megamorphic.
5356  // a1 : the function to call
5357  // a2 : cache cell for call target
5358  Label done;
5359 
5361  masm->isolate()->heap()->undefined_value());
5363  masm->isolate()->heap()->the_hole_value());
5364 
5365  // Load the cache state into a3.
5367 
5368  // A monomorphic cache hit or an already megamorphic state: invoke the
5369  // function without changing the state.
5370  __ Branch(&done, eq, a3, Operand(a1));
5371  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5372  __ Branch(&done, eq, a3, Operand(at));
5373 
5374  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
5375  // megamorphic.
5376  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
5377 
5378  __ Branch(USE_DELAY_SLOT, &done, eq, a3, Operand(at));
5379  // An uninitialized cache is patched with the function.
5380  // Store a1 in the delay slot. This may or may not get overwritten depending
5381  // on the result of the comparison.
5383  // No need for a write barrier here - cells are rescanned.
5384 
5385  // MegamorphicSentinel is an immortal immovable object (undefined) so no
5386  // write-barrier is needed.
5387  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5389 
5390  __ bind(&done);
5391 }
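// Illustrative sketch (not part of this file): the call-target cache state
// machine implemented above. "the hole" marks an uninitialized cell,
// "undefined" is the megamorphic sentinel, and any other value is the cached
// (monomorphic) JSFunction. The names below are assumptions for the example.
enum class CallCacheStateSketch { kUninitialized, kMonomorphic, kMegamorphic };

// Returns the new cache contents given the current cell value and the callee.
template <typename Value>
Value UpdateCallTargetCacheSketch(Value cell, Value function,
                                  Value the_hole, Value undefined) {
  if (cell == function || cell == undefined) return cell;  // Hit, or already megamorphic.
  if (cell == the_hole) return function;                   // First call: go monomorphic.
  return undefined;                                        // Monomorphic miss: go megamorphic.
}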
5392 
5393 
5394 void CallFunctionStub::Generate(MacroAssembler* masm) {
5395  // a1 : the function to call
5396  // a2 : cache cell for call target
5397  Label slow, non_function;
5398 
5399  // The receiver might implicitly be the global object. This is
5400  // indicated by passing the hole as the receiver to the call
5401  // function stub.
5402  if (ReceiverMightBeImplicit()) {
5403  Label call;
5404  // Get the receiver from the stack.
5405  // function, receiver [, arguments]
5406  __ lw(t0, MemOperand(sp, argc_ * kPointerSize));
5407  // Call as function is indicated with the hole.
5408  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
5409  __ Branch(&call, ne, t0, Operand(at));
5410  // Patch the receiver on the stack with the global receiver object.
5413  __ sw(a3, MemOperand(sp, argc_ * kPointerSize));
5414  __ bind(&call);
5415  }
5416 
5417  // Check that the function is really a JavaScript function.
5418  // a1: pushed function (to be verified)
5419  __ JumpIfSmi(a1, &non_function);
5420  // Get the map of the function object.
5421  __ GetObjectType(a1, a3, a3);
5422  __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
5423 
5424  if (RecordCallTarget()) {
5425  GenerateRecordCallTarget(masm);
5426  }
5427 
5428  // Fast-case: Invoke the function now.
5429  // a1: pushed function
5430  ParameterCount actual(argc_);
5431 
5432  if (ReceiverMightBeImplicit()) {
5433  Label call_as_function;
5434  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
5435  __ Branch(&call_as_function, eq, t0, Operand(at));
5436  __ InvokeFunction(a1,
5437  actual,
5438  JUMP_FUNCTION,
5439  NullCallWrapper(),
5440  CALL_AS_METHOD);
5441  __ bind(&call_as_function);
5442  }
5443  __ InvokeFunction(a1,
5444  actual,
5445  JUMP_FUNCTION,
5446  NullCallWrapper(),
5448 
5449  // Slow-case: Non-function called.
5450  __ bind(&slow);
5451  if (RecordCallTarget()) {
5452  // If there is a call target cache, mark it megamorphic in the
5453  // non-function case. MegamorphicSentinel is an immortal immovable
5454  // object (undefined) so no write barrier is needed.
5456  masm->isolate()->heap()->undefined_value());
5457  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5459  }
5460  // Check for function proxy.
5461  __ Branch(&non_function, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
5462  __ push(a1); // Put proxy as additional argument.
5463  __ li(a0, Operand(argc_ + 1, RelocInfo::NONE));
5464  __ li(a2, Operand(0, RelocInfo::NONE));
5465  __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
5466  __ SetCallKind(t1, CALL_AS_METHOD);
5467  {
5468  Handle<Code> adaptor =
5469  masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
5470  __ Jump(adaptor, RelocInfo::CODE_TARGET);
5471  }
5472 
5473  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
5474  // of the original receiver from the call site).
5475  __ bind(&non_function);
5476  __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
5477  __ li(a0, Operand(argc_)); // Set up the number of arguments.
5478  __ mov(a2, zero_reg);
5479  __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
5480  __ SetCallKind(t1, CALL_AS_METHOD);
5481  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
5482  RelocInfo::CODE_TARGET);
5483 }
5484 
5485 
5486 void CallConstructStub::Generate(MacroAssembler* masm) {
5487  // a0 : number of arguments
5488  // a1 : the function to call
5489  // a2 : cache cell for call target
5490  Label slow, non_function_call;
5491 
5492  // Check that the function is not a smi.
5493  __ JumpIfSmi(a1, &non_function_call);
5494  // Check that the function is a JSFunction.
5495  __ GetObjectType(a1, a3, a3);
5496  __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
5497 
5498  if (RecordCallTarget()) {
5499  GenerateRecordCallTarget(masm);
5500  }
5501 
5502  // Jump to the function-specific construct stub.
5505  __ Addu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
5506  __ Jump(at);
5507 
5508  // a0: number of arguments
5509  // a1: called object
5510  // a3: object type
5511  Label do_call;
5512  __ bind(&slow);
5513  __ Branch(&non_function_call, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
5514  __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
5515  __ jmp(&do_call);
5516 
5517  __ bind(&non_function_call);
5518  __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
5519  __ bind(&do_call);
5520  // Set expected number of arguments to zero (not changing a0).
5521  __ li(a2, Operand(0, RelocInfo::NONE));
5522  __ SetCallKind(t1, CALL_AS_METHOD);
5523  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
5524  RelocInfo::CODE_TARGET);
5525 }
5526 
5527 
5528 // Unfortunately you have to run without snapshots to see most of these
5529 // names in the profile since most compare stubs end up in the snapshot.
5530 void CompareStub::PrintName(StringStream* stream) {
5531  ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
5532  (lhs_.is(a1) && rhs_.is(a0)));
5533  const char* cc_name;
5534  switch (cc_) {
5535  case lt: cc_name = "LT"; break;
5536  case gt: cc_name = "GT"; break;
5537  case le: cc_name = "LE"; break;
5538  case ge: cc_name = "GE"; break;
5539  case eq: cc_name = "EQ"; break;
5540  case ne: cc_name = "NE"; break;
5541  default: cc_name = "UnknownCondition"; break;
5542  }
5543  bool is_equality = cc_ == eq || cc_ == ne;
5544  stream->Add("CompareStub_%s", cc_name);
5545  stream->Add(lhs_.is(a0) ? "_a0" : "_a1");
5546  stream->Add(rhs_.is(a0) ? "_a0" : "_a1");
5547  if (strict_ && is_equality) stream->Add("_STRICT");
5548  if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
5549  if (!include_number_compare_) stream->Add("_NO_NUMBER");
5550  if (!include_smi_compare_) stream->Add("_NO_SMI");
5551 }
5552 
5553 
5554 int CompareStub::MinorKey() {
5555  // Encode the two parameters in a unique 16 bit value.
5556  ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
5557  ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
5558  (lhs_.is(a1) && rhs_.is(a0)));
5559  return ConditionField::encode(static_cast<unsigned>(cc_))
5560  | RegisterField::encode(lhs_.is(a0))
5561  | StrictField::encode(strict_)
5562  | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
5563  | IncludeSmiCompareField::encode(include_smi_compare_);
5564 }
5565 
5566 
5567 // StringCharCodeAtGenerator.
5568 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
5569  Label flat_string;
5570  Label ascii_string;
5571  Label got_char_code;
5572  Label sliced_string;
5573 
5574  ASSERT(!t0.is(index_));
5575  ASSERT(!t0.is(result_));
5576  ASSERT(!t0.is(object_));
5577 
5578  // If the receiver is a smi trigger the non-string case.
5579  __ JumpIfSmi(object_, receiver_not_string_);
5580 
5581  // Fetch the instance type of the receiver into result register.
5582  __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5583  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5584  // If the receiver is not a string trigger the non-string case.
5585  __ And(t0, result_, Operand(kIsNotStringMask));
5586  __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
5587 
5588  // If the index is non-smi trigger the non-smi case.
5589  __ JumpIfNotSmi(index_, &index_not_smi_);
5590 
5591  __ bind(&got_smi_index_);
5592 
5593  // Check for index out of range.
5594  __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
5595  __ Branch(index_out_of_range_, ls, t0, Operand(index_));
5596 
5597  __ sra(index_, index_, kSmiTagSize);
5598 
5600  object_,
5601  index_,
5602  result_,
5603  &call_runtime_);
5604 
5605  __ sll(result_, result_, kSmiTagSize);
5606  __ bind(&exit_);
5607 }
5608 
5609 
5611  MacroAssembler* masm,
5612  const RuntimeCallHelper& call_helper) {
5613  __ Abort("Unexpected fallthrough to CharCodeAt slow case");
5614 
5615  // Index is not a smi.
5616  __ bind(&index_not_smi_);
5617  // If index is a heap number, try converting it to an integer.
5618  __ CheckMap(index_,
5619  result_,
5620  Heap::kHeapNumberMapRootIndex,
5621  index_not_number_,
5623  call_helper.BeforeCall(masm);
5624  // Consumed by runtime conversion function:
5625  __ Push(object_, index_);
5626  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
5627  __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
5628  } else {
5629  ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
5630  // NumberToSmi discards numbers that are not exact integers.
5631  __ CallRuntime(Runtime::kNumberToSmi, 1);
5632  }
5633 
5634  // Save the conversion result before the pop instructions below
5635  // have a chance to overwrite it.
5636 
5637  __ Move(index_, v0);
5638  __ pop(object_);
5639  // Reload the instance type.
5640  __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5641  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5642  call_helper.AfterCall(masm);
5643  // If index is still not a smi, it must be out of range.
5644  __ JumpIfNotSmi(index_, index_out_of_range_);
5645  // Otherwise, return to the fast path.
5646  __ Branch(&got_smi_index_);
5647 
5648  // Call runtime. We get here when the receiver is a string and the
5649  // index is a number, but the code of getting the actual character
5650  // is too complex (e.g., when the string needs to be flattened).
5651  __ bind(&call_runtime_);
5652  call_helper.BeforeCall(masm);
5653  __ sll(index_, index_, kSmiTagSize);
5654  __ Push(object_, index_);
5655  __ CallRuntime(Runtime::kStringCharCodeAt, 2);
5656 
5657  __ Move(result_, v0);
5658 
5659  call_helper.AfterCall(masm);
5660  __ jmp(&exit_);
5661 
5662  __ Abort("Unexpected fallthrough from CharCodeAt slow case");
5663 }
5664 
5665 
5666 // -------------------------------------------------------------------------
5667 // StringCharFromCodeGenerator
5668 
5669 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
5670  // Fast case of Heap::LookupSingleCharacterStringFromCode.
5671 
5672  ASSERT(!t0.is(result_));
5673  ASSERT(!t0.is(code_));
5674 
5675  STATIC_ASSERT(kSmiTag == 0);
5678  __ And(t0,
5679  code_,
5680  Operand(kSmiTagMask |
5682  __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
5683 
5684  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
5685  // At this point code register contains smi tagged ASCII char code.
5686  STATIC_ASSERT(kSmiTag == 0);
5687  __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
5688  __ Addu(result_, result_, t0);
5689  __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
5690  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
5691  __ Branch(&slow_case_, eq, result_, Operand(t0));
5692  __ bind(&exit_);
5693 }
5694 
5695 
5697  MacroAssembler* masm,
5698  const RuntimeCallHelper& call_helper) {
5699  __ Abort("Unexpected fallthrough to CharFromCode slow case");
5700 
5701  __ bind(&slow_case_);
5702  call_helper.BeforeCall(masm);
5703  __ push(code_);
5704  __ CallRuntime(Runtime::kCharFromCode, 1);
5705  __ Move(result_, v0);
5706 
5707  call_helper.AfterCall(masm);
5708  __ Branch(&exit_);
5709 
5710  __ Abort("Unexpected fallthrough from CharFromCode slow case");
5711 }
5712 
5713 
5714 // -------------------------------------------------------------------------
5715 // StringCharAtGenerator
5716 
5717 void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
5718  char_code_at_generator_.GenerateFast(masm);
5719  char_from_code_generator_.GenerateFast(masm);
5720 }
5721 
5722 
5724  MacroAssembler* masm,
5725  const RuntimeCallHelper& call_helper) {
5726  char_code_at_generator_.GenerateSlow(masm, call_helper);
5727  char_from_code_generator_.GenerateSlow(masm, call_helper);
5728 }
5729 
5730 
5731 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
5732  Register dest,
5733  Register src,
5734  Register count,
5735  Register scratch,
5736  bool ascii) {
5737  Label loop;
5738  Label done;
5739  // This loop just copies one character at a time, as it is only used for
5740  // very short strings.
5741  if (!ascii) {
5742  __ addu(count, count, count);
5743  }
5744  __ Branch(&done, eq, count, Operand(zero_reg));
5745  __ addu(count, dest, count); // Count now points to the last dest byte.
5746 
5747  __ bind(&loop);
5748  __ lbu(scratch, MemOperand(src));
5749  __ addiu(src, src, 1);
5750  __ sb(scratch, MemOperand(dest));
5751  __ addiu(dest, dest, 1);
5752  __ Branch(&loop, lt, dest, Operand(count));
5753 
5754  __ bind(&done);
5755 }
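// Illustrative sketch (not part of this file) of the simple copy loop above:
// one byte at a time, with the count doubled for two-byte strings. Parameter
// names are assumptions for the example.
static void CopyCharactersSketch(unsigned char* dest, const unsigned char* src,
                                 int count, bool ascii) {
  if (!ascii) count += count;                 // Two-byte: copy 2 * count bytes.
  const unsigned char* limit = dest + count;  // One past the last dest byte.
  while (dest < limit) *dest++ = *src++;
}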
5756 
5757 
5758 enum CopyCharactersFlags {
5759  COPY_ASCII = 1,
5760  DEST_ALWAYS_ALIGNED = 2
5761 };
5762 
5763 
5764 void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
5765  Register dest,
5766  Register src,
5767  Register count,
5768  Register scratch1,
5769  Register scratch2,
5770  Register scratch3,
5771  Register scratch4,
5772  Register scratch5,
5773  int flags) {
5774  bool ascii = (flags & COPY_ASCII) != 0;
5775  bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
5776 
5777  if (dest_always_aligned && FLAG_debug_code) {
5778  // Check that destination is actually word aligned if the flag says
5779  // that it is.
5780  __ And(scratch4, dest, Operand(kPointerAlignmentMask));
5781  __ Check(eq,
5782  "Destination of copy not aligned.",
5783  scratch4,
5784  Operand(zero_reg));
5785  }
5786 
5787  const int kReadAlignment = 4;
5788  const int kReadAlignmentMask = kReadAlignment - 1;
5789  // Ensure that reading an entire aligned word containing the last character
5790  // of a string will not read outside the allocated area (because we pad up
5791  // to kObjectAlignment).
5792  STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
5793  // Assumes word reads and writes are little endian.
5794  // Nothing to do for zero characters.
5795  Label done;
5796 
5797  if (!ascii) {
5798  __ addu(count, count, count);
5799  }
5800  __ Branch(&done, eq, count, Operand(zero_reg));
5801 
5802  Label byte_loop;
5803  // Must copy at least eight bytes, otherwise just do it one byte at a time.
5804  __ Subu(scratch1, count, Operand(8));
5805  __ Addu(count, dest, Operand(count));
5806  Register limit = count; // Read until src equals this.
5807  __ Branch(&byte_loop, lt, scratch1, Operand(zero_reg));
5808 
5809  if (!dest_always_aligned) {
5810  // Align dest by byte copying. Copies between zero and three bytes.
5811  __ And(scratch4, dest, Operand(kReadAlignmentMask));
5812  Label dest_aligned;
5813  __ Branch(&dest_aligned, eq, scratch4, Operand(zero_reg));
5814  Label aligned_loop;
5815  __ bind(&aligned_loop);
5816  __ lbu(scratch1, MemOperand(src));
5817  __ addiu(src, src, 1);
5818  __ sb(scratch1, MemOperand(dest));
5819  __ addiu(dest, dest, 1);
5820  __ addiu(scratch4, scratch4, 1);
5821  __ Branch(&aligned_loop, le, scratch4, Operand(kReadAlignmentMask));
5822  __ bind(&dest_aligned);
5823  }
5824 
5825  Label simple_loop;
5826 
5827  __ And(scratch4, src, Operand(kReadAlignmentMask));
5828  __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg));
5829 
5830  // Loop for src/dst that are not aligned the same way.
5831  // This loop uses lwl and lwr instructions. These instructions
5832  // depend on the endianness, and the implementation assumes little-endian.
5833  {
5834  Label loop;
5835  __ bind(&loop);
5836  __ lwr(scratch1, MemOperand(src));
5837  __ Addu(src, src, Operand(kReadAlignment));
5838  __ lwl(scratch1, MemOperand(src, -1));
5839  __ sw(scratch1, MemOperand(dest));
5840  __ Addu(dest, dest, Operand(kReadAlignment));
5841  __ Subu(scratch2, limit, dest);
5842  __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
5843  }
5844 
5845  __ Branch(&byte_loop);
5846 
5847  // Simple loop.
5848  // Copy words from src to dest, until less than four bytes left.
5849  // Both src and dest are word aligned.
5850  __ bind(&simple_loop);
5851  {
5852  Label loop;
5853  __ bind(&loop);
5854  __ lw(scratch1, MemOperand(src));
5855  __ Addu(src, src, Operand(kReadAlignment));
5856  __ sw(scratch1, MemOperand(dest));
5857  __ Addu(dest, dest, Operand(kReadAlignment));
5858  __ Subu(scratch2, limit, dest);
5859  __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
5860  }
5861 
5862  // Copy bytes from src to dest until dest hits limit.
5863  __ bind(&byte_loop);
5864  // Test if dest has already reached the limit.
5865  __ Branch(&done, ge, dest, Operand(limit));
5866  __ lbu(scratch1, MemOperand(src));
5867  __ addiu(src, src, 1);
5868  __ sb(scratch1, MemOperand(dest));
5869  __ addiu(dest, dest, 1);
5870  __ Branch(&byte_loop);
5871 
5872  __ bind(&done);
5873 }
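// Illustrative sketch (not part of this file) of the strategy used above:
// copies shorter than eight bytes go byte-by-byte; longer copies first
// byte-align the destination, then move whole words (the stub uses lwl/lwr
// when the source is not word aligned), and finish any tail byte-by-byte.
// memcpy below stands in for both word loops; names are assumptions.
#include <cstdint>
#include <cstring>
static void CopyCharactersLongSketch(unsigned char* dest,
                                     const unsigned char* src, int byte_count) {
  if (byte_count >= 8) {
    // Byte-copy until the destination is word aligned.
    while ((reinterpret_cast<uintptr_t>(dest) & 3) != 0) {
      *dest++ = *src++;
      --byte_count;
    }
    // Whole-word moves for the aligned middle section.
    int words = byte_count / 4;
    std::memcpy(dest, src, static_cast<size_t>(words) * 4);
    dest += words * 4;
    src += words * 4;
    byte_count -= words * 4;
  }
  // Copy any remaining bytes one at a time.
  while (byte_count-- > 0) *dest++ = *src++;
}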
5874 
5875 
5876 void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
5877  Register c1,
5878  Register c2,
5879  Register scratch1,
5880  Register scratch2,
5881  Register scratch3,
5882  Register scratch4,
5883  Register scratch5,
5884  Label* not_found) {
5885  // Register scratch3 is the general scratch register in this function.
5886  Register scratch = scratch3;
5887 
5888  // Make sure that both characters are not digits, as such strings have a
5889  // different hash algorithm. Don't try to look for these in the symbol table.
5890  Label not_array_index;
5891  __ Subu(scratch, c1, Operand(static_cast<int>('0')));
5892  __ Branch(&not_array_index,
5893  Ugreater,
5894  scratch,
5895  Operand(static_cast<int>('9' - '0')));
5896  __ Subu(scratch, c2, Operand(static_cast<int>('0')));
5897 
5898  // If the check failed, combine both characters into a single halfword.
5899  // This is required by the contract of the method: code at the
5900  // not_found branch expects this combination in the c1 register.
5901  Label tmp;
5902  __ sll(scratch1, c2, kBitsPerByte);
5903  __ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
5904  __ Or(c1, c1, scratch1);
5905  __ bind(&tmp);
5906  __ Branch(
5907  not_found, Uless_equal, scratch, Operand(static_cast<int>('9' - '0')));
5908 
5909  __ bind(&not_array_index);
5910  // Calculate the two character string hash.
5911  Register hash = scratch1;
5912  StringHelper::GenerateHashInit(masm, hash, c1);
5915 
5916  // Collect the two characters in a register.
5917  Register chars = c1;
5918  __ sll(scratch, c2, kBitsPerByte);
5919  __ Or(chars, chars, scratch);
5920 
5921  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5922  // hash: hash of two character string.
5923 
5924  // Load symbol table.
5925  // Load address of first element of the symbol table.
5926  Register symbol_table = c2;
5927  __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
5928 
5929  Register undefined = scratch4;
5930  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
5931 
5932  // Calculate capacity mask from the symbol table capacity.
5933  Register mask = scratch2;
5934  __ lw(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
5935  __ sra(mask, mask, 1);
5936  __ Addu(mask, mask, -1);
5937 
5938  // Calculate untagged address of the first element of the symbol table.
5939  Register first_symbol_table_element = symbol_table;
5940  __ Addu(first_symbol_table_element, symbol_table,
5942 
5943  // Registers.
5944  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5945  // hash: hash of two character string
5946  // mask: capacity mask
5947  // first_symbol_table_element: address of the first element of
5948  // the symbol table
5949  // undefined: the undefined object
5950  // scratch: -
5951 
5952  // Perform a number of probes in the symbol table.
5953  const int kProbes = 4;
5954  Label found_in_symbol_table;
5955  Label next_probe[kProbes];
5956  Register candidate = scratch5; // Scratch register contains candidate.
5957  for (int i = 0; i < kProbes; i++) {
5958  // Calculate entry in symbol table.
5959  if (i > 0) {
5960  __ Addu(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
5961  } else {
5962  __ mov(candidate, hash);
5963  }
5964 
5965  __ And(candidate, candidate, Operand(mask));
5966 
5967  // Load the entry from the symbol table.
5969  __ sll(scratch, candidate, kPointerSizeLog2);
5970  __ Addu(scratch, scratch, first_symbol_table_element);
5971  __ lw(candidate, MemOperand(scratch));
5972 
5973  // If entry is undefined no string with this hash can be found.
5974  Label is_string;
5975  __ GetObjectType(candidate, scratch, scratch);
5976  __ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE));
5977 
5978  __ Branch(not_found, eq, undefined, Operand(candidate));
5979  // Must be the hole (deleted entry).
5980  if (FLAG_debug_code) {
5981  __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
5982  __ Assert(eq, "oddball in symbol table is not undefined or the hole",
5983  scratch, Operand(candidate));
5984  }
5985  __ jmp(&next_probe[i]);
5986 
5987  __ bind(&is_string);
5988 
5989  // Check that the candidate is a non-external ASCII string. The instance
5990  // type is still in the scratch register from the CompareObjectType
5991  // operation.
5992  __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
5993 
5994  // If length is not 2 the string is not a candidate.
5995  __ lw(scratch, FieldMemOperand(candidate, String::kLengthOffset));
5996  __ Branch(&next_probe[i], ne, scratch, Operand(Smi::FromInt(2)));
5997 
5998  // Check if the two characters match.
5999  // Assumes that word load is little endian.
6000  __ lhu(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
6001  __ Branch(&found_in_symbol_table, eq, chars, Operand(scratch));
6002  __ bind(&next_probe[i]);
6003  }
6004 
6005  // No matching 2 character string found by probing.
6006  __ jmp(not_found);
6007 
6008  // Scratch register contains result when we fall through to here.
6009  Register result = candidate;
6010  __ bind(&found_in_symbol_table);
6011  __ mov(v0, result);
6012 }
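// Illustrative sketch (not part of this file): the probe sequence used above.
// Each of the kProbes attempts inspects entry ((hash + probe_offset(i)) & mask),
// where mask = capacity - 1. probe_offset() stands in for
// SymbolTable::GetProbeOffset(), whose values are not shown in this file;
// the function name is an assumption for the example.
static inline unsigned TwoCharProbeIndexSketch(unsigned hash, unsigned mask,
                                               int i, unsigned probe_offset) {
  return (i == 0 ? hash : hash + probe_offset) & mask;
}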
6013 
6014 
6015 void StringHelper::GenerateHashInit(MacroAssembler* masm,
6016  Register hash,
6017  Register character) {
6018  // hash = seed + character + ((seed + character) << 10);
6019  __ LoadRoot(hash, Heap::kHashSeedRootIndex);
6020  // Untag smi seed and add the character.
6021  __ SmiUntag(hash);
6022  __ addu(hash, hash, character);
6023  __ sll(at, hash, 10);
6024  __ addu(hash, hash, at);
6025  // hash ^= hash >> 6;
6026  __ srl(at, hash, 6);
6027  __ xor_(hash, hash, at);
6028 }
6029 
6030 
6031 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
6032  Register hash,
6033  Register character) {
6034  // hash += character;
6035  __ addu(hash, hash, character);
6036  // hash += hash << 10;
6037  __ sll(at, hash, 10);
6038  __ addu(hash, hash, at);
6039  // hash ^= hash >> 6;
6040  __ srl(at, hash, 6);
6041  __ xor_(hash, hash, at);
6042 }
6043 
6044 
6045 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
6046  Register hash) {
6047  // hash += hash << 3;
6048  __ sll(at, hash, 3);
6049  __ addu(hash, hash, at);
6050  // hash ^= hash >> 11;
6051  __ srl(at, hash, 11);
6052  __ xor_(hash, hash, at);
6053  // hash += hash << 15;
6054  __ sll(at, hash, 15);
6055  __ addu(hash, hash, at);
6056 
6057  __ li(at, Operand(String::kHashBitMask));
6058  __ and_(hash, hash, at);
6059 
6060  // if (hash == 0) hash = 27;
6061  __ ori(at, zero_reg, StringHasher::kZeroHash);
6062  __ Movz(hash, at, hash);
6063 }
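// Illustrative sketch (not part of this file): the same running hash expressed
// in C++, following the step comments in the three helpers above. 'seed',
// 'hash_bit_mask' and 'zero_hash' stand in for the heap's hash seed,
// String::kHashBitMask and StringHasher::kZeroHash; the parameterization is an
// assumption for the example.
#include <cstdint>
static uint32_t StringHashSketch(const unsigned char* chars, int length,
                                 uint32_t seed, uint32_t hash_bit_mask,
                                 uint32_t zero_hash) {
  uint32_t hash = seed;
  for (int i = 0; i < length; i++) {
    hash += chars[i];    // hash += character;
    hash += hash << 10;  // hash += hash << 10;
    hash ^= hash >> 6;   // hash ^= hash >> 6;
  }
  hash += hash << 3;     // hash += hash << 3;
  hash ^= hash >> 11;    // hash ^= hash >> 11;
  hash += hash << 15;    // hash += hash << 15;
  hash &= hash_bit_mask;
  return hash == 0 ? zero_hash : hash;  // if (hash == 0) hash = kZeroHash;
}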
6064 
6065 
6066 void SubStringStub::Generate(MacroAssembler* masm) {
6067  Label runtime;
6068  // Stack frame on entry.
6069  // ra: return address
6070  // sp[0]: to
6071  // sp[4]: from
6072  // sp[8]: string
6073 
6074  // This stub is called from the native-call %_SubString(...), so
6075  // nothing can be assumed about the arguments. It is tested that:
6076  // "string" is a sequential string,
6077  // both "from" and "to" are smis, and
6078  // 0 <= from <= to <= string.length.
6079  // If any of these assumptions fail, we call the runtime system.
6080 
6081  const int kToOffset = 0 * kPointerSize;
6082  const int kFromOffset = 1 * kPointerSize;
6083  const int kStringOffset = 2 * kPointerSize;
6084 
6085  __ lw(a2, MemOperand(sp, kToOffset));
6086  __ lw(a3, MemOperand(sp, kFromOffset));
6087  STATIC_ASSERT(kFromOffset == kToOffset + 4);
6088  STATIC_ASSERT(kSmiTag == 0);
6090 
6091  // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
6092  // safe in this case.
6093  __ UntagAndJumpIfNotSmi(a2, a2, &runtime);
6094  __ UntagAndJumpIfNotSmi(a3, a3, &runtime);
6095  // Both a2 and a3 are untagged integers.
6096 
6097  __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0.
6098 
6099  __ Branch(&runtime, gt, a3, Operand(a2)); // Fail if from > to.
6100  __ Subu(a2, a2, a3);
6101 
6102  // Make sure first argument is a string.
6103  __ lw(v0, MemOperand(sp, kStringOffset));
6104  __ JumpIfSmi(v0, &runtime);
6107  __ And(t0, a1, Operand(kIsNotStringMask));
6108 
6109  __ Branch(&runtime, ne, t0, Operand(zero_reg));
6110 
6111  // Short-cut for the case of trivial substring.
6112  Label return_v0;
6113  // v0: original string
6114  // a2: result string length
6116  __ sra(t0, t0, 1);
6117  // Return original string.
6118  __ Branch(&return_v0, eq, a2, Operand(t0));
6119  // Longer than original string's length or negative: unsafe arguments.
6120  __ Branch(&runtime, hi, a2, Operand(t0));
6121  // Shorter than original string's length: an actual substring.
6122 
6123  // Deal with different string types: update the index if necessary
6124  // and put the underlying string into t1.
6125  // v0: original string
6126  // a1: instance type
6127  // a2: length
6128  // a3: from index (untagged)
6129  Label underlying_unpacked, sliced_string, seq_or_external_string;
6130  // If the string is not indirect, it can only be sequential or external.
6133  __ And(t0, a1, Operand(kIsIndirectStringMask));
6134  __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
6135  // t0 is used as a scratch register and can be overwritten in either case.
6136  __ And(t0, a1, Operand(kSlicedNotConsMask));
6137  __ Branch(&sliced_string, ne, t0, Operand(zero_reg));
6138  // Cons string. Check whether it is flat, then fetch first part.
6140  __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
6141  __ Branch(&runtime, ne, t1, Operand(t0));
6143  // Update instance type.
6146  __ jmp(&underlying_unpacked);
6147 
6148  __ bind(&sliced_string);
6149  // Sliced string. Fetch parent and correct start index by offset.
6152  __ sra(t0, t0, 1); // Add offset to index.
6153  __ Addu(a3, a3, t0);
6154  // Update instance type.
6157  __ jmp(&underlying_unpacked);
6158 
6159  __ bind(&seq_or_external_string);
6160  // Sequential or external string. Just move string to the expected register.
6161  __ mov(t1, v0);
6162 
6163  __ bind(&underlying_unpacked);
6164 
6165  if (FLAG_string_slices) {
6166  Label copy_routine;
6167  // t1: underlying subject string
6168  // a1: instance type of underlying subject string
6169  // a2: length
6170  // a3: adjusted start index (untagged)
6171  // Short slice. Copy instead of slicing.
6172  __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
6173  // Allocate new sliced string. At this point we do not reload the instance
6174  // type including the string encoding because we simply rely on the info
6175  // provided by the original string. It does not matter if the original
6176  // string's encoding is wrong because we always have to recheck encoding of
6177  // the newly created string's parent anyways due to externalized strings.
6178  Label two_byte_slice, set_slice_header;
6181  __ And(t0, a1, Operand(kStringEncodingMask));
6182  __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
6183  __ AllocateAsciiSlicedString(v0, a2, t2, t3, &runtime);
6184  __ jmp(&set_slice_header);
6185  __ bind(&two_byte_slice);
6186  __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
6187  __ bind(&set_slice_header);
6188  __ sll(a3, a3, 1);
6191  __ jmp(&return_v0);
6192 
6193  __ bind(&copy_routine);
6194  }
6195 
6196  // t1: underlying subject string
6197  // a1: instance type of underlying subject string
6198  // a2: length
6199  // a3: adjusted start index (untagged)
6200  Label two_byte_sequential, sequential_string, allocate_result;
6203  __ And(t0, a1, Operand(kExternalStringTag));
6204  __ Branch(&sequential_string, eq, t0, Operand(zero_reg));
6205 
6206  // Handle external string.
6207  // Rule out short external strings.
6209  __ And(t0, a1, Operand(kShortExternalStringTag));
6210  __ Branch(&runtime, ne, t0, Operand(zero_reg));
6212  // t1 already points to the first character of underlying string.
6213  __ jmp(&allocate_result);
6214 
6215  __ bind(&sequential_string);
6216  // Locate first character of underlying subject string.
6218  __ Addu(t1, t1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6219 
6220  __ bind(&allocate_result);
6221  // Sequential ASCII string. Allocate the result.
6223  __ And(t0, a1, Operand(kStringEncodingMask));
6224  __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
6225 
6226  // Allocate and copy the resulting ASCII string.
6227  __ AllocateAsciiString(v0, a2, t0, t2, t3, &runtime);
6228 
6229  // Locate first character of substring to copy.
6230  __ Addu(t1, t1, a3);
6231 
6232  // Locate first character of result.
6233  __ Addu(a1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6234 
6235  // v0: result string
6236  // a1: first character of result string
6237  // a2: result string length
6238  // t1: first character of substring to copy
6241  masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
6242  __ jmp(&return_v0);
6243 
6244  // Allocate and copy the resulting two-byte string.
6245  __ bind(&two_byte_sequential);
6246  __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime);
6247 
6248  // Locate first character of substring to copy.
6249  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
6250  __ sll(t0, a3, 1);
6251  __ Addu(t1, t1, t0);
6252  // Locate first character of result.
6253  __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6254 
6255  // v0: result string.
6256  // a1: first character of result.
6257  // a2: result length.
6258  // t1: first character of substring to copy.
6261  masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
6262 
6263  __ bind(&return_v0);
6264  Counters* counters = masm->isolate()->counters();
6265  __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
6266  __ DropAndRet(3);
6267 
6268  // Just jump to runtime to create the sub string.
6269  __ bind(&runtime);
6270  __ TailCallRuntime(Runtime::kSubString, 3, 1);
6271 }
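// Illustrative sketch (not part of this file) of the decision made above when
// taking a substring: return the original string for a full-range request,
// build a SlicedString when slices are enabled and the result is at least
// SlicedString::kMinLength characters long, and otherwise copy the characters
// into a fresh sequential string. Names below are assumptions for the example.
enum class SubStringStrategySketch { kReturnOriginal, kMakeSlice, kCopy };

static SubStringStrategySketch ChooseSubStringStrategySketch(
    int from, int to, int original_length, bool string_slices_enabled,
    int sliced_string_min_length) {
  int result_length = to - from;
  if (result_length == original_length)
    return SubStringStrategySketch::kReturnOriginal;
  if (string_slices_enabled && result_length >= sliced_string_min_length)
    return SubStringStrategySketch::kMakeSlice;
  return SubStringStrategySketch::kCopy;
}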
6272 
6273 
6274 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
6275  Register left,
6276  Register right,
6277  Register scratch1,
6278  Register scratch2,
6279  Register scratch3) {
6280  Register length = scratch1;
6281 
6282  // Compare lengths.
6283  Label strings_not_equal, check_zero_length;
6284  __ lw(length, FieldMemOperand(left, String::kLengthOffset));
6285  __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
6286  __ Branch(&check_zero_length, eq, length, Operand(scratch2));
6287  __ bind(&strings_not_equal);
6288  __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
6289  __ Ret();
6290 
6291  // Check if the length is zero.
6292  Label compare_chars;
6293  __ bind(&check_zero_length);
6294  STATIC_ASSERT(kSmiTag == 0);
6295  __ Branch(&compare_chars, ne, length, Operand(zero_reg));
6296  __ li(v0, Operand(Smi::FromInt(EQUAL)));
6297  __ Ret();
6298 
6299  // Compare characters.
6300  __ bind(&compare_chars);
6301 
6302  GenerateAsciiCharsCompareLoop(masm,
6303  left, right, length, scratch2, scratch3, v0,
6304  &strings_not_equal);
6305 
6306  // Characters are equal.
6307  __ li(v0, Operand(Smi::FromInt(EQUAL)));
6308  __ Ret();
6309 }
6310 
6311 
6312 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
6313  Register left,
6314  Register right,
6315  Register scratch1,
6316  Register scratch2,
6317  Register scratch3,
6318  Register scratch4) {
6319  Label result_not_equal, compare_lengths;
6320  // Find minimum length and length difference.
6321  __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
6322  __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
6323  __ Subu(scratch3, scratch1, Operand(scratch2));
6324  Register length_delta = scratch3;
6325  __ slt(scratch4, scratch2, scratch1);
6326  __ Movn(scratch1, scratch2, scratch4);
6327  Register min_length = scratch1;
6328  STATIC_ASSERT(kSmiTag == 0);
6329  __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
6330 
6331  // Compare loop.
6332  GenerateAsciiCharsCompareLoop(masm,
6333  left, right, min_length, scratch2, scratch4, v0,
6334  &result_not_equal);
6335 
6336  // Compare lengths - strings up to min-length are equal.
6337  __ bind(&compare_lengths);
6338  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
6339  // Use length_delta as result if it's zero.
6340  __ mov(scratch2, length_delta);
6341  __ mov(scratch4, zero_reg);
6342  __ mov(v0, zero_reg);
6343 
6344  __ bind(&result_not_equal);
6345  // Conditionally update the result based either on length_delta or
6346  // the last comparison performed in the loop above.
6347  Label ret;
6348  __ Branch(&ret, eq, scratch2, Operand(scratch4));
6349  __ li(v0, Operand(Smi::FromInt(GREATER)));
6350  __ Branch(&ret, gt, scratch2, Operand(scratch4));
6351  __ li(v0, Operand(Smi::FromInt(LESS)));
6352  __ bind(&ret);
6353  __ Ret();
6354 }
6355 
6356 
6357 void StringCompareStub::GenerateAsciiCharsCompareLoop(
6358  MacroAssembler* masm,
6359  Register left,
6360  Register right,
6361  Register length,
6362  Register scratch1,
6363  Register scratch2,
6364  Register scratch3,
6365  Label* chars_not_equal) {
6366  // Change index to run from -length to -1 by adding length to string
6367  // start. This means that loop ends when index reaches zero, which
6368  // doesn't need an additional compare.
6369  __ SmiUntag(length);
6370  __ Addu(scratch1, length,
6372  __ Addu(left, left, Operand(scratch1));
6373  __ Addu(right, right, Operand(scratch1));
6374  __ Subu(length, zero_reg, length);
6375  Register index = length; // index = -length;
6376 
6377 
6378  // Compare loop.
6379  Label loop;
6380  __ bind(&loop);
6381  __ Addu(scratch3, left, index);
6382  __ lbu(scratch1, MemOperand(scratch3));
6383  __ Addu(scratch3, right, index);
6384  __ lbu(scratch2, MemOperand(scratch3));
6385  __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
6386  __ Addu(index, index, 1);
6387  __ Branch(&loop, ne, index, Operand(zero_reg));
6388 }
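// Illustrative sketch (not part of this file) of the comparison scheme used by
// the two helpers above: both strings are compared over the minimum length
// using an index that runs from -length up to 0, so the loop needs no separate
// end-of-string compare; if all compared characters match, the length
// difference decides the result. Names are assumptions for the example, and
// -1/0/1 stand in for the LESS/EQUAL/GREATER smis returned by the stub.
static int CompareFlatAsciiSketch(const unsigned char* left,
                                  const unsigned char* right,
                                  int left_length, int right_length) {
  int min_length = left_length < right_length ? left_length : right_length;
  const unsigned char* left_end = left + min_length;
  const unsigned char* right_end = right + min_length;
  for (int index = -min_length; index != 0; index++) {
    unsigned char l = left_end[index];
    unsigned char r = right_end[index];
    if (l != r) return l < r ? -1 : 1;
  }
  int delta = left_length - right_length;
  return delta == 0 ? 0 : (delta < 0 ? -1 : 1);
}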
6389 
6390 
6391 void StringCompareStub::Generate(MacroAssembler* masm) {
6392  Label runtime;
6393 
6394  Counters* counters = masm->isolate()->counters();
6395 
6396  // Stack frame on entry.
6397  // sp[0]: right string
6398  // sp[4]: left string
6399  __ lw(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
6400  __ lw(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
6401 
6402  Label not_same;
6403  __ Branch(&not_same, ne, a0, Operand(a1));
6404  STATIC_ASSERT(EQUAL == 0);
6405  STATIC_ASSERT(kSmiTag == 0);
6406  __ li(v0, Operand(Smi::FromInt(EQUAL)));
6407  __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
6408  __ DropAndRet(2);
6409 
6410  __ bind(&not_same);
6411 
6412  // Check that both objects are sequential ASCII strings.
6413  __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
6414 
6415  // Compare flat ASCII strings natively. Remove arguments from stack first.
6416  __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
6417  __ Addu(sp, sp, Operand(2 * kPointerSize));
6418  GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
6419 
6420  __ bind(&runtime);
6421  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
6422 }
6423 
6424 
6425 void StringAddStub::Generate(MacroAssembler* masm) {
6426  Label call_runtime, call_builtin;
6427  Builtins::JavaScript builtin_id = Builtins::ADD;
6428 
6429  Counters* counters = masm->isolate()->counters();
6430 
6431  // Stack on entry:
6432  // sp[0]: second argument (right).
6433  // sp[4]: first argument (left).
6434 
6435  // Load the two arguments.
6436  __ lw(a0, MemOperand(sp, 1 * kPointerSize)); // First argument.
6437  __ lw(a1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
6438 
6439  // Make sure that both arguments are strings if not known in advance.
6440  if (flags_ == NO_STRING_ADD_FLAGS) {
6441  __ JumpIfEitherSmi(a0, a1, &call_runtime);
6442  // Load instance types.
6447  STATIC_ASSERT(kStringTag == 0);
6448  // If either is not a string, go to runtime.
6449  __ Or(t4, t0, Operand(t1));
6450  __ And(t4, t4, Operand(kIsNotStringMask));
6451  __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
6452  } else {
6453  // Here at least one of the arguments is definitely a string.
6454  // We convert the one that is not known to be a string.
6455  if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
6456  ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
6457  GenerateConvertArgument(
6458  masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
6459  builtin_id = Builtins::STRING_ADD_RIGHT;
6460  } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
6461  ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
6462  GenerateConvertArgument(
6463  masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
6464  builtin_id = Builtins::STRING_ADD_LEFT;
6465  }
6466  }
6467 
6468  // Both arguments are strings.
6469  // a0: first string
6470  // a1: second string
6471  // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6472  // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6473  {
6474  Label strings_not_empty;
6475  // Check if either of the strings are empty. In that case return the other.
6476  // These tests use a zero-length check on the string length, which is a Smi.
6477  // Assert that Smi::FromInt(0) is really 0.
6478  STATIC_ASSERT(kSmiTag == 0);
6479  ASSERT(Smi::FromInt(0) == 0);
6482  __ mov(v0, a0); // Assume we'll return first string (from a0).
6483  __ Movz(v0, a1, a2); // If first is empty, return second (from a1).
6484  __ slt(t4, zero_reg, a2); // if (a2 > 0) t4 = 1.
6485  __ slt(t5, zero_reg, a3); // if (a3 > 0) t5 = 1.
6486  __ and_(t4, t4, t5); // Branch if both strings were non-empty.
6487  __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg));
6488 
6489  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6490  __ DropAndRet(2);
6491 
6492  __ bind(&strings_not_empty);
6493  }
6494 
6495  // Untag both string-lengths.
6496  __ sra(a2, a2, kSmiTagSize);
6497  __ sra(a3, a3, kSmiTagSize);
6498 
6499  // Both strings are non-empty.
6500  // a0: first string
6501  // a1: second string
6502  // a2: length of first string
6503  // a3: length of second string
6504  // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6505  // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6506  // Look at the length of the result of adding the two strings.
6507  Label string_add_flat_result, longer_than_two;
6508  // Adding two lengths can't overflow.
6510  __ Addu(t2, a2, Operand(a3));
6511  // Use the symbol table when adding two one-character strings, as it
6512  // helps later optimizations to return a symbol here.
6513  __ Branch(&longer_than_two, ne, t2, Operand(2));
6514 
6515  // Check that both strings are non-external ASCII strings.
6516  if (flags_ != NO_STRING_ADD_FLAGS) {
6521  }
6522  __ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3,
6523  &call_runtime);
6524 
6525  // Get the two characters forming the sub string.
6528 
6529  // Try to lookup two character string in symbol table. If it is not found
6530  // just allocate a new one.
6531  Label make_two_character_string;
6533  masm, a2, a3, t2, t3, t0, t1, t5, &make_two_character_string);
6534  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6535  __ DropAndRet(2);
6536 
6537  __ bind(&make_two_character_string);
6538  // The resulting string has length 2 and the first characters of the two
6539  // strings are combined into a single halfword in the a2 register.
6540  // So we can fill the resulting string without two loops, using a single
6541  // halfword store instruction (which assumes that the processor is
6542  // in little-endian mode).
6543  __ li(t2, Operand(2));
6544  __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
6546  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6547  __ DropAndRet(2);
6548 
6549  __ bind(&longer_than_two);
6550  // Check if resulting string will be flat.
6551  __ Branch(&string_add_flat_result, lt, t2, Operand(ConsString::kMinLength));
6552  // Handle exceptionally long strings in the runtime system.
6553  STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
6555  // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
6556  __ Branch(&call_runtime, hs, t2, Operand(String::kMaxLength + 1));
6557 
6558  // If result is not supposed to be flat, allocate a cons string object.
6559  // If both strings are ASCII the result is an ASCII cons string.
6560  if (flags_ != NO_STRING_ADD_FLAGS) {
6565  }
6566  Label non_ascii, allocated, ascii_data;
6568  // Branch to non_ascii if either string-encoding field is zero (non-ASCII).
6569  __ And(t4, t0, Operand(t1));
6570  __ And(t4, t4, Operand(kStringEncodingMask));
6571  __ Branch(&non_ascii, eq, t4, Operand(zero_reg));
6572 
6573  // Allocate an ASCII cons string.
6574  __ bind(&ascii_data);
6575  __ AllocateAsciiConsString(v0, t2, t0, t1, &call_runtime);
6576  __ bind(&allocated);
6577  // Fill the fields of the cons string.
6580  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6581  __ DropAndRet(2);
6582 
6583  __ bind(&non_ascii);
6584  // At least one of the strings is two-byte. Check whether it happens
6585  // to contain only ASCII characters.
6586  // t0: first instance type.
6587  // t1: second instance type.
6588  // Branch to ascii_data if _both_ instances have kAsciiDataHintMask set.
6589  __ And(at, t0, Operand(kAsciiDataHintMask));
6590  __ and_(at, at, t1);
6591  __ Branch(&ascii_data, ne, at, Operand(zero_reg));
6592 
6593  __ xor_(t0, t0, t1);
6595  __ And(t0, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
6596  __ Branch(&ascii_data, eq, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
6597 
6598  // Allocate a two byte cons string.
6599  __ AllocateTwoByteConsString(v0, t2, t0, t1, &call_runtime);
6600  __ Branch(&allocated);
6601 
6602  // We cannot encounter sliced strings or cons strings here since:
6604  // Handle creating a flat result from either external or sequential strings.
6605  // Locate the first characters' locations.
6606  // a0: first string
6607  // a1: second string
6608  // a2: length of first string
6609  // a3: length of second string
6610  // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6611  // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6612  // t2: sum of lengths.
6613  Label first_prepared, second_prepared;
6614  __ bind(&string_add_flat_result);
6615  if (flags_ != NO_STRING_ADD_FLAGS) {
6620  }
6621  // Check whether both strings have the same encoding.
6622  __ Xor(t3, t0, Operand(t1));
6623  __ And(t3, t3, Operand(kStringEncodingMask));
6624  __ Branch(&call_runtime, ne, t3, Operand(zero_reg));
6625 
6627  __ And(t4, t0, Operand(kStringRepresentationMask));
6628 
6630  Label skip_first_add;
6631  __ Branch(&skip_first_add, ne, t4, Operand(zero_reg));
6632  __ Branch(USE_DELAY_SLOT, &first_prepared);
6634  __ bind(&skip_first_add);
6635  // External string: rule out short external string and load string resource.
6637  __ And(t4, t0, Operand(kShortExternalStringMask));
6638  __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
6640  __ bind(&first_prepared);
6641 
6643  __ And(t4, t1, Operand(kStringRepresentationMask));
6645  Label skip_second_add;
6646  __ Branch(&skip_second_add, ne, t4, Operand(zero_reg));
6647  __ Branch(USE_DELAY_SLOT, &second_prepared);
6649  __ bind(&skip_second_add);
6650  // External string: rule out short external string and load string resource.
6652  __ And(t4, t1, Operand(kShortExternalStringMask));
6653  __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
6655  __ bind(&second_prepared);
6656 
6657  Label non_ascii_string_add_flat_result;
6658  // t3: first character of first string
6659  // a1: first character of second string
6660  // a2: length of first string
6661  // a3: length of second string
6662  // t2: sum of lengths.
6663  // Both strings have the same encoding.
6665  __ And(t4, t1, Operand(kStringEncodingMask));
6666  __ Branch(&non_ascii_string_add_flat_result, eq, t4, Operand(zero_reg));
6667 
6668  __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
6669  __ Addu(t2, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6670  // v0: result string.
6671  // t3: first character of first string.
6672  // a1: first character of second string
6673  // a2: length of first string.
6674  // a3: length of second string.
6675  // t2: first character of result.
6676 
6677  StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, true);
6678  // t2: next character of result.
6679  StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true);
6680  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6681  __ DropAndRet(2);
6682 
6683  __ bind(&non_ascii_string_add_flat_result);
6684  __ AllocateTwoByteString(v0, t2, t0, t1, t5, &call_runtime);
6685  __ Addu(t2, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6686  // v0: result string.
6687  // t3: first character of first string.
6688  // a1: first character of second string.
6689  // a2: length of first string.
6690  // a3: length of second string.
6691  // t2: first character of result.
6692  StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, false);
6693  // t2: next character of result.
6694  StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false);
6695 
6696  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6697  __ DropAndRet(2);
6698 
6699  // Just jump to runtime to add the two strings.
6700  __ bind(&call_runtime);
6701  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
6702 
6703  if (call_builtin.is_linked()) {
6704  __ bind(&call_builtin);
6705  __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
6706  }
6707 }
6708 
6709 
6710 void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
6711  int stack_offset,
6712  Register arg,
6713  Register scratch1,
6714  Register scratch2,
6715  Register scratch3,
6716  Register scratch4,
6717  Label* slow) {
6718  // First check if the argument is already a string.
6719  Label not_string, done;
6720  __ JumpIfSmi(arg, &not_string);
6721  __ GetObjectType(arg, scratch1, scratch1);
6722  __ Branch(&done, lt, scratch1, Operand(FIRST_NONSTRING_TYPE));
6723 
6724  // Check the number to string cache.
6725  Label not_cached;
6726  __ bind(&not_string);
6727  // Puts the cached result into scratch1.
6728  NumberToStringStub::GenerateLookupNumberStringCache(masm,
6729  arg,
6730  scratch1,
6731  scratch2,
6732  scratch3,
6733  scratch4,
6734  false,
6735  &not_cached);
6736  __ mov(arg, scratch1);
6737  __ sw(arg, MemOperand(sp, stack_offset));
6738  __ jmp(&done);
6739 
6740  // Check if the argument is a safe string wrapper.
6741  __ bind(&not_cached);
6742  __ JumpIfSmi(arg, slow);
6743  __ GetObjectType(arg, scratch1, scratch2); // map -> scratch1.
6744  __ Branch(slow, ne, scratch2, Operand(JS_VALUE_TYPE));
6745  __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
6746  __ li(scratch4, 1 << Map::kStringWrapperSafeForDefaultValueOf);
6747  __ And(scratch2, scratch2, scratch4);
6748  __ Branch(slow, ne, scratch2, Operand(scratch4));
6749  __ lw(arg, FieldMemOperand(arg, JSValue::kValueOffset));
6750  __ sw(arg, MemOperand(sp, stack_offset));
6751 
6752  __ bind(&done);
6753 }
6754 
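GenerateConvertArgument tries three progressively more expensive ways to turn a stack argument into a string before giving up. A sketch of the order of checks, using stand-in fields rather than V8's real map and cache tests:

// Stand-in argument descriptor; the real stub derives these facts from the
// object's map and from the number-to-string cache.
struct ArgumentInfo {
  bool is_smi;
  bool is_string;
  bool number_string_cache_hit;
  bool is_safe_string_wrapper;  // JSValue with the default valueOf still in place
};

// Mirrors the order of checks above; anything else falls through to |slow|.
const char* ConvertArgument(const ArgumentInfo& arg) {
  if (!arg.is_smi && arg.is_string) return "already a string, keep as-is";
  if (arg.number_string_cache_hit) return "replace with the cached string";
  if (!arg.is_smi && arg.is_safe_string_wrapper) return "load the wrapped string value";
  return "slow path";
}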
6755 
6756 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
6757  ASSERT(state_ == CompareIC::SMIS);
6758  Label miss;
6759  __ Or(a2, a1, a0);
6760  __ JumpIfNotSmi(a2, &miss);
6761 
6762  if (GetCondition() == eq) {
6763  // For equality we do not care about the sign of the result.
6764  __ Subu(v0, a0, a1);
6765  } else {
6766  // Untag before subtracting to avoid handling overflow.
6767  __ SmiUntag(a1);
6768  __ SmiUntag(a0);
6769  __ Subu(v0, a1, a0);
6770  }
6771  __ Ret();
6772 
6773  __ bind(&miss);
6774  GenerateMiss(masm);
6775 }
6776 
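GenerateSmis encodes the comparison result as the sign of a subtraction: for equality only zero versus non-zero matters, so the tagged values can be subtracted directly, while for ordering both operands are untagged first so the 31-bit difference cannot overflow. A small self-contained model of that arithmetic:

#include <cassert>
#include <cstdint>

// 32-bit V8 smis: kSmiTag == 0, kSmiTagSize == 1, so a smi is the value
// shifted left by one.
int32_t SmiUntag(int32_t smi) { return smi >> 1; }

// Zero means equal; in the ordering case a negative result means
// left < right and a positive result means left > right.
int32_t CompareSmis(int32_t left_smi, int32_t right_smi, bool equality_only) {
  if (equality_only) {
    return left_smi - right_smi;                      // sign is irrelevant for ==
  }
  return SmiUntag(left_smi) - SmiUntag(right_smi);    // untag to avoid overflow
}

int main() {
  assert(CompareSmis(4 << 1, 4 << 1, true) == 0);
  assert(CompareSmis(3 << 1, 7 << 1, false) < 0);
  return 0;
}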
6777 
6778 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
6779  ASSERT(state_ == CompareIC::HEAP_NUMBERS);
6780 
6781  Label generic_stub;
6782  Label unordered, maybe_undefined1, maybe_undefined2;
6783  Label miss;
6784  __ And(a2, a1, Operand(a0));
6785  __ JumpIfSmi(a2, &generic_stub);
6786 
6787  __ GetObjectType(a0, a2, a2);
6788  __ Branch(&maybe_undefined1, ne, a2, Operand(HEAP_NUMBER_TYPE));
6789  __ GetObjectType(a1, a2, a2);
6790  __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
6791 
6792  // Inlining the double comparison and falling back to the general compare
6793  // stub if NaN is involved or FPU is unsupported.
6795  CpuFeatures::Scope scope(FPU);
6796 
6797  // Load left and right operand.
6798  __ Subu(a2, a1, Operand(kHeapObjectTag));
6800  __ Subu(a2, a0, Operand(kHeapObjectTag));
6802 
6803  // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
6804  Label fpu_eq, fpu_lt;
6805  // Test if equal, and also handle the unordered/NaN case.
6806  __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
6807 
6808  // Test if less (unordered case is already handled).
6809  __ BranchF(&fpu_lt, NULL, lt, f0, f2);
6810 
6811  // Otherwise it's greater, so just fall thru, and return.
6812  __ li(v0, Operand(GREATER));
6813  __ Ret();
6814 
6815  __ bind(&fpu_eq);
6816  __ li(v0, Operand(EQUAL));
6817  __ Ret();
6818 
6819  __ bind(&fpu_lt);
6820  __ li(v0, Operand(LESS));
6821  __ Ret();
6822  }
6823 
6824  __ bind(&unordered);
6825 
6826  CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
6827  __ bind(&generic_stub);
6828  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
6829 
6830  __ bind(&maybe_undefined1);
6832  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
6833  __ Branch(&miss, ne, a0, Operand(at));
6834  __ GetObjectType(a1, a2, a2);
6835  __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
6836  __ jmp(&unordered);
6837  }
6838 
6839  __ bind(&maybe_undefined2);
6841  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
6842  __ Branch(&unordered, eq, a1, Operand(at));
6843  }
6844 
6845  __ bind(&miss);
6846  GenerateMiss(masm);
6847 }
6848 
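The FPU path above maps a double comparison onto the LESS/EQUAL/GREATER convention and defers NaN operands to the generic stub via the unordered label. A sketch of that mapping; the enum values follow the stub's convention, and kUnordered only marks the fallback case here:

#include <cmath>

enum DoubleCompareResult { kLess = -1, kEqual = 0, kGreater = 1, kUnordered = 2 };

// left/right correspond to the a1/a0 operands loaded into f0/f2 above.
DoubleCompareResult CompareHeapNumbers(double left, double right) {
  if (std::isnan(left) || std::isnan(right)) return kUnordered;  // generic stub handles this
  if (left == right) return kEqual;
  if (left < right) return kLess;
  return kGreater;
}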
6849 
6850 void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
6851  ASSERT(state_ == CompareIC::SYMBOLS);
6852  Label miss;
6853 
6854  // Registers containing left and right operands respectively.
6855  Register left = a1;
6856  Register right = a0;
6857  Register tmp1 = a2;
6858  Register tmp2 = a3;
6859 
6860  // Check that both operands are heap objects.
6861  __ JumpIfEitherSmi(left, right, &miss);
6862 
6863  // Check that both operands are symbols.
6864  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
6865  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
6866  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
6867  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
6868  STATIC_ASSERT(kSymbolTag != 0);
6869  __ And(tmp1, tmp1, Operand(tmp2));
6870  __ And(tmp1, tmp1, kIsSymbolMask);
6871  __ Branch(&miss, eq, tmp1, Operand(zero_reg));
6872  // Make sure a0 is non-zero. At this point input operands are
6873  // guaranteed to be non-zero.
6874  ASSERT(right.is(a0));
6875  STATIC_ASSERT(EQUAL == 0);
6876  STATIC_ASSERT(kSmiTag == 0);
6877  __ mov(v0, right);
6878  // Symbols are compared by identity.
6879  __ Ret(ne, left, Operand(right));
6880  __ li(v0, Operand(Smi::FromInt(EQUAL)));
6881  __ Ret();
6882 
6883  __ bind(&miss);
6884  GenerateMiss(masm);
6885 }
6886 
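Symbols are interned, so GenerateSymbols only needs pointer identity: a non-zero result (the right operand itself) means "not equal" and the smi 0 (EQUAL) means "equal". A sketch of that result encoding:

#include <cstdint>

// Returns a non-zero value when the symbols differ and 0 (Smi::FromInt(EQUAL)
// with kSmiTag == 0) when they are identical; callers only test against zero.
intptr_t CompareSymbolsByIdentity(const void* left, const void* right) {
  if (left != right) return reinterpret_cast<intptr_t>(right);  // non-zero heap pointer
  return 0;
}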
6887 
6888 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
6889  ASSERT(state_ == CompareIC::STRINGS);
6890  Label miss;
6891 
6892  bool equality = Token::IsEqualityOp(op_);
6893 
6894  // Registers containing left and right operands respectively.
6895  Register left = a1;
6896  Register right = a0;
6897  Register tmp1 = a2;
6898  Register tmp2 = a3;
6899  Register tmp3 = t0;
6900  Register tmp4 = t1;
6901  Register tmp5 = t2;
6902 
6903  // Check that both operands are heap objects.
6904  __ JumpIfEitherSmi(left, right, &miss);
6905 
6906  // Check that both operands are strings. This leaves the instance
6907  // types loaded in tmp1 and tmp2.
6908  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
6909  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
6910  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
6911  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
6913  __ Or(tmp3, tmp1, tmp2);
6914  __ And(tmp5, tmp3, Operand(kIsNotStringMask));
6915  __ Branch(&miss, ne, tmp5, Operand(zero_reg));
6916 
6917  // Fast check for identical strings.
6918  Label left_ne_right;
6919  STATIC_ASSERT(EQUAL == 0);
6920  STATIC_ASSERT(kSmiTag == 0);
6921  __ Branch(&left_ne_right, ne, left, Operand(right));
6922  __ Ret(USE_DELAY_SLOT);
6923  __ mov(v0, zero_reg); // In the delay slot.
6924  __ bind(&left_ne_right);
6925 
6926  // Handle not identical strings.
6927 
6928  // Check that both strings are symbols. If they are, we're done
6929  // because we already know they are not identical.
6930  if (equality) {
6931  ASSERT(GetCondition() == eq);
6932  STATIC_ASSERT(kSymbolTag != 0);
6933  __ And(tmp3, tmp1, Operand(tmp2));
6934  __ And(tmp5, tmp3, Operand(kIsSymbolMask));
6935  Label is_symbol;
6936  __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg));
6937  // Make sure a0 is non-zero. At this point input operands are
6938  // guaranteed to be non-zero.
6939  ASSERT(right.is(a0));
6940  __ Ret(USE_DELAY_SLOT);
6941  __ mov(v0, a0); // In the delay slot.
6942  __ bind(&is_symbol);
6943  }
6944 
6945  // Check that both strings are sequential ASCII.
6946  Label runtime;
6947  __ JumpIfBothInstanceTypesAreNotSequentialAscii(
6948  tmp1, tmp2, tmp3, tmp4, &runtime);
6949 
6950  // Compare flat ASCII strings. Returns when done.
6951  if (equality) {
6953  masm, left, right, tmp1, tmp2, tmp3);
6954  } else {
6956  masm, left, right, tmp1, tmp2, tmp3, tmp4);
6957  }
6958 
6959  // Handle more complex cases in runtime.
6960  __ bind(&runtime);
6961  __ Push(left, right);
6962  if (equality) {
6963  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
6964  } else {
6965  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
6966  }
6967 
6968  __ bind(&miss);
6969  GenerateMiss(masm);
6970 }
6971 
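For string equality the stub tries three fast paths before calling the runtime: identical objects are equal, two distinct symbols are known to be unequal, and two sequential ASCII strings can be compared character by character inline. A sketch of that dispatch, with stand-in flags instead of instance-type bits:

// Stand-in for the instance-type bits the stub inspects.
struct StringInfo {
  const void* object;        // identity of the heap object
  bool is_symbol;
  bool is_sequential_ascii;
};

enum StringEqualityPath { kKnownEqual, kKnownNotEqual, kFlatAsciiCompare, kRuntimeCall };

StringEqualityPath ChooseEqualityPath(const StringInfo& left, const StringInfo& right) {
  if (left.object == right.object) return kKnownEqual;          // identical strings
  if (left.is_symbol && right.is_symbol) return kKnownNotEqual; // symbols are unique
  if (left.is_sequential_ascii && right.is_sequential_ascii)
    return kFlatAsciiCompare;                                   // inline character compare
  return kRuntimeCall;                                          // Runtime::kStringEquals
}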
6972 
6973 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
6974  ASSERT(state_ == CompareIC::OBJECTS);
6975  Label miss;
6976  __ And(a2, a1, Operand(a0));
6977  __ JumpIfSmi(a2, &miss);
6978 
6979  __ GetObjectType(a0, a2, a2);
6980  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
6981  __ GetObjectType(a1, a2, a2);
6982  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
6983 
6984  ASSERT(GetCondition() == eq);
6985  __ Ret(USE_DELAY_SLOT);
6986  __ subu(v0, a0, a1);
6987 
6988  __ bind(&miss);
6989  GenerateMiss(masm);
6990 }
6991 
6992 
6993 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
6994  Label miss;
6995  __ And(a2, a1, a0);
6996  __ JumpIfSmi(a2, &miss);
6999  __ Branch(&miss, ne, a2, Operand(known_map_));
7000  __ Branch(&miss, ne, a3, Operand(known_map_));
7001 
7002  __ Ret(USE_DELAY_SLOT);
7003  __ subu(v0, a0, a1);
7004 
7005  __ bind(&miss);
7006  GenerateMiss(masm);
7007 }
7008 
7009 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
7010  {
7011  // Call the runtime system in a fresh internal frame.
7012  ExternalReference miss =
7013  ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
7014  FrameScope scope(masm, StackFrame::INTERNAL);
7015  __ Push(a1, a0);
7016  __ push(ra);
7017  __ Push(a1, a0);
7018  __ li(t0, Operand(Smi::FromInt(op_)));
7019  __ addiu(sp, sp, -kPointerSize);
7020  __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
7021  __ sw(t0, MemOperand(sp)); // In the delay slot.
7022  // Compute the entry point of the rewritten stub.
7023  __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
7024  // Restore registers.
7025  __ Pop(a1, a0, ra);
7026  }
7027  __ Jump(a2);
7028 }
7029 
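GenerateMiss hands both operands and the operation token to the CompareIC_Miss runtime function, which returns the rewritten stub's Code object; the jump target is the first instruction past the Code header. A sketch of that address computation; kHeapObjectTag is 1 in V8, while the header size below is only a placeholder for Code::kHeaderSize:

#include <cstdint>

const intptr_t kHeapObjectTagValue = 1;  // V8's heap object tag
const intptr_t kCodeHeaderSizePlaceholder =
    5 * static_cast<intptr_t>(sizeof(void*));  // assumed stand-in for Code::kHeaderSize

// v0 holds a tagged pointer to the new Code object; strip the tag and skip
// the header to reach the entry point, exactly as the Addu above does.
intptr_t CodeEntryPoint(intptr_t tagged_code_object) {
  return tagged_code_object + kCodeHeaderSizePlaceholder - kHeapObjectTagValue;
}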
7030 
7031 void DirectCEntryStub::Generate(MacroAssembler* masm) {
7032  // No need to pop or drop anything, LeaveExitFrame will restore the old
7033  // stack, thus dropping the allocated space for the return value.
7034  // The saved ra is after the reserved stack space for the 4 args.
7035  __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
7036 
7037  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
7038  // In case of an error the return address may point to a memory area
7039  // filled with kZapValue by the GC.
7040  // Dereference the address and check for this.
7041  __ lw(t0, MemOperand(t9));
7042  __ Assert(ne, "Received invalid return address.", t0,
7043  Operand(reinterpret_cast<uint32_t>(kZapValue)));
7044  }
7045  __ Jump(t9);
7046 }
7047 
7048 
7049 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
7050  ExternalReference function) {
7051  __ li(t9, Operand(function));
7052  this->GenerateCall(masm, t9);
7053 }
7054 
7055 
7056 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
7057  Register target) {
7058  __ Move(t9, target);
7059  __ AssertStackIsAligned();
7060  // Allocate space for arg slots.
7061  __ Subu(sp, sp, kCArgsSlotsSize);
7062 
7063  // Block the trampoline pool through the whole function to make sure the
7064  // number of generated instructions is constant.
7065  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
7066 
7067  // We need to get the current 'pc' value, which is not available on MIPS.
7068  Label find_ra;
7069  masm->bal(&find_ra); // ra = pc + 8.
7070  masm->nop(); // Branch delay slot nop.
7071  masm->bind(&find_ra);
7072 
7073  const int kNumInstructionsToJump = 6;
7074  masm->addiu(ra, ra, kNumInstructionsToJump * kPointerSize);
7075  // Push return address (accessible to GC through exit frame pc).
7076  // This spot for ra was reserved in EnterExitFrame.
7077  masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
7078  masm->li(ra,
7079  Operand(reinterpret_cast<intptr_t>(GetCode().location()),
7080  RelocInfo::CODE_TARGET),
7081  CONSTANT_SIZE);
7082  // Call the function.
7083  masm->Jump(t9);
7084  // Make sure the stored 'ra' points to this position.
7085  ASSERT_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra));
7086 }
7087 
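The bal/nop pair above loads ra with the address of find_ra (pc + 8), and adding kNumInstructionsToJump instruction sizes makes the stored return address point just past the final Jump(t9), which is where the C call must resume. A worked version of that arithmetic (4-byte MIPS instructions; the code address is hypothetical):

#include <cassert>
#include <cstdint>

const uint32_t kInstrSizeBytes = 4;    // every MIPS instruction is 4 bytes
const int kNumInstructionsToJump = 6;  // matches the constant in the stub

uint32_t StoredReturnAddress(uint32_t find_ra_address) {
  return find_ra_address + kNumInstructionsToJump * kInstrSizeBytes;
}

int main() {
  uint32_t find_ra = 0x40001000;  // hypothetical address of the find_ra label
  assert(StoredReturnAddress(find_ra) == find_ra + 24);
  return 0;
}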
7088 
7089 void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
7090  Label* miss,
7091  Label* done,
7092  Register receiver,
7093  Register properties,
7094  Handle<String> name,
7095  Register scratch0) {
7096  // If none of the slots probed for the hash value (probes 1 to kProbes - 1)
7097  // holds the name, and the kProbes-th slot is unused (its name is the
7098  // undefined value), the hash table is guaranteed not to contain the
7099  // property. This holds even if some slots represent deleted properties
7100  // (their names are the hole value).
7101  for (int i = 0; i < kInlinedProbes; i++) {
7102  // scratch0 points to properties hash.
7103  // Compute the masked index: (hash + i + i * i) & mask.
7104  Register index = scratch0;
7105  // Capacity is smi 2^n.
7106  __ lw(index, FieldMemOperand(properties, kCapacityOffset));
7107  __ Subu(index, index, Operand(1));
7108  __ And(index, index, Operand(
7109  Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
7110 
7111  // Scale the index by multiplying by the entry size.
7113  __ sll(at, index, 1);
7114  __ Addu(index, index, at);
7115 
7116  Register entity_name = scratch0;
7117  // Having undefined at this place means the name is not contained.
7118  ASSERT_EQ(kSmiTagSize, 1);
7119  Register tmp = properties;
7120  __ sll(scratch0, index, 1);
7121  __ Addu(tmp, properties, scratch0);
7122  __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
7123 
7124  ASSERT(!tmp.is(entity_name));
7125  __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
7126  __ Branch(done, eq, entity_name, Operand(tmp));
7127 
7128  if (i != kInlinedProbes - 1) {
7129  // Load the hole ready for use below:
7130  __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
7131 
7132  // Stop if found the property.
7133  __ Branch(miss, eq, entity_name, Operand(Handle<String>(name)));
7134 
7135  Label the_hole;
7136  __ Branch(&the_hole, eq, entity_name, Operand(tmp));
7137 
7138  // Check if the entry name is not a symbol.
7139  __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
7140  __ lbu(entity_name,
7142  __ And(scratch0, entity_name, Operand(kIsSymbolMask));
7143  __ Branch(miss, eq, scratch0, Operand(zero_reg));
7144 
7145  __ bind(&the_hole);
7146 
7147  // Restore the properties.
7148  __ lw(properties,
7150  }
7151  }
7152 
7153  const int spill_mask =
7154  (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
7155  a2.bit() | a1.bit() | a0.bit() | v0.bit());
7156 
7157  __ MultiPush(spill_mask);
7158  __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
7159  __ li(a1, Operand(Handle<String>(name)));
7161  __ CallStub(&stub);
7162  __ mov(at, v0);
7163  __ MultiPop(spill_mask);
7164 
7165  __ Branch(done, eq, at, Operand(zero_reg));
7166  __ Branch(miss, ne, at, Operand(zero_reg));
7167 }
7168 
7169 
7170 // Probe the string dictionary in the |elements| register. Jump to the
7171 // |done| label if a property with the given name is found. Jump to
7172 // the |miss| label otherwise.
7173 // If lookup was successful |scratch2| will be equal to elements + 4 * index.
7174 void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
7175  Label* miss,
7176  Label* done,
7177  Register elements,
7178  Register name,
7179  Register scratch1,
7180  Register scratch2) {
7181  ASSERT(!elements.is(scratch1));
7182  ASSERT(!elements.is(scratch2));
7183  ASSERT(!name.is(scratch1));
7184  ASSERT(!name.is(scratch2));
7185 
7186  // Assert that name contains a string.
7187  if (FLAG_debug_code) __ AbortIfNotString(name);
7188 
7189  // Compute the capacity mask.
7190  __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
7191  __ sra(scratch1, scratch1, kSmiTagSize); // convert smi to int
7192  __ Subu(scratch1, scratch1, Operand(1));
7193 
7194  // Generate an unrolled loop that performs a few probes before
7195  // giving up. Measurements done on Gmail indicate that 2 probes
7196  // cover ~93% of loads from dictionaries.
7197  for (int i = 0; i < kInlinedProbes; i++) {
7198  // Compute the masked index: (hash + i + i * i) & mask.
7199  __ lw(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
7200  if (i > 0) {
7201  // Add the probe offset (i + i * i) left shifted to avoid right shifting
7202  // the hash in a separate instruction. The value hash + i + i * i is
7203  // right shifted by the srl that follows.
7204  ASSERT(StringDictionary::GetProbeOffset(i) <
7205  1 << (32 - String::kHashFieldOffset));
7206  __ Addu(scratch2, scratch2, Operand(
7207  StringDictionary::GetProbeOffset(i) << String::kHashShift));
7208  }
7209  __ srl(scratch2, scratch2, String::kHashShift);
7210  __ And(scratch2, scratch1, scratch2);
7211 
7212  // Scale the index by multiplying by the element size.
7214  // scratch2 = scratch2 * 3.
7215 
7216  __ sll(at, scratch2, 1);
7217  __ Addu(scratch2, scratch2, at);
7218 
7219  // Check if the key is identical to the name.
7220  __ sll(at, scratch2, 2);
7221  __ Addu(scratch2, elements, at);
7222  __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
7223  __ Branch(done, eq, name, Operand(at));
7224  }
7225 
7226  const int spill_mask =
7227  (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
7228  a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
7229  ~(scratch1.bit() | scratch2.bit());
7230 
7231  __ MultiPush(spill_mask);
7232  if (name.is(a0)) {
7233  ASSERT(!elements.is(a1));
7234  __ Move(a1, name);
7235  __ Move(a0, elements);
7236  } else {
7237  __ Move(a0, elements);
7238  __ Move(a1, name);
7239  }
7241  __ CallStub(&stub);
7242  __ mov(scratch2, a2);
7243  __ mov(at, v0);
7244  __ MultiPop(spill_mask);
7245 
7246  __ Branch(done, ne, at, Operand(zero_reg));
7247  __ Branch(miss, eq, at, Operand(zero_reg));
7248 }
7249 
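Both lookup helpers probe the dictionary at (hash + probe offset) & mask and scale the resulting entry index by the entry size of 3 words (key, value, details) before turning it into a byte offset. A sketch of that index arithmetic; kElementsStartOffset would still be added on top, and the 4-byte word size is the 32-bit MIPS kPointerSize:

#include <cstdint>

const uint32_t kEntrySizeWords = 3;  // key, value, details
const uint32_t kWordSizeBytes = 4;   // kPointerSize on 32-bit MIPS

// The i-th probe inspects this index; capacity is a power of two and the
// probe offset is the quadratic term the comments describe (i + i * i).
uint32_t ProbeIndex(uint32_t hash, uint32_t probe_offset, uint32_t capacity) {
  return (hash + probe_offset) & (capacity - 1);
}

// Byte offset of the probed entry's key slot relative to the elements start,
// mirroring the "index * 3" scaling followed by the shift by 2 above.
uint32_t EntryKeyOffset(uint32_t index) {
  return index * kEntrySizeWords * kWordSizeBytes;
}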
7250 
7251 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
7252  // This stub overrides SometimesSetsUpAFrame() to return false. That means
7253  // we cannot call anything that could cause a GC from this stub.
7254  // Registers:
7255  //  dictionary (a0): StringDictionary to probe.
7256  //  key (a1): the name to look up.
7257  //  index (a2): will hold the index of the entry if the lookup succeeds;
7258  //              may alias with the result register.
7259  //  result (v0): holds the outcome of the lookup.
7260  // Returns:
7261  //  result is zero if the lookup failed, non-zero otherwise.
7262 
7263  Register result = v0;
7264  Register dictionary = a0;
7265  Register key = a1;
7266  Register index = a2;
7267  Register mask = a3;
7268  Register hash = t0;
7269  Register undefined = t1;
7270  Register entry_key = t2;
7271 
7272  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
7273 
7274  __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
7275  __ sra(mask, mask, kSmiTagSize);
7276  __ Subu(mask, mask, Operand(1));
7277 
7279 
7280  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
7281 
7282  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
7283  // Compute the masked index: (hash + i + i * i) & mask.
7284  // Capacity is smi 2^n.
7285  if (i > 0) {
7286  // Add the probe offset (i + i * i) left shifted to avoid right shifting
7287  // the hash in a separate instruction. The value hash + i + i * i is
7288  // right shifted by the srl that follows.
7289  ASSERT(StringDictionary::GetProbeOffset(i) <
7290  1 << (32 - String::kHashFieldOffset));
7291  __ Addu(index, hash, Operand(
7292  StringDictionary::GetProbeOffset(i) << String::kHashShift));
7293  } else {
7294  __ mov(index, hash);
7295  }
7296  __ srl(index, index, String::kHashShift);
7297  __ And(index, mask, index);
7298 
7299  // Scale the index by multiplying by the entry size.
7301  // index *= 3.
7302  __ mov(at, index);
7303  __ sll(index, index, 1);
7304  __ Addu(index, index, at);
7305 
7306 
7307  ASSERT_EQ(kSmiTagSize, 1);
7308  __ sll(index, index, 2);
7309  __ Addu(index, index, dictionary);
7310  __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
7311 
7312  // Having undefined at this place means the name is not contained.
7313  __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
7314 
7315  // Stop if found the property.
7316  __ Branch(&in_dictionary, eq, entry_key, Operand(key));
7317 
7318  if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
7319  // Check if the entry name is not a symbol.
7320  __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
7321  __ lbu(entry_key,
7323  __ And(result, entry_key, Operand(kIsSymbolMask));
7324  __ Branch(&maybe_in_dictionary, eq, result, Operand(zero_reg));
7325  }
7326  }
7327 
7328  __ bind(&maybe_in_dictionary);
7329  // If we are doing negative lookup then probing failure should be
7330  // treated as a lookup success. For positive lookup probing failure
7331  // should be treated as lookup failure.
7332  if (mode_ == POSITIVE_LOOKUP) {
7333  __ Ret(USE_DELAY_SLOT);
7334  __ mov(result, zero_reg);
7335  }
7336 
7337  __ bind(&in_dictionary);
7338  __ Ret(USE_DELAY_SLOT);
7339  __ li(result, 1);
7340 
7341  __ bind(&not_in_dictionary);
7342  __ Ret(USE_DELAY_SLOT);
7343  __ mov(result, zero_reg);
7344 }
7345 
7346 
7347 struct AheadOfTimeWriteBarrierStubList {
7348  Register object, value, address;
7349  RememberedSetAction action;
7350 };
7351 
7352 #define REG(Name) { kRegister_ ## Name ## _Code }
7353 
7354 static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
7355  // Used in RegExpExecStub.
7356  { REG(s2), REG(s0), REG(t3), EMIT_REMEMBERED_SET },
7357  { REG(s2), REG(a2), REG(t3), EMIT_REMEMBERED_SET },
7358  // Used in CompileArrayPushCall.
7359  // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
7360  // Also used in KeyedStoreIC::GenerateGeneric.
7361  { REG(a3), REG(t0), REG(t1), EMIT_REMEMBERED_SET },
7362  // Used in CompileStoreGlobal.
7363  { REG(t0), REG(a1), REG(a2), OMIT_REMEMBERED_SET },
7364  // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
7365  { REG(a1), REG(a2), REG(a3), EMIT_REMEMBERED_SET },
7366  { REG(a3), REG(a2), REG(a1), EMIT_REMEMBERED_SET },
7367  // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
7368  { REG(a2), REG(a1), REG(a3), EMIT_REMEMBERED_SET },
7369  { REG(a3), REG(a1), REG(a2), EMIT_REMEMBERED_SET },
7370  // KeyedStoreStubCompiler::GenerateStoreFastElement.
7371  { REG(a3), REG(a2), REG(t0), EMIT_REMEMBERED_SET },
7372  { REG(a2), REG(a3), REG(t0), EMIT_REMEMBERED_SET },
7373  // ElementsTransitionGenerator::GenerateMapChangeElementTransition
7374  // and ElementsTransitionGenerator::GenerateSmiToDouble
7375  // and ElementsTransitionGenerator::GenerateDoubleToObject
7376  { REG(a2), REG(a3), REG(t5), EMIT_REMEMBERED_SET },
7377  { REG(a2), REG(a3), REG(t5), OMIT_REMEMBERED_SET },
7378  // ElementsTransitionGenerator::GenerateDoubleToObject
7379  { REG(t2), REG(a2), REG(a0), EMIT_REMEMBERED_SET },
7380  { REG(a2), REG(t2), REG(t5), EMIT_REMEMBERED_SET },
7381  // StoreArrayLiteralElementStub::Generate
7382  { REG(t1), REG(a0), REG(t2), EMIT_REMEMBERED_SET },
7383  // Null termination.
7384  { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
7385 };
7386 
7387 #undef REG
7388 
7389 
7391  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7392  !entry->object.is(no_reg);
7393  entry++) {
7394  if (object_.is(entry->object) &&
7395  value_.is(entry->value) &&
7396  address_.is(entry->address) &&
7397  remembered_set_action_ == entry->action &&
7398  save_fp_regs_mode_ == kDontSaveFPRegs) {
7399  return true;
7400  }
7401  }
7402  return false;
7403 }
7404 
7405 
7407  return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
7408 }
7409 
7410 
7413  stub1.GetCode()->set_is_pregenerated(true);
7414 }
7415 
7416 
7418  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7419  !entry->object.is(no_reg);
7420  entry++) {
7421  RecordWriteStub stub(entry->object,
7422  entry->value,
7423  entry->address,
7424  entry->action,
7425  kDontSaveFPRegs);
7426  stub.GetCode()->set_is_pregenerated(true);
7427  }
7428 }
7429 
7430 
7431 // Takes the input in 3 registers: address_, value_ and object_. A pointer to
7432 // the value has just been written into the object; this stub now makes sure
7433 // the GC is kept informed. The word in the object where the value has been
7434 // written is in the address register.
7435 void RecordWriteStub::Generate(MacroAssembler* masm) {
7436  Label skip_to_incremental_noncompacting;
7437  Label skip_to_incremental_compacting;
7438 
7439  // The first two branch+nop instructions are generated with labels so as to
7440  // get the offset fixed up correctly by the bind(Label*) call. We patch it
7441  // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
7442  // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
7443  // incremental heap marking.
7444  // See RecordWriteStub::Patch for details.
7445  __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
7446  __ nop();
7447  __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
7448  __ nop();
7449 
7450  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
7451  __ RememberedSetHelper(object_,
7452  address_,
7453  value_,
7454  save_fp_regs_mode_,
7456  }
7457  __ Ret();
7458 
7459  __ bind(&skip_to_incremental_noncompacting);
7460  GenerateIncremental(masm, INCREMENTAL);
7461 
7462  __ bind(&skip_to_incremental_compacting);
7463  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
7464 
7465  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
7466  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
7467 
7468  PatchBranchIntoNop(masm, 0);
7470 }
7471 
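The two patchable branches at the top of the stub act as a mode switch: with both patched to nops (the initial STORE_BUFFER_ONLY state set up by PatchBranchIntoNop) execution falls through to the store-buffer path, and re-enabling one of the branches selects the incremental or incremental-compacting path. A minimal model of that dispatch, not the actual patching code:

enum RecordWriteMode { kStoreBufferOnly, kIncremental, kIncrementalCompaction };

// first_is_branch / second_is_branch describe the two patchable slots at the
// start of the stub; incremental marking patches exactly one of them back
// into a real branch when it activates the stub.
RecordWriteMode ModeFromPatchedPrologue(bool first_is_branch, bool second_is_branch) {
  if (first_is_branch) return kIncremental;            // skip_to_incremental_noncompacting
  if (second_is_branch) return kIncrementalCompaction; // skip_to_incremental_compacting
  return kStoreBufferOnly;                             // both nops: initial state
}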
7472 
7473 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
7474  regs_.Save(masm);
7475 
7476  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
7477  Label dont_need_remembered_set;
7478 
7479  __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
7480  __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
7481  regs_.scratch0(),
7482  &dont_need_remembered_set);
7483 
7484  __ CheckPageFlag(regs_.object(),
7485  regs_.scratch0(),
7487  ne,
7488  &dont_need_remembered_set);
7489 
7490  // First notify the incremental marker if necessary, then update the
7491  // remembered set.
7492  CheckNeedsToInformIncrementalMarker(
7493  masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
7494  InformIncrementalMarker(masm, mode);
7495  regs_.Restore(masm);
7496  __ RememberedSetHelper(object_,
7497  address_,
7498  value_,
7499  save_fp_regs_mode_,
7501 
7502  __ bind(&dont_need_remembered_set);
7503  }
7504 
7505  CheckNeedsToInformIncrementalMarker(
7506  masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
7507  InformIncrementalMarker(masm, mode);
7508  regs_.Restore(masm);
7509  __ Ret();
7510 }
7511 
7512 
7513 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
7514  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
7515  int argument_count = 3;
7516  __ PrepareCallCFunction(argument_count, regs_.scratch0());
7517  Register address =
7518  a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
7519  ASSERT(!address.is(regs_.object()));
7520  ASSERT(!address.is(a0));
7521  __ Move(address, regs_.address());
7522  __ Move(a0, regs_.object());
7523  if (mode == INCREMENTAL_COMPACTION) {
7524  __ Move(a1, address);
7525  } else {
7526  ASSERT(mode == INCREMENTAL);
7527  __ lw(a1, MemOperand(address, 0));
7528  }
7529  __ li(a2, Operand(ExternalReference::isolate_address()));
7530 
7531  AllowExternalCallThatCantCauseGC scope(masm);
7532  if (mode == INCREMENTAL_COMPACTION) {
7533  __ CallCFunction(
7534  ExternalReference::incremental_evacuation_record_write_function(
7535  masm->isolate()),
7536  argument_count);
7537  } else {
7538  ASSERT(mode == INCREMENTAL);
7539  __ CallCFunction(
7540  ExternalReference::incremental_marking_record_write_function(
7541  masm->isolate()),
7542  argument_count);
7543  }
7544  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
7545 }
7546 
7547 
7548 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
7549  MacroAssembler* masm,
7550  OnNoNeedToInformIncrementalMarker on_no_need,
7551  Mode mode) {
7552  Label on_black;
7553  Label need_incremental;
7554  Label need_incremental_pop_scratch;
7555 
7556  // Let's look at the color of the object: If it is not black we don't have
7557  // to inform the incremental marker.
7558  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
7559 
7560  regs_.Restore(masm);
7561  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
7562  __ RememberedSetHelper(object_,
7563  address_,
7564  value_,
7565  save_fp_regs_mode_,
7567  } else {
7568  __ Ret();
7569  }
7570 
7571  __ bind(&on_black);
7572 
7573  // Get the value from the slot.
7574  __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
7575 
7576  if (mode == INCREMENTAL_COMPACTION) {
7577  Label ensure_not_white;
7578 
7579  __ CheckPageFlag(regs_.scratch0(), // Contains value.
7580  regs_.scratch1(), // Scratch.
7582  eq,
7583  &ensure_not_white);
7584 
7585  __ CheckPageFlag(regs_.object(),
7586  regs_.scratch1(), // Scratch.
7588  eq,
7589  &need_incremental);
7590 
7591  __ bind(&ensure_not_white);
7592  }
7593 
7594  // We need extra registers for this, so we push the object and the address
7595  // register temporarily.
7596  __ Push(regs_.object(), regs_.address());
7597  __ EnsureNotWhite(regs_.scratch0(), // The value.
7598  regs_.scratch1(), // Scratch.
7599  regs_.object(), // Scratch.
7600  regs_.address(), // Scratch.
7601  &need_incremental_pop_scratch);
7602  __ Pop(regs_.object(), regs_.address());
7603 
7604  regs_.Restore(masm);
7605  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
7606  __ RememberedSetHelper(object_,
7607  address_,
7608  value_,
7609  save_fp_regs_mode_,
7611  } else {
7612  __ Ret();
7613  }
7614 
7615  __ bind(&need_incremental_pop_scratch);
7616  __ Pop(regs_.object(), regs_.address());
7617 
7618  __ bind(&need_incremental);
7619 
7620  // Fall through when we need to inform the incremental marker.
7621 }
7622 
7623 
7624 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
7625  // ----------- S t a t e -------------
7626  // -- a0 : element value to store
7627  // -- a1 : array literal
7628  // -- a2 : map of array literal
7629  // -- a3 : element index as smi
7630  // -- t0 : array literal index in function as smi
7631  // -----------------------------------
7632 
7633  Label element_done;
7634  Label double_elements;
7635  Label smi_element;
7636  Label slow_elements;
7637  Label fast_elements;
7638 
7639  __ CheckFastElements(a2, t1, &double_elements);
7640  // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
7641  __ JumpIfSmi(a0, &smi_element);
7642  __ CheckFastSmiElements(a2, t1, &fast_elements);
7643 
7644  // Storing into the array literal requires an elements transition. Call into
7645  // the runtime.
7646  __ bind(&slow_elements);
7647  // Push the arguments for the runtime call.
7648  __ Push(a1, a3, a0);
7651  __ Push(t1, t0);
7652  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
7653 
7654  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
7655  __ bind(&fast_elements);
7657  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
7658  __ Addu(t2, t1, t2);
7659  __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
7660  __ sw(a0, MemOperand(t2, 0));
7661  // Update the write barrier for the array store.
7662  __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
7664  __ Ret(USE_DELAY_SLOT);
7665  __ mov(v0, a0);
7666 
7667  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
7668  // and value is Smi.
7669  __ bind(&smi_element);
7671  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
7672  __ Addu(t2, t1, t2);
7674  __ Ret(USE_DELAY_SLOT);
7675  __ mov(v0, a0);
7676 
7677  // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
7678  __ bind(&double_elements);
7680  __ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, a2,
7681  &slow_elements);
7682  __ Ret(USE_DELAY_SLOT);
7683  __ mov(v0, a0);
7684 }
7685 
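The fast_elements path stores the value straight into the backing FixedArray: the smi index is already index * 2, so shifting it by kPointerSizeLog2 - kSmiTagSize scales it to a byte offset, and adding the header size minus the heap-object tag lands on the element slot. A sketch of that address arithmetic; the FixedArray header size here is an assumption, the rest are the 32-bit MIPS constants:

#include <cstdint>

const int kPointerSizeLog2 = 2;       // 4-byte pointers
const int kSmiTagSize = 1;            // smis are value << 1
const int kHeapObjectTag = 1;
const int kFixedArrayHeaderSize = 8;  // assumed stand-in for FixedArray::kHeaderSize

// Address of the element slot for a smi-tagged index, mirroring the
// sll/Addu/Addu sequence in the fast_elements path.
uint32_t ElementSlotAddress(uint32_t tagged_elements, uint32_t smi_index) {
  uint32_t byte_offset = smi_index << (kPointerSizeLog2 - kSmiTagSize);
  return tagged_elements + byte_offset + kFixedArrayHeaderSize - kHeapObjectTag;
}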
7686 
7687 #undef __
7688 
7689 } } // namespace v8::internal
7690 
7691 #endif // V8_TARGET_ARCH_MIPS
Definition: flags.cc:1349
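The Flag descriptor above backs V8's DEFINE_bool / DEFINE_int / DEFINE_float / DEFINE_string command-line flag macros, each of which takes a flag name, a default value and a help string (e.g. trace_gc, false, "print one trace line following each garbage collection"). A minimal sketch of that shape follows; the SKETCH_* names and the expansion are hypothetical, not V8's actual macro machinery.
// Sketch only: shows the (name, default, help-text) shape of the DEFINE_*
// flag macros.  Macro and table names here are illustrative.
#include <cstdio>
enum FlagTypeSketch { TYPE_BOOL, TYPE_INT, TYPE_FLOAT, TYPE_STRING, TYPE_ARGS };
struct FlagSketch {
  FlagTypeSketch type;   // which kind of flag this entry describes
  const char* name;      // e.g. "trace_gc"
  const char* comment;   // help text shown by --help
  bool bool_default;     // default value, only meaningful for TYPE_BOOL
};
// DEFINE_bool-style macro: declares the flag variable and a descriptor entry.
#define SKETCH_DEFINE_BOOL(nam, def, cmt)                           \
  bool FLAG_##nam = def;                                            \
  static const FlagSketch kFlag_##nam = {TYPE_BOOL, #nam, cmt, def};
SKETCH_DEFINE_BOOL(trace_gc, false,
                   "print one trace line following each garbage collection")
int main() {
  std::printf("--%s (default %d): %s\n", kFlag_trace_gc.name,
              kFlag_trace_gc.bool_default, kFlag_trace_gc.comment);
  return FLAG_trace_gc ? 1 : 0;  // the flag value itself is read at runtime
}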
const FPURegister f22
static const int kDataOffset
Definition: objects.h:6432
static const int kGlobalReceiverOffset
Definition: objects.h:6085
const FPURegister f10
const int kNumCalleeSavedFPU
Definition: frames-mips.h:87
static void GenerateCopyCharactersLong(MacroAssembler *masm, Register dest, Register src, Register count, Register scratch1, Register scratch2, Register scratch3, Register scratch4, Register scratch5, int flags)
void Generate(MacroAssembler *masm)
static Failure * OutOfMemoryException()
Definition: objects-inl.h:1021
static void LoadNumberAsInt32(MacroAssembler *masm, Register object, Register dst, Register heap_number_map, Register scratch1, Register scratch2, Register scratch3, DwVfpRegister double_scratch, Label *not_int32)
static void GenerateHashGetHash(MacroAssembler *masm, Register hash)
Flag flags[]
Definition: flags.cc:1467
static const int kExponentBias
Definition: objects.h:1321
int int32_t
Definition: unicode.cc:47
static Handle< Object > UninitializedSentinel(Isolate *isolate)
Definition: objects-inl.h:5052
static bool IsSupported(CpuFeature f)
static Failure * Exception()
Definition: objects-inl.h:1016
void Generate(MacroAssembler *masm)
virtual bool IsPregenerated()
void Generate(MacroAssembler *masm)
#define ASSERT(condition)
Definition: checks.h:270
static void LoadOperands(MacroAssembler *masm, FloatingPointHelper::Destination destination, Register heap_number_map, Register scratch1, Register scratch2, Label *not_number)
const RegList kJSCallerSaved
Definition: frames-arm.h:47
WriteInt32ToHeapNumberStub(Register the_int, Register the_heap_number, Register scratch)
const int kPointerSizeLog2
Definition: globals.h:246
static const int kInstanceSizeOffset
Definition: objects.h:4981
static void GenerateCompareFlatAsciiStrings(MacroAssembler *masm, Register left, Register right, Register scratch1, Register scratch2, Register scratch3, Register scratch4)
static Handle< Object > MegamorphicSentinel(Isolate *isolate)
Definition: objects-inl.h:5057
static const char * GetName(TypeInfo type_info)
Definition: ic.cc:2285
const uint32_t kStringRepresentationMask
Definition: objects.h:455
MemOperand GlobalObjectOperand()
static const int kSize
Definition: objects.h:8134
const intptr_t kObjectAlignmentMask
Definition: v8globals.h:45
static const int kGlobalContextOffset
Definition: objects.h:6084
MemOperand ContextOperand(Register context, int index)
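ContextOperand builds a base+displacement operand addressing a slot of a Context object. A sketch of the offset it would compute, assuming contexts are FixedArray-like heap objects with an 8-byte header on a 32-bit build, 4-byte pointers, and heap-object tag 1 (illustrative names, not V8's own helpers):
// Sketch: byte offset of context slot `index` relative to the tagged pointer.
inline int ContextSlotOffsetSketch(int index) {
  const int kHeaderSizeSketch    = 8;   // assumption: map word + length word
  const int kPointerSizeSketch   = 4;   // assumption: 32-bit MIPS build
  const int kHeapObjectTagSketch = 1;   // assumption: tag carried by the pointer
  return kHeaderSizeSketch + index * kPointerSizeSketch - kHeapObjectTagSketch;
}
// ContextOperand(cp, index) then addresses [cp + ContextSlotOffsetSketch(index)].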
static const int kContextOffset
Definition: objects.h:5986
const uint32_t kAsciiDataHintTag
Definition: objects.h:479
const uint32_t kShortExternalStringMask
Definition: objects.h:483
static void GenerateNegativeLookup(MacroAssembler *masm, Label *miss, Label *done, Register receiver, Register properties, Handle< String > name, Register scratch0)
static const int kLastSubjectOffset
Definition: jsregexp.h:152
const int kIntSize
Definition: globals.h:231
static const int kZeroHash
Definition: objects.h:6816
#define V8_INFINITY
Definition: globals.h:32
const RegList kCalleeSavedFPU
Definition: frames-mips.h:79
void Generate(MacroAssembler *masm)
static const int kHashFieldOffset
Definition: objects.h:7099
static const int kSize
Definition: objects.h:8112
static const int kLastCaptureCountOffset
Definition: jsregexp.h:150
const RegList kCallerSavedFPU
Definition: frames-mips.h:89
static const int kFirstOffset
Definition: objects.h:7420
static const int kMinLength
Definition: objects.h:7433
const uint32_t kNotStringTag
Definition: objects.h:438
const Register sp
static const int kParentOffset
Definition: objects.h:7473
static const int kNonMantissaBitsInTopWord
Definition: objects.h:1324
static const int kLiteralsOffset
Definition: objects.h:5987
#define UNREACHABLE()
Definition: checks.h:50
DwVfpRegister DoubleRegister
static const int kArgumentsObjectSizeStrict
Definition: heap.h:866
STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0)
static const int kLengthOffset
Definition: objects.h:7098
const uint32_t kIsSymbolMask
Definition: objects.h:443
static const int kExponentShift
Definition: objects.h:1322
const intptr_t kFailureTagMask
Definition: v8globals.h:73
static const int kValueOffset
Definition: objects.h:1307
const int kFailureTagSize
Definition: v8globals.h:72
static void GenerateFlatAsciiStringEquals(MacroAssembler *masm, Register left, Register right, Register scratch1, Register scratch2, Register scratch3)
const int kDoubleSize
Definition: globals.h:232
static const int kIrregexpCaptureCountOffset
Definition: objects.h:6478
static const int kInputOffset
Definition: objects.h:8133
static bool IsBitOp(Value op)
Definition: token.h:256
const uint32_t kIsIndirectStringMask
Definition: objects.h:462
const bool IsMipsSoftFloatABI
void Generate(MacroAssembler *masm)
const int kPointerSize
Definition: globals.h:234
static void LoadSmis(MacroAssembler *masm, Destination destination, Register scratch1, Register scratch2)
static void CallCCodeForDoubleOperation(MacroAssembler *masm, Token::Value op, Register heap_number_result, Register scratch)
static const int kStringWrapperSafeForDefaultValueOf
Definition: objects.h:5011
const Address kZapValue
Definition: v8globals.h:89
const int kHeapObjectTag
Definition: v8.h:3848
const RegList kCalleeSaved
Definition: frames-arm.h:63
const uint32_t kAsciiDataHintMask
Definition: objects.h:478
#define __
static void ConvertNumberToInt32(MacroAssembler *masm, Register object, Register dst, Register heap_number_map, Register scratch1, Register scratch2, Register scratch3, DwVfpRegister double_scratch, Label *not_int32)
void Generate(MacroAssembler *masm)
static const int kPropertiesOffset
Definition: objects.h:2113
static void PatchBranchIntoNop(MacroAssembler *masm, int pos)
static const int kMinLength
Definition: objects.h:7485
const SwVfpRegister s0
const uint32_t kShortExternalStringTag
Definition: objects.h:484
static void GenerateHashAddCharacter(MacroAssembler *masm, Register hash, Register character)
static void Generate(MacroAssembler *masm, Register string, Register index, Register result, Label *call_runtime)
static const int kHeaderSize
Definition: objects.h:7282
static const int kNextFunctionLinkOffset
Definition: objects.h:5989
void Generate(MacroAssembler *masm)
const int kBitsPerByte
Definition: globals.h:251
static int SizeFor(int length)
Definition: objects.h:2369
static const int kElementsOffset
Definition: objects.h:2114
bool IsPowerOf2(T x)
Definition: utils.h:50
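IsPowerOf2 is the usual bit-twiddling predicate; a minimal stand-alone sketch (the zero handling shown here is an assumption, the utils.h template may treat 0 differently):
// Sketch of a power-of-two test: a power of two has exactly one bit set,
// so clearing the lowest set bit (x & (x - 1)) must leave zero.
template <typename T>
inline bool IsPowerOf2Sketch(T x) {
  return x != 0 && (x & (x - 1)) == 0;  // assumption: 0 is not a power of 2
}
// e.g. IsPowerOf2Sketch(8) == true, IsPowerOf2Sketch(12) == false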
const FPURegister f2
const uint32_t kStringTag
Definition: objects.h:437
static bool IsEqualityOp(Value op)
Definition: token.h:222
static const int kOffsetOffset
Definition: objects.h:7474
friend class BlockTrampolinePoolScope
void Generate(MacroAssembler *masm)
static const int kLengthOffset
Definition: objects.h:8111
static int SizeFor(int length)
Definition: objects.h:2288
void GenerateSlow(MacroAssembler *masm, const RuntimeCallHelper &call_helper)
virtual void Generate(MacroAssembler *masm)
const SwVfpRegister s1
static const int kLastMatchOverhead
Definition: jsregexp.h:147
static const int kHeaderSize
Definition: objects.h:2233
const intptr_t kPointerAlignmentMask
Definition: v8globals.h:49
void Generate(MacroAssembler *masm)
#define ISOLATE
Definition: isolate.h:1410
void GenerateCall(MacroAssembler *masm, ExternalReference function)
static const int kMapOffset
Definition: objects.h:1219
static const int kMantissaBitsInTopWord
Definition: objects.h:1323
bool is(Register reg) const
static const int kSkipEvacuationSlotsRecordingMask
Definition: spaces.h:410
const uint32_t kIsNotStringMask
Definition: objects.h:436
const int kNumCalleeSaved
Definition: frames-arm.h:83
const uint32_t kSlicedNotConsMask
Definition: objects.h:473
static const int kLengthOffset
Definition: objects.h:2232
static void ConvertIntToDouble(MacroAssembler *masm, Register int_scratch, Destination destination, DwVfpRegister double_dst, Register dst1, Register dst2, Register scratch2, SwVfpRegister single_scratch)
static const int kSize
Definition: objects.h:1315
void Generate(MacroAssembler *masm)
void Generate(MacroAssembler *masm)
static const int kSecondOffset
Definition: objects.h:7421
static void GeneratePositiveLookup(MacroAssembler *masm, Label *miss, Label *done, Register elements, Register name, Register r0, Register r1)
static const int kCallerFPOffset
Definition: frames-arm.h:117
static const int kArgumentsLengthIndex
Definition: heap.h:869
MemOperand FieldMemOperand(Register object, int offset)
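FieldMemOperand pairs with kHeapObjectTag (also listed in this index): heap-object pointers carry a low tag, so a field access folds the tag into the displacement. A sketch of the idea, not the macro assembler's actual code, assuming tag value 1:
// Sketch: tagged heap-object pointer plus field offset -> base+displacement.
struct MemOperandSketch {
  int base_register;   // register holding the tagged object pointer
  int displacement;    // byte offset applied at the load/store
};
inline MemOperandSketch FieldMemOperandSketch(int object_reg, int field_offset) {
  const int kHeapObjectTagSketch = 1;  // assumption: V8-style tag value
  MemOperandSketch op = { object_reg, field_offset - kHeapObjectTagSketch };
  return op;  // the tag bias of the pointer is folded into the displacement
}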
const intptr_t kObjectAlignment
Definition: v8globals.h:44
static const int kFirstCaptureOffset
Definition: jsregexp.h:156
#define UNIMPLEMENTED()
Definition: checks.h:48
static const uint32_t kHashBitMask
Definition: objects.h:7125
static const uint32_t kSignMask
Definition: objects.h:1316
static const int kLastInputOffset
Definition: jsregexp.h:154
const int kSmiShiftSize
Definition: v8.h:3899
const int kSmiTagSize
Definition: v8.h:3854
static const int kHeaderSize
Definition: objects.h:4513
void GenerateBody(MacroAssembler *masm, bool is_construct)
static const int kDataAsciiCodeOffset
Definition: objects.h:6474
#define ASSERT_EQ(v1, v2)
Definition: checks.h:271
static void GenerateAheadOfTime()
static const int kArgumentsCalleeIndex
Definition: heap.h:871
const int kSmiTag
Definition: v8.h:3853
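kSmiTag, kSmiTagSize and kSmiShiftSize in this index describe small-integer (smi) tagging. A sketch of the 32-bit layout, assuming tag value 0, tag size 1 bit and shift size 0:
#include <stdint.h>
// Sketch of 32-bit smi tagging: the payload sits in the upper 31 bits and
// the low (tag) bit stays 0; heap-object pointers use the other tag value.
inline int32_t SmiTagSketch(int32_t value) {
  return (int32_t)((uint32_t)value << 1);  // low bit 0 marks "smi"
}
inline int32_t SmiUntagSketch(int32_t smi) {
  return smi >> 1;  // arithmetic shift restores the signed value
}
inline bool IsSmiSketch(int32_t tagged) {
  return (tagged & 1) == 0;  // a set low bit would mean "heap object"
}
// e.g. SmiTagSketch(21) == 42, SmiUntagSketch(42) == 21, IsSmiSketch(42) == true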
static const int kIsUndetectable
Definition: objects.h:5005
static const int kHeaderSize
Definition: objects.h:2115
const FPURegister f12
void Generate(MacroAssembler *masm)
const FPURegister f6
void GenerateFast(MacroAssembler *masm)
const int kFailureTag
Definition: v8globals.h:71
static void GenerateLookupNumberStringCache(MacroAssembler *masm, Register object, Register result, Register scratch1, Register scratch2, Register scratch3, bool object_is_smi, Label *not_found)
static const int kInstrSize
static const int kDataTagOffset
Definition: objects.h:6472
static const int kPrototypeOffset
Definition: objects.h:4953
static const int kSize
Definition: objects.h:5990
const Register no_reg
static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler *masm, Register c1, Register c2, Register scratch1, Register scratch2, Register scratch3, Register scratch4, Register scratch5, Label *not_found)
static const int kMaxLength
Definition: objects.h:7166
static const int kValueOffset
Definition: objects.h:6188
bool Contains(Type type) const
Definition: code-stubs.h:1050
const uint32_t kSymbolTag
Definition: objects.h:445
const Register fp
const uint32_t kAsciiStringTag
Definition: objects.h:451
static const int kConstructStubOffset
Definition: objects.h:5608
static const int kExponentBits
Definition: objects.h:1320
static const int kHashShift
Definition: objects.h:7121
static const int kSharedFunctionInfoOffset
Definition: objects.h:5984
const FPURegister f14
void Generate(MacroAssembler *masm)
static const int kBitField2Offset
Definition: objects.h:4995
void Generate(MacroAssembler *masm)
CEntryStub(int result_size, SaveFPRegsMode save_doubles=kDontSaveFPRegs)
Definition: code-stubs.h:629
void check(i::Vector< const char > string)
static const int kExponentOffset
Definition: objects.h:1313
static const int kDataUC16CodeOffset
Definition: objects.h:6476
void Generate(MacroAssembler *masm)
StoreBufferOverflowStub(SaveFPRegsMode save_fp)
FlagType type() const
Definition: flags.cc:1358
static void GenerateHashInit(MacroAssembler *masm, Register hash, Register character)
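GenerateHashInit, GenerateHashAddCharacter and GenerateHashGetHash emit the incremental string-hash computation in assembly (kHashBitMask and kZeroHash above belong to the same scheme). A plain-C++ sketch of a one-at-a-time-style hash with the add and finalize steps; the seed handling, masking and zero fallback are simplified assumptions, not copied from objects.h:
#include <stdint.h>
// Sketch of the incremental string hash the three helpers emit per character.
static uint32_t HashInitSketch(uint32_t seed, uint32_t first_char) {
  uint32_t hash = seed + first_char;   // assumption: seed mixed in up front
  hash += hash << 10;
  hash ^= hash >> 6;
  return hash;
}
static uint32_t HashAddCharacterSketch(uint32_t hash, uint32_t c) {
  hash += c;
  hash += hash << 10;
  hash ^= hash >> 6;
  return hash;
}
static uint32_t HashGetHashSketch(uint32_t hash) {
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  // A zero result would collide with the "hash not yet computed" state, so a
  // fixed non-zero fallback (kZeroHash in objects.h) is used instead.
  return hash == 0 ? 27 : hash;  // assumption: 27 as the fallback value
}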
static bool IsOrderedRelationalCompareOp(Value op)
Definition: token.h:218
const uint32_t kStringEncodingMask
Definition: objects.h:449
static const int kInstanceTypeOffset
Definition: objects.h:4992
static const int kIndexOffset
Definition: objects.h:8132
const FPURegister f8
void Generate(MacroAssembler *masm)
static const int kMantissaOffset
Definition: objects.h:1312
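Several constants in this index (kExponentBits, kExponentBias, kExponentShift, kMantissaBitsInTopWord, kNonMantissaBitsInTopWord, kSignMask, kExponentOffset, kMantissaOffset) describe how a HeapNumber's IEEE-754 double splits across two 32-bit words. The sketch below restates the standard binary64 layout; the linkage to the exact objects.h values is an assumption:
#include <stdint.h>
// Sketch of the binary64 layout as two 32-bit words (the split the MIPS
// stubs work with):
//   top word    : [ sign:1 | exponent:11 | mantissa high:20 ]
//   bottom word : [            mantissa low:32              ]
enum DoubleLayoutSketch {
  kExponentBitsSketch       = 11,    // width of the exponent field
  kMantissaBitsInTopSketch  = 20,    // 52 mantissa bits - 32 in the low word
  kNonMantissaBitsTopSketch = 12,    // sign (1) + exponent (11)
  kExponentBiasSketch       = 1023,  // binary64 exponent bias
  kExponentShiftSketch      = 20     // exponent sits above the 20 mantissa bits
};
static const uint32_t kSignMaskSketch = 0x80000000u;  // top bit of the top word
// Unbiased exponent of a double whose upper 32 bits are top_word:
inline int DoubleExponentSketch(uint32_t top_word) {
  return (int)((top_word >> kExponentShiftSketch) & 0x7FF) - kExponentBiasSketch;
}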
void Generate(MacroAssembler *masm)