v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
code-stubs-mips.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if V8_TARGET_ARCH_MIPS
31 
32 #include "bootstrapper.h"
33 #include "code-stubs.h"
34 #include "codegen.h"
35 #include "regexp-macro-assembler.h"
36 #include "stub-cache.h"
37 
38 namespace v8 {
39 namespace internal {
40 
41 
42 void FastNewClosureStub::InitializeInterfaceDescriptor(
43  Isolate* isolate,
44  CodeStubInterfaceDescriptor* descriptor) {
45  static Register registers[] = { a2 };
46  descriptor->register_param_count_ = 1;
47  descriptor->register_params_ = registers;
48  descriptor->deoptimization_handler_ =
49  Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
50 }
51 
52 
53 void FastNewContextStub::InitializeInterfaceDescriptor(
54  Isolate* isolate,
55  CodeStubInterfaceDescriptor* descriptor) {
56  static Register registers[] = { a1 };
57  descriptor->register_param_count_ = 1;
58  descriptor->register_params_ = registers;
59  descriptor->deoptimization_handler_ = NULL;
60 }
61 
62 
63 void ToNumberStub::InitializeInterfaceDescriptor(
64  Isolate* isolate,
65  CodeStubInterfaceDescriptor* descriptor) {
66  static Register registers[] = { a0 };
67  descriptor->register_param_count_ = 1;
68  descriptor->register_params_ = registers;
69  descriptor->deoptimization_handler_ = NULL;
70 }
71 
72 
73 void NumberToStringStub::InitializeInterfaceDescriptor(
74  Isolate* isolate,
75  CodeStubInterfaceDescriptor* descriptor) {
76  static Register registers[] = { a0 };
77  descriptor->register_param_count_ = 1;
78  descriptor->register_params_ = registers;
79  descriptor->deoptimization_handler_ =
80  Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
81 }
82 
83 
84 void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
85  Isolate* isolate,
86  CodeStubInterfaceDescriptor* descriptor) {
87  static Register registers[] = { a3, a2, a1 };
88  descriptor->register_param_count_ = 3;
89  descriptor->register_params_ = registers;
90  descriptor->deoptimization_handler_ =
91  Runtime::FunctionForId(
92  Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
93 }
94 
95 
96 void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
97  Isolate* isolate,
98  CodeStubInterfaceDescriptor* descriptor) {
99  static Register registers[] = { a3, a2, a1, a0 };
100  descriptor->register_param_count_ = 4;
101  descriptor->register_params_ = registers;
102  descriptor->deoptimization_handler_ =
103  Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
104 }
105 
106 
107 void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
108  Isolate* isolate,
109  CodeStubInterfaceDescriptor* descriptor) {
110  static Register registers[] = { a2, a3 };
111  descriptor->register_param_count_ = 2;
112  descriptor->register_params_ = registers;
113  descriptor->deoptimization_handler_ = NULL;
114 }
115 
116 
117 void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
118  Isolate* isolate,
119  CodeStubInterfaceDescriptor* descriptor) {
120  static Register registers[] = { a1, a0 };
121  descriptor->register_param_count_ = 2;
122  descriptor->register_params_ = registers;
123  descriptor->deoptimization_handler_ =
124  FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
125 }
126 
127 
128 void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
129  Isolate* isolate,
130  CodeStubInterfaceDescriptor* descriptor) {
131  static Register registers[] = { a1, a0 };
132  descriptor->register_param_count_ = 2;
133  descriptor->register_params_ = registers;
134  descriptor->deoptimization_handler_ =
135  FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
136 }
137 
138 
139 void RegExpConstructResultStub::InitializeInterfaceDescriptor(
140  Isolate* isolate,
141  CodeStubInterfaceDescriptor* descriptor) {
142  static Register registers[] = { a2, a1, a0 };
143  descriptor->register_param_count_ = 3;
144  descriptor->register_params_ = registers;
145  descriptor->deoptimization_handler_ =
146  Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
147 }
148 
149 
150 void LoadFieldStub::InitializeInterfaceDescriptor(
151  Isolate* isolate,
152  CodeStubInterfaceDescriptor* descriptor) {
153  static Register registers[] = { a0 };
154  descriptor->register_param_count_ = 1;
155  descriptor->register_params_ = registers;
156  descriptor->deoptimization_handler_ = NULL;
157 }
158 
159 
160 void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
161  Isolate* isolate,
162  CodeStubInterfaceDescriptor* descriptor) {
163  static Register registers[] = { a1 };
164  descriptor->register_param_count_ = 1;
165  descriptor->register_params_ = registers;
166  descriptor->deoptimization_handler_ = NULL;
167 }
168 
169 
170 void StringLengthStub::InitializeInterfaceDescriptor(
171  Isolate* isolate,
172  CodeStubInterfaceDescriptor* descriptor) {
173  static Register registers[] = { a0, a2 };
174  descriptor->register_param_count_ = 2;
175  descriptor->register_params_ = registers;
176  descriptor->deoptimization_handler_ = NULL;
177 }
178 
179 
180 void KeyedStringLengthStub::InitializeInterfaceDescriptor(
181  Isolate* isolate,
182  CodeStubInterfaceDescriptor* descriptor) {
183  static Register registers[] = { a1, a0 };
184  descriptor->register_param_count_ = 2;
185  descriptor->register_params_ = registers;
186  descriptor->deoptimization_handler_ = NULL;
187 }
188 
189 
190 void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
191  Isolate* isolate,
192  CodeStubInterfaceDescriptor* descriptor) {
193  static Register registers[] = { a2, a1, a0 };
194  descriptor->register_param_count_ = 3;
195  descriptor->register_params_ = registers;
196  descriptor->deoptimization_handler_ =
197  FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
198 }
199 
200 
201 void TransitionElementsKindStub::InitializeInterfaceDescriptor(
202  Isolate* isolate,
203  CodeStubInterfaceDescriptor* descriptor) {
204  static Register registers[] = { a0, a1 };
205  descriptor->register_param_count_ = 2;
206  descriptor->register_params_ = registers;
207  Address entry =
208  Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
209  descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
210 }
211 
212 
213 void CompareNilICStub::InitializeInterfaceDescriptor(
214  Isolate* isolate,
215  CodeStubInterfaceDescriptor* descriptor) {
216  static Register registers[] = { a0 };
217  descriptor->register_param_count_ = 1;
218  descriptor->register_params_ = registers;
219  descriptor->deoptimization_handler_ =
220  FUNCTION_ADDR(CompareNilIC_Miss);
221  descriptor->SetMissHandler(
222  ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
223 }
224 
225 
226 static void InitializeArrayConstructorDescriptor(
227  Isolate* isolate,
228  CodeStubInterfaceDescriptor* descriptor,
229  int constant_stack_parameter_count) {
230  // register state
231  // a0 -- number of arguments
232  // a1 -- function
233  // a2 -- allocation site with elements kind
234  static Register registers_variable_args[] = { a1, a2, a0 };
235  static Register registers_no_args[] = { a1, a2 };
236 
237  if (constant_stack_parameter_count == 0) {
238  descriptor->register_param_count_ = 2;
239  descriptor->register_params_ = registers_no_args;
240  } else {
241  // stack param count needs (constructor pointer, and single argument)
242  descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
243  descriptor->stack_parameter_count_ = a0;
244  descriptor->register_param_count_ = 3;
245  descriptor->register_params_ = registers_variable_args;
246  }
247 
248  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
249  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
250  descriptor->deoptimization_handler_ =
251  Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
252 }
253 
254 
255 static void InitializeInternalArrayConstructorDescriptor(
256  Isolate* isolate,
257  CodeStubInterfaceDescriptor* descriptor,
258  int constant_stack_parameter_count) {
259  // register state
260  // a0 -- number of arguments
261  // a1 -- constructor function
262  static Register registers_variable_args[] = { a1, a0 };
263  static Register registers_no_args[] = { a1 };
264 
265  if (constant_stack_parameter_count == 0) {
266  descriptor->register_param_count_ = 1;
267  descriptor->register_params_ = registers_no_args;
268  } else {
269  // stack param count needs (constructor pointer, and single argument)
270  descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
271  descriptor->stack_parameter_count_ = a0;
272  descriptor->register_param_count_ = 2;
273  descriptor->register_params_ = registers_variable_args;
274  }
275 
276  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
277  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
278  descriptor->deoptimization_handler_ =
279  Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
280 }
281 
282 
283 void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
284  Isolate* isolate,
285  CodeStubInterfaceDescriptor* descriptor) {
286  InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
287 }
288 
289 
290 void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
291  Isolate* isolate,
292  CodeStubInterfaceDescriptor* descriptor) {
293  InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
294 }
295 
296 
297 void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
298  Isolate* isolate,
299  CodeStubInterfaceDescriptor* descriptor) {
300  InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
301 }
302 
303 
304 void ToBooleanStub::InitializeInterfaceDescriptor(
305  Isolate* isolate,
306  CodeStubInterfaceDescriptor* descriptor) {
307  static Register registers[] = { a0 };
308  descriptor->register_param_count_ = 1;
309  descriptor->register_params_ = registers;
310  descriptor->deoptimization_handler_ =
311  FUNCTION_ADDR(ToBooleanIC_Miss);
312  descriptor->SetMissHandler(
313  ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
314 }
315 
316 
317 void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
318  Isolate* isolate,
319  CodeStubInterfaceDescriptor* descriptor) {
320  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
321 }
322 
323 
324 void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
325  Isolate* isolate,
326  CodeStubInterfaceDescriptor* descriptor) {
327  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
328 }
329 
330 
331 void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
332  Isolate* isolate,
333  CodeStubInterfaceDescriptor* descriptor) {
334  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
335 }
336 
337 
338 void StoreGlobalStub::InitializeInterfaceDescriptor(
339  Isolate* isolate,
340  CodeStubInterfaceDescriptor* descriptor) {
341  static Register registers[] = { a1, a2, a0 };
342  descriptor->register_param_count_ = 3;
343  descriptor->register_params_ = registers;
344  descriptor->deoptimization_handler_ =
345  FUNCTION_ADDR(StoreIC_MissFromStubFailure);
346 }
347 
348 
349 void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
350  Isolate* isolate,
351  CodeStubInterfaceDescriptor* descriptor) {
352  static Register registers[] = { a0, a3, a1, a2 };
353  descriptor->register_param_count_ = 4;
354  descriptor->register_params_ = registers;
355  descriptor->deoptimization_handler_ =
356  FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
357 }
358 
359 
360 void BinaryOpICStub::InitializeInterfaceDescriptor(
361  Isolate* isolate,
362  CodeStubInterfaceDescriptor* descriptor) {
363  static Register registers[] = { a1, a0 };
364  descriptor->register_param_count_ = 2;
365  descriptor->register_params_ = registers;
366  descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
367  descriptor->SetMissHandler(
368  ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
369 }
370 
371 
372 void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
373  Isolate* isolate,
374  CodeStubInterfaceDescriptor* descriptor) {
375  static Register registers[] = { a2, a1, a0 };
376  descriptor->register_param_count_ = 3;
377  descriptor->register_params_ = registers;
378  descriptor->deoptimization_handler_ =
379  FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
380 }
381 
382 
383 void StringAddStub::InitializeInterfaceDescriptor(
384  Isolate* isolate,
385  CodeStubInterfaceDescriptor* descriptor) {
386  static Register registers[] = { a1, a0 };
387  descriptor->register_param_count_ = 2;
388  descriptor->register_params_ = registers;
389  descriptor->deoptimization_handler_ =
390  Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
391 }
392 
393 
394 void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
395  {
396  CallInterfaceDescriptor* descriptor =
397  isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
398  static Register registers[] = { a1, // JSFunction
399  cp, // context
400  a0, // actual number of arguments
401  a2, // expected number of arguments
402  };
403  static Representation representations[] = {
404  Representation::Tagged(), // JSFunction
405  Representation::Tagged(), // context
406  Representation::Integer32(), // actual number of arguments
407  Representation::Integer32(), // expected number of arguments
408  };
409  descriptor->register_param_count_ = 4;
410  descriptor->register_params_ = registers;
411  descriptor->param_representations_ = representations;
412  }
413  {
414  CallInterfaceDescriptor* descriptor =
415  isolate->call_descriptor(Isolate::KeyedCall);
416  static Register registers[] = { cp, // context
417  a2, // key
418  };
419  static Representation representations[] = {
420  Representation::Tagged(), // context
421  Representation::Tagged(), // key
422  };
423  descriptor->register_param_count_ = 2;
424  descriptor->register_params_ = registers;
425  descriptor->param_representations_ = representations;
426  }
427  {
428  CallInterfaceDescriptor* descriptor =
429  isolate->call_descriptor(Isolate::NamedCall);
430  static Register registers[] = { cp, // context
431  a2, // name
432  };
433  static Representation representations[] = {
434  Representation::Tagged(), // context
435  Representation::Tagged(), // name
436  };
437  descriptor->register_param_count_ = 2;
438  descriptor->register_params_ = registers;
439  descriptor->param_representations_ = representations;
440  }
441  {
442  CallInterfaceDescriptor* descriptor =
443  isolate->call_descriptor(Isolate::CallHandler);
444  static Register registers[] = { cp, // context
445  a0, // receiver
446  };
447  static Representation representations[] = {
448  Representation::Tagged(), // context
449  Representation::Tagged(), // receiver
450  };
451  descriptor->register_param_count_ = 2;
452  descriptor->register_params_ = registers;
453  descriptor->param_representations_ = representations;
454  }
455  {
456  CallInterfaceDescriptor* descriptor =
457  isolate->call_descriptor(Isolate::ApiFunctionCall);
458  static Register registers[] = { a0, // callee
459  t0, // call_data
460  a2, // holder
461  a1, // api_function_address
462  cp, // context
463  };
464  static Representation representations[] = {
465  Representation::Tagged(), // callee
466  Representation::Tagged(), // call_data
467  Representation::Tagged(), // holder
468  Representation::External(), // api_function_address
469  Representation::Tagged(), // context
470  };
471  descriptor->register_param_count_ = 5;
472  descriptor->register_params_ = registers;
473  descriptor->param_representations_ = representations;
474  }
475 }
476 
477 
478 #define __ ACCESS_MASM(masm)
479 
480 
481 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
482  Label* slow,
483  Condition cc);
484 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
485  Register lhs,
486  Register rhs,
487  Label* rhs_not_nan,
488  Label* slow,
489  bool strict);
490 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
491  Register lhs,
492  Register rhs);
493 
494 
495 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
496  // Update the static counter each time a new code stub is generated.
497  Isolate* isolate = masm->isolate();
498  isolate->counters()->code_stubs()->Increment();
499 
500  CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
501  int param_count = descriptor->register_param_count_;
502  {
503  // Call the runtime system in a fresh internal frame.
504  FrameScope scope(masm, StackFrame::INTERNAL);
505  ASSERT(descriptor->register_param_count_ == 0 ||
506  a0.is(descriptor->register_params_[param_count - 1]));
507  // Push arguments, adjust sp.
508  __ Subu(sp, sp, Operand(param_count * kPointerSize));
509  for (int i = 0; i < param_count; ++i) {
510  // Store argument to stack.
511  __ sw(descriptor->register_params_[i],
512  MemOperand(sp, (param_count-1-i) * kPointerSize));
513  }
514  ExternalReference miss = descriptor->miss_handler();
515  __ CallExternalReference(miss, descriptor->register_param_count_);
516  }
517 
518  __ Ret();
519 }
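// The following sketch is illustrative only; the helper name is hypothetical
// and not part of V8. It shows the stack layout produced by the loop above:
// register parameter i is stored at sp + (param_count - 1 - i) * kPointerSize,
// so parameter 0 ends up at the highest address and the last parameter at the
// top of the stack.
#include <cstdio>

static void PrintMissCallStackLayout(int param_count, int pointer_size = 4) {
  for (int i = 0; i < param_count; ++i) {
    std::printf("param %d -> sp + %d\n", i, (param_count - 1 - i) * pointer_size);
  }
}
// For example, PrintMissCallStackLayout(3) prints offsets 8, 4 and 0.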
520 
521 
522 // Takes a Smi and converts to an IEEE 64 bit floating point value in two
523 // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
524 // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
525 // scratch register. Destroys the source register. No GC occurs during this
526 // stub so you don't have to set up the frame.
527 class ConvertToDoubleStub : public PlatformCodeStub {
528  public:
529  ConvertToDoubleStub(Register result_reg_1,
530  Register result_reg_2,
531  Register source_reg,
532  Register scratch_reg)
533  : result1_(result_reg_1),
534  result2_(result_reg_2),
535  source_(source_reg),
536  zeros_(scratch_reg) { }
537 
538  private:
539  Register result1_;
540  Register result2_;
541  Register source_;
542  Register zeros_;
543 
544  // Minor key encoding in 16 bits.
545  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
546  class OpBits: public BitField<Token::Value, 2, 14> {};
547 
548  Major MajorKey() { return ConvertToDouble; }
549  int MinorKey() {
550  // Encode the parameters in a unique 16 bit value.
551  return result1_.code() +
552  (result2_.code() << 4) +
553  (source_.code() << 8) +
554  (zeros_.code() << 12);
555  }
556 
557  void Generate(MacroAssembler* masm);
558 };
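// Illustrative sketch only (the helper below is hypothetical, not V8 code):
// the IEEE 754 layout described in the class comment -- 1 sign bit, 11
// exponent bits biased by 1023, and 52 fraction bits split 20/32 across two
// 32-bit words. Assumes a little-endian host for the word order.
#include <cstdint>
#include <cstring>

struct DoubleWords {
  uint32_t mantissa_low;   // low 32 fraction bits
  uint32_t exponent_word;  // sign, 11 exponent bits, top 20 fraction bits
};

static DoubleWords SplitDouble(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  DoubleWords words;
  words.mantissa_low = static_cast<uint32_t>(bits);
  words.exponent_word = static_cast<uint32_t>(bits >> 32);
  return words;
}
// For value == 1.0 the exponent word is 0x3FF00000 (1023 << 20) and the low
// word is 0, which is the "exponent_word_for_1" special case in Generate().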
559 
560 
561 void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
562 #ifndef BIG_ENDIAN_FLOATING_POINT
563  Register exponent = result1_;
564  Register mantissa = result2_;
565 #else
566  Register exponent = result2_;
567  Register mantissa = result1_;
568 #endif
569  Label not_special;
570  // Convert from Smi to integer.
571  __ sra(source_, source_, kSmiTagSize);
572  // Move sign bit from source to destination. This works because the sign bit
573  // in the exponent word of the double has the same position and polarity as
574  // the 2's complement sign bit in a Smi.
575  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
576  __ And(exponent, source_, Operand(HeapNumber::kSignMask));
577  // Subtract from 0 if source was negative.
578  __ subu(at, zero_reg, source_);
579  __ Movn(source_, at, exponent);
580 
581  // We have -1, 0 or 1, which we treat specially. Register source_ contains
582  // absolute value: it is either equal to 1 (special case of -1 and 1),
583  // greater than 1 (not a special case) or less than 1 (special case of 0).
584  __ Branch(&not_special, gt, source_, Operand(1));
585 
586  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
587  const uint32_t exponent_word_for_1 =
588  HeapNumber::kExponentBias << HeapNumber::kExponentShift;
589  // Safe to use 'at' as dest reg here.
590  __ Or(at, exponent, Operand(exponent_word_for_1));
591  __ Movn(exponent, at, source_); // Write exp when source not 0.
592  // 1, 0 and -1 all have 0 for the second word.
593  __ Ret(USE_DELAY_SLOT);
594  __ mov(mantissa, zero_reg);
595 
596  __ bind(&not_special);
597  // Count leading zeros.
598  // Gets the wrong answer for 0, but we already checked for that case above.
599  __ Clz(zeros_, source_);
600  // Compute exponent and or it into the exponent register.
601  // We use mantissa as a scratch register here.
602  __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
603  __ subu(mantissa, mantissa, zeros_);
604  __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
605  __ Or(exponent, exponent, mantissa);
606 
607  // Shift up the source chopping the top bit off.
608  __ Addu(zeros_, zeros_, Operand(1));
609  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
610  __ sllv(source_, source_, zeros_);
611  // Compute lower part of fraction (last 12 bits).
612  __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
613  // And the top (top 20 bits).
614  __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
615 
616  __ Ret(USE_DELAY_SLOT);
617  __ or_(exponent, exponent, source_);
618 }
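// A host-side sketch of the same normalization (hypothetical helper, not V8
// code): for a positive, non-special input the exponent is 31 minus the
// leading-zero count plus the bias, and the value shifted past its implicit
// leading 1 supplies the 20/12 mantissa split. Uses the GCC/Clang
// __builtin_clz intrinsic in place of the MIPS Clz instruction.
#include <cstdint>

static void PositiveIntToDoubleWords(uint32_t value,  // must be > 1
                                     uint32_t* exponent_word,
                                     uint32_t* mantissa_low) {
  const int kExponentBias = 1023;
  const int kExponentShift = 20;
  const int kMantissaBitsInTopWord = 20;
  int zeros = __builtin_clz(value);                 // role of the Clz above
  uint32_t exponent = 31 + kExponentBias - zeros;   // unbiased exponent is 31 - zeros
  value <<= (zeros + 1);                            // chop off the implicit leading 1
  *mantissa_low = value << kMantissaBitsInTopWord;  // last 12 bits of the fraction
  *exponent_word = (exponent << kExponentShift) |
                   (value >> (32 - kMantissaBitsInTopWord));
}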
619 
620 
621 void DoubleToIStub::Generate(MacroAssembler* masm) {
622  Label out_of_range, only_low, negate, done;
623  Register input_reg = source();
624  Register result_reg = destination();
625 
626  int double_offset = offset();
627  // Account for saved regs if input is sp.
628  if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
629 
630  Register scratch =
631  GetRegisterThatIsNotOneOf(input_reg, result_reg);
632  Register scratch2 =
633  GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
634  Register scratch3 =
635  GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
636  DoubleRegister double_scratch = kLithiumScratchDouble;
637 
638  __ Push(scratch, scratch2, scratch3);
639 
640  if (!skip_fastpath()) {
641  // Load double input.
642  __ ldc1(double_scratch, MemOperand(input_reg, double_offset));
643 
644  // Clear cumulative exception flags and save the FCSR.
645  __ cfc1(scratch2, FCSR);
646  __ ctc1(zero_reg, FCSR);
647 
648  // Try a conversion to a signed integer.
649  __ Trunc_w_d(double_scratch, double_scratch);
650  // Move the converted value into the result register.
651  __ mfc1(scratch3, double_scratch);
652 
653  // Retrieve and restore the FCSR.
654  __ cfc1(scratch, FCSR);
655  __ ctc1(scratch2, FCSR);
656 
657  // Check for overflow and NaNs.
658  __ And(
659  scratch, scratch,
660  Operand(kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
661  | kFCSRInvalidOpFlagMask));
662  // If we had no exceptions then set result_reg and we are done.
663  Label error;
664  __ Branch(&error, ne, scratch, Operand(zero_reg));
665  __ Move(result_reg, scratch3);
666  __ Branch(&done);
667  __ bind(&error);
668  }
669 
670  // Load the double value and perform a manual truncation.
671  Register input_high = scratch2;
672  Register input_low = scratch3;
673 
674  __ lw(input_low, MemOperand(input_reg, double_offset));
675  __ lw(input_high, MemOperand(input_reg, double_offset + kIntSize));
676 
677  Label normal_exponent, restore_sign;
678  // Extract the biased exponent in result.
679  __ Ext(result_reg,
680  input_high,
681  HeapNumber::kExponentShift,
682  HeapNumber::kExponentBits);
683 
684  // Check for Infinity and NaNs, which should return 0.
685  __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
686  __ Movz(result_reg, zero_reg, scratch);
687  __ Branch(&done, eq, scratch, Operand(zero_reg));
688 
689  // Express exponent as delta to (number of mantissa bits + 31).
690  __ Subu(result_reg,
691  result_reg,
692  Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
693 
694  // If the delta is strictly positive, all bits would be shifted away,
695  // which means that we can return 0.
696  __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
697  __ mov(result_reg, zero_reg);
698  __ Branch(&done);
699 
700  __ bind(&normal_exponent);
701  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
702  // Calculate shift.
703  __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
704 
705  // Save the sign.
706  Register sign = result_reg;
707  result_reg = no_reg;
708  __ And(sign, input_high, Operand(HeapNumber::kSignMask));
709 
710  // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
711  // to check for this specific case.
712  Label high_shift_needed, high_shift_done;
713  __ Branch(&high_shift_needed, lt, scratch, Operand(32));
714  __ mov(input_high, zero_reg);
715  __ Branch(&high_shift_done);
716  __ bind(&high_shift_needed);
717 
718  // Set the implicit 1 before the mantissa part in input_high.
719  __ Or(input_high,
720  input_high,
721  Operand(1 << HeapNumber::kMantissaBitsInTopWord));
722  // Shift the mantissa bits to the correct position.
723  // We don't need to clear non-mantissa bits as they will be shifted away.
724  // If they weren't, it would mean that the answer is in the 32bit range.
725  __ sllv(input_high, input_high, scratch);
726 
727  __ bind(&high_shift_done);
728 
729  // Replace the shifted bits with bits from the lower mantissa word.
730  Label pos_shift, shift_done;
731  __ li(at, 32);
732  __ subu(scratch, at, scratch);
733  __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
734 
735  // Negate scratch.
736  __ Subu(scratch, zero_reg, scratch);
737  __ sllv(input_low, input_low, scratch);
738  __ Branch(&shift_done);
739 
740  __ bind(&pos_shift);
741  __ srlv(input_low, input_low, scratch);
742 
743  __ bind(&shift_done);
744  __ Or(input_high, input_high, Operand(input_low));
745  // Restore sign if necessary.
746  __ mov(scratch, sign);
747  result_reg = sign;
748  sign = no_reg;
749  __ Subu(result_reg, zero_reg, input_high);
750  __ Movz(result_reg, input_high, scratch);
751 
752  __ bind(&done);
753 
754  __ Pop(scratch, scratch2, scratch3);
755  __ Ret();
756 }
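// A sketch of the manual truncation path above (hypothetical helper, not V8
// code): extract the biased exponent, rebuild the low 32 integer bits from the
// two mantissa words plus the implicit 1, then reapply the sign. Assumes a
// little-endian host; like the stub, Infinity and NaN come out as 0 and
// out-of-range values keep only their low 32 bits.
#include <cstdint>
#include <cstring>

static int32_t TruncateDoubleToInt32(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof(bits));
  uint32_t input_low = static_cast<uint32_t>(bits);
  uint32_t input_high = static_cast<uint32_t>(bits >> 32);

  int exponent = static_cast<int>((input_high >> 20) & 0x7FF) - 1023;
  if (exponent < 0) return 0;    // |input| < 1 truncates to 0.
  if (exponent > 83) return 0;   // Every candidate bit is shifted away (also Inf/NaN).

  // 53-bit significand including the implicit leading 1.
  uint64_t mantissa = (static_cast<uint64_t>(input_high & 0xFFFFFu) << 32) |
                      input_low | (1ULL << 52);
  uint32_t magnitude = (exponent >= 52)
                           ? static_cast<uint32_t>(mantissa << (exponent - 52))
                           : static_cast<uint32_t>(mantissa >> (52 - exponent));
  return static_cast<int32_t>((input_high & 0x80000000u) ? 0u - magnitude
                                                         : magnitude);
}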
757 
758 
759 void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
760  Isolate* isolate) {
761  WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
762  WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
763  stub1.GetCode(isolate);
764  stub2.GetCode(isolate);
765 }
766 
767 
768 // See comment for class, this does NOT work for int32's that are in Smi range.
769 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
770  Label max_negative_int;
771  // the_int_ has the answer which is a signed int32 but not a Smi.
772  // We test for the special value that has a different exponent.
773  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
774  // Test sign, and save for later conditionals.
775  __ And(sign_, the_int_, Operand(0x80000000u));
776  __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));
777 
778  // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
779  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
780  uint32_t non_smi_exponent =
781  (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
782  __ li(scratch_, Operand(non_smi_exponent));
783  // Set the sign bit in scratch_ if the value was negative.
784  __ or_(scratch_, scratch_, sign_);
785  // Subtract from 0 if the value was negative.
786  __ subu(at, zero_reg, the_int_);
787  __ Movn(the_int_, at, sign_);
788  // We should be masking the implicit first digit of the mantissa away here,
789  // but it just ends up combining harmlessly with the last digit of the
790  // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
791  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
792  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
793  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
794  __ srl(at, the_int_, shift_distance);
795  __ or_(scratch_, scratch_, at);
796  __ sw(scratch_, FieldMemOperand(the_heap_number_,
797  HeapNumber::kExponentOffset));
798  __ sll(scratch_, the_int_, 32 - shift_distance);
799  __ Ret(USE_DELAY_SLOT);
800  __ sw(scratch_, FieldMemOperand(the_heap_number_,
801  HeapNumber::kMantissaOffset));
802 
803  __ bind(&max_negative_int);
804  // The max negative int32 is stored as a positive number in the mantissa of
805  // a double because it uses a sign bit instead of using two's complement.
806  // The actual mantissa bits stored are all 0 because the implicit most
807  // significant 1 bit is not stored.
808  non_smi_exponent += 1 << HeapNumber::kExponentShift;
809  __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
810  __ sw(scratch_,
811  FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
812  __ mov(scratch_, zero_reg);
813  __ Ret(USE_DELAY_SLOT);
814  __ sw(scratch_,
815  FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
816 }
817 
818 
819 // Handle the case where the lhs and rhs are the same object.
820 // Equality is almost reflexive (everything but NaN), so this is a test
821 // for "identity and not NaN".
822 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
823  Label* slow,
824  Condition cc) {
825  Label not_identical;
826  Label heap_number, return_equal;
827  Register exp_mask_reg = t5;
828 
829  __ Branch(&not_identical, ne, a0, Operand(a1));
830 
831  __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
832 
833  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
834  // so we do the second best thing - test it ourselves.
835  // They are both equal and they are not both Smis so both of them are not
836  // Smis. If it's not a heap number, then return equal.
837  if (cc == less || cc == greater) {
838  __ GetObjectType(a0, t4, t4);
839  __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
840  } else {
841  __ GetObjectType(a0, t4, t4);
842  __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
843  // Comparing JS objects with <=, >= is complicated.
844  if (cc != eq) {
845  __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
846  // Normally here we fall through to return_equal, but undefined is
847  // special: (undefined == undefined) == true, but
848  // (undefined <= undefined) == false! See ECMAScript 11.8.5.
849  if (cc == less_equal || cc == greater_equal) {
850  __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
851  __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
852  __ Branch(&return_equal, ne, a0, Operand(t2));
853  ASSERT(is_int16(GREATER) && is_int16(LESS));
854  __ Ret(USE_DELAY_SLOT);
855  if (cc == le) {
856  // undefined <= undefined should fail.
857  __ li(v0, Operand(GREATER));
858  } else {
859  // undefined >= undefined should fail.
860  __ li(v0, Operand(LESS));
861  }
862  }
863  }
864  }
865 
866  __ bind(&return_equal);
867  ASSERT(is_int16(GREATER) && is_int16(LESS));
868  __ Ret(USE_DELAY_SLOT);
869  if (cc == less) {
870  __ li(v0, Operand(GREATER)); // Things aren't less than themselves.
871  } else if (cc == greater) {
872  __ li(v0, Operand(LESS)); // Things aren't greater than themselves.
873  } else {
874  __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves.
875  }
876 
877  // For less and greater we don't have to check for NaN since the result of
878  // x < x is false regardless. For the others here is some code to check
879  // for NaN.
880  if (cc != lt && cc != gt) {
881  __ bind(&heap_number);
882  // It is a heap number, so return non-equal if it's NaN and equal if it's
883  // not NaN.
884 
885  // The representation of NaN values has all exponent bits (52..62) set,
886  // and not all mantissa bits (0..51) clear.
887  // Read top bits of double representation (second word of value).
888  __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
889  // Test that exponent bits are all set.
890  __ And(t3, t2, Operand(exp_mask_reg));
891  // If all bits not set (ne cond), then not a NaN, objects are equal.
892  __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
893 
894  // Shift out flag and all exponent bits, retaining only mantissa.
895  __ sll(t3, t2, HeapNumber::kNonMantissaBitsInTopWord);
896  // Or with all low-bits of mantissa.
897  __ lw(t2, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
898  __ Or(v0, t3, Operand(t2));
899  // For equal we already have the right value in v0: Return zero (equal)
900  // if all bits in mantissa are zero (it's an Infinity) and non-zero if
901  // not (it's a NaN). For <= and >= we need to load v0 with the failing
902  // value if it's a NaN.
903  if (cc != eq) {
904  // All-zero means Infinity means equal.
905  __ Ret(eq, v0, Operand(zero_reg));
906  ASSERT(is_int16(GREATER) && is_int16(LESS));
907  __ Ret(USE_DELAY_SLOT);
908  if (cc == le) {
909  __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
910  } else {
911  __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
912  }
913  }
914  }
915  // No fall through here.
916 
917  __ bind(&not_identical);
918 }
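// A sketch of the NaN test used above (hypothetical helper, not V8 code): a
// double is NaN exactly when all 11 exponent bits are set and the 52 mantissa
// bits are not all zero (an all-zero mantissa would be an Infinity).
#include <cstdint>
#include <cstring>

static bool IsNaNByBits(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  const uint64_t kExponentMask = 0x7FF0000000000000ULL;
  const uint64_t kMantissaMask = 0x000FFFFFFFFFFFFFULL;
  return (bits & kExponentMask) == kExponentMask && (bits & kMantissaMask) != 0;
}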
919 
920 
921 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
922  Register lhs,
923  Register rhs,
924  Label* both_loaded_as_doubles,
925  Label* slow,
926  bool strict) {
927  ASSERT((lhs.is(a0) && rhs.is(a1)) ||
928  (lhs.is(a1) && rhs.is(a0)));
929 
930  Label lhs_is_smi;
931  __ JumpIfSmi(lhs, &lhs_is_smi);
932  // Rhs is a Smi.
933  // Check whether the non-smi is a heap number.
934  __ GetObjectType(lhs, t4, t4);
935  if (strict) {
936  // If lhs was not a number and rhs was a Smi then strict equality cannot
937  // succeed. Return non-equal (lhs is already not zero).
938  __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
939  __ mov(v0, lhs);
940  } else {
941  // Smi compared non-strictly with a non-Smi non-heap-number. Call
942  // the runtime.
943  __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
944  }
945 
946  // Rhs is a smi, lhs is a number.
947  // Convert smi rhs to double.
948  __ sra(at, rhs, kSmiTagSize);
949  __ mtc1(at, f14);
950  __ cvt_d_w(f14, f14);
951  __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
952 
953  // We now have both loaded as doubles.
954  __ jmp(both_loaded_as_doubles);
955 
956  __ bind(&lhs_is_smi);
957  // Lhs is a Smi. Check whether the non-smi is a heap number.
958  __ GetObjectType(rhs, t4, t4);
959  if (strict) {
960  // If lhs was not a number and rhs was a Smi then strict equality cannot
961  // succeed. Return non-equal.
962  __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
963  __ li(v0, Operand(1));
964  } else {
965  // Smi compared non-strictly with a non-Smi non-heap-number. Call
966  // the runtime.
967  __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
968  }
969 
970  // Lhs is a smi, rhs is a number.
971  // Convert smi lhs to double.
972  __ sra(at, lhs, kSmiTagSize);
973  __ mtc1(at, f12);
974  __ cvt_d_w(f12, f12);
975  __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
976  // Fall through to both_loaded_as_doubles.
977 }
978 
979 
980 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
981  Register lhs,
982  Register rhs) {
983  // If either operand is a JS object or an oddball value, then they are
984  // not equal since their pointers are different.
985  // There is no test for undetectability in strict equality.
986  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
987  Label first_non_object;
988  // Get the type of the first operand into a2 and compare it with
989  // FIRST_SPEC_OBJECT_TYPE.
990  __ GetObjectType(lhs, a2, a2);
991  __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
992 
993  // Return non-zero.
994  Label return_not_equal;
995  __ bind(&return_not_equal);
996  __ Ret(USE_DELAY_SLOT);
997  __ li(v0, Operand(1));
998 
999  __ bind(&first_non_object);
1000  // Check for oddballs: true, false, null, undefined.
1001  __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
1002 
1003  __ GetObjectType(rhs, a3, a3);
1004  __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
1005 
1006  // Check for oddballs: true, false, null, undefined.
1007  __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
1008 
1009  // Now that we have the types we might as well check for
1010  // internalized-internalized.
1011  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
1012  __ Or(a2, a2, Operand(a3));
1013  __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
1014  __ Branch(&return_not_equal, eq, at, Operand(zero_reg));
1015 }
1016 
1017 
1018 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
1019  Register lhs,
1020  Register rhs,
1021  Label* both_loaded_as_doubles,
1022  Label* not_heap_numbers,
1023  Label* slow) {
1024  __ GetObjectType(lhs, a3, a2);
1025  __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
1026  __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
1027  // If first was a heap number & second wasn't, go to slow case.
1028  __ Branch(slow, ne, a3, Operand(a2));
1029 
1030  // Both are heap numbers. Load them up then jump to the code we have
1031  // for that.
1032  __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1033  __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1034 
1035  __ jmp(both_loaded_as_doubles);
1036 }
1037 
1038 
1039 // Fast negative check for internalized-to-internalized equality.
1040 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
1041  Register lhs,
1042  Register rhs,
1043  Label* possible_strings,
1044  Label* not_both_strings) {
1045  ASSERT((lhs.is(a0) && rhs.is(a1)) ||
1046  (lhs.is(a1) && rhs.is(a0)));
1047 
1048  // a2 is object type of rhs.
1049  Label object_test;
1050  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
1051  __ And(at, a2, Operand(kIsNotStringMask));
1052  __ Branch(&object_test, ne, at, Operand(zero_reg));
1053  __ And(at, a2, Operand(kIsNotInternalizedMask));
1054  __ Branch(possible_strings, ne, at, Operand(zero_reg));
1055  __ GetObjectType(rhs, a3, a3);
1056  __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
1057  __ And(at, a3, Operand(kIsNotInternalizedMask));
1058  __ Branch(possible_strings, ne, at, Operand(zero_reg));
1059 
1060  // Both are internalized strings. We already checked they weren't the same
1061  // pointer so they are not equal.
1062  __ Ret(USE_DELAY_SLOT);
1063  __ li(v0, Operand(1)); // Non-zero indicates not equal.
1064 
1065  __ bind(&object_test);
1066  __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
1067  __ GetObjectType(rhs, a2, a3);
1068  __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
1069 
1070  // If both objects are undetectable, they are equal. Otherwise, they
1071  // are not equal, since they are different objects and an object is not
1072  // equal to undefined.
1073  __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
1074  __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
1075  __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
1076  __ and_(a0, a2, a3);
1077  __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
1078  __ Ret(USE_DELAY_SLOT);
1079  __ xori(v0, a0, 1 << Map::kIsUndetectable);
1080 }
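// A sketch of the closing bit trick (hypothetical helper, not V8 code): AND
// the two maps' bit fields, keep only the "is undetectable" bit, then XOR
// with that bit. The result is 0 (reported as equal) exactly when both
// objects are undetectable, and non-zero otherwise.
#include <cstdint>

static int CompareUndetectableObjects(uint8_t lhs_bit_field,
                                      uint8_t rhs_bit_field,
                                      uint8_t undetectable_mask) {  // mask for Map::kIsUndetectable
  uint8_t both = static_cast<uint8_t>(lhs_bit_field & rhs_bit_field);
  both = static_cast<uint8_t>(both & undetectable_mask);
  return both ^ undetectable_mask;  // 0 => equal, non-zero => not equal.
}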
1081 
1082 
1083 static void ICCompareStub_CheckInputType(MacroAssembler* masm,
1084  Register input,
1085  Register scratch,
1086  CompareIC::State expected,
1087  Label* fail) {
1088  Label ok;
1089  if (expected == CompareIC::SMI) {
1090  __ JumpIfNotSmi(input, fail);
1091  } else if (expected == CompareIC::NUMBER) {
1092  __ JumpIfSmi(input, &ok);
1093  __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
1094  DONT_DO_SMI_CHECK);
1095  }
1096  // We could be strict about internalized/string here, but as long as
1097  // hydrogen doesn't care, the stub doesn't have to care either.
1098  __ bind(&ok);
1099 }
1100 
1101 
1102 // On entry a1 and a0 are the values to be compared.
1103 // On exit v0 is 0, positive or negative to indicate the result of
1104 // the comparison.
1105 void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
1106  Register lhs = a1;
1107  Register rhs = a0;
1108  Condition cc = GetCondition();
1109 
1110  Label miss;
1111  ICCompareStub_CheckInputType(masm, lhs, a2, left_, &miss);
1112  ICCompareStub_CheckInputType(masm, rhs, a3, right_, &miss);
1113 
1114  Label slow; // Call builtin.
1115  Label not_smis, both_loaded_as_doubles;
1116 
1117  Label not_two_smis, smi_done;
1118  __ Or(a2, a1, a0);
1119  __ JumpIfNotSmi(a2, &not_two_smis);
1120  __ sra(a1, a1, 1);
1121  __ sra(a0, a0, 1);
1122  __ Ret(USE_DELAY_SLOT);
1123  __ subu(v0, a1, a0);
1124  __ bind(&not_two_smis);
1125 
1126  // NOTICE! This code is only reached after a smi-fast-case check, so
1127  // it is certain that at least one operand isn't a smi.
1128 
1129  // Handle the case where the objects are identical. Either returns the answer
1130  // or goes to slow. Only falls through if the objects were not identical.
1131  EmitIdenticalObjectComparison(masm, &slow, cc);
1132 
1133  // If either is a Smi (we know that not both are), then they can only
1134  // be strictly equal if the other is a HeapNumber.
1135  STATIC_ASSERT(kSmiTag == 0);
1136  ASSERT_EQ(0, Smi::FromInt(0));
1137  __ And(t2, lhs, Operand(rhs));
1138  __ JumpIfNotSmi(t2, &not_smis, t0);
1139  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
1140  // 1) Return the answer.
1141  // 2) Go to slow.
1142  // 3) Fall through to both_loaded_as_doubles.
1143  // 4) Jump to rhs_not_nan.
1144  // In cases 3 and 4 we have found out we were dealing with a number-number
1145  // comparison and the numbers have been loaded into f12 and f14 as doubles,
1146  // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
1147  EmitSmiNonsmiComparison(masm, lhs, rhs,
1148  &both_loaded_as_doubles, &slow, strict());
1149 
1150  __ bind(&both_loaded_as_doubles);
1151  // f12, f14 are the double representations of the left hand side
1152  // and the right hand side if we have FPU. Otherwise a2, a3 represent
1153  // left hand side and a0, a1 represent right hand side.
1154 
1155  Isolate* isolate = masm->isolate();
1156  Label nan;
1157  __ li(t0, Operand(LESS));
1158  __ li(t1, Operand(GREATER));
1159  __ li(t2, Operand(EQUAL));
1160 
1161  // Check if either rhs or lhs is NaN.
1162  __ BranchF(NULL, &nan, eq, f12, f14);
1163 
1164  // Check if LESS condition is satisfied. If true, move conditionally
1165  // result to v0.
1166  __ c(OLT, D, f12, f14);
1167  __ Movt(v0, t0);
1168  // Use the previous check to conditionally store the opposite condition
1169  // (GREATER) in v0. If rhs is equal to lhs, this will be corrected in the
1170  // next check.
1171  __ Movf(v0, t1);
1172  // Check if EQUAL condition is satisfied. If true, move conditionally
1173  // result to v0.
1174  __ c(EQ, D, f12, f14);
1175  __ Movt(v0, t2);
1176 
1177  __ Ret();
1178 
1179  __ bind(&nan);
1180  // NaN comparisons always fail.
1181  // Load whatever we need in v0 to make the comparison fail.
1182  ASSERT(is_int16(GREATER) && is_int16(LESS));
1183  __ Ret(USE_DELAY_SLOT);
1184  if (cc == lt || cc == le) {
1185  __ li(v0, Operand(GREATER));
1186  } else {
1187  __ li(v0, Operand(LESS));
1188  }
1189 
1190 
1191  __ bind(&not_smis);
1192  // At this point we know we are dealing with two different objects,
1193  // and neither of them is a Smi. The objects are in lhs_ and rhs_.
1194  if (strict()) {
1195  // This returns non-equal for some object types, or falls through if it
1196  // was not lucky.
1197  EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
1198  }
1199 
1200  Label check_for_internalized_strings;
1201  Label flat_string_check;
1202  // Check for heap-number-heap-number comparison. Can jump to slow case,
1203  // or load both doubles and jump to the code that handles
1204  // that case. If the inputs are not doubles then jumps to
1205  // check_for_internalized_strings.
1206  // In this case a2 will contain the type of lhs_.
1207  EmitCheckForTwoHeapNumbers(masm,
1208  lhs,
1209  rhs,
1210  &both_loaded_as_doubles,
1211  &check_for_internalized_strings,
1212  &flat_string_check);
1213 
1214  __ bind(&check_for_internalized_strings);
1215  if (cc == eq && !strict()) {
1216  // Returns an answer for two internalized strings or two
1217  // detectable objects.
1218  // Otherwise jumps to string case or not both strings case.
1219  // Assumes that a2 is the type of lhs_ on entry.
1220  EmitCheckForInternalizedStringsOrObjects(
1221  masm, lhs, rhs, &flat_string_check, &slow);
1222  }
1223 
1224  // Check for both being sequential ASCII strings, and inline if that is the
1225  // case.
1226  __ bind(&flat_string_check);
1227 
1228  __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, a2, a3, &slow);
1229 
1230  __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
1231  if (cc == eq) {
1232  StringCompareStub::GenerateFlatAsciiStringEquals(masm,
1233  lhs,
1234  rhs,
1235  a2,
1236  a3,
1237  t0);
1238  } else {
1239  StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
1240  lhs,
1241  rhs,
1242  a2,
1243  a3,
1244  t0,
1245  t1);
1246  }
1247  // Never falls through to here.
1248 
1249  __ bind(&slow);
1250  // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
1251  // a1 (rhs) second.
1252  __ Push(lhs, rhs);
1253  // Figure out which native to call and setup the arguments.
1254  Builtins::JavaScript native;
1255  if (cc == eq) {
1256  native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
1257  } else {
1258  native = Builtins::COMPARE;
1259  int ncr; // NaN compare result.
1260  if (cc == lt || cc == le) {
1261  ncr = GREATER;
1262  } else {
1263  ASSERT(cc == gt || cc == ge); // Remaining cases.
1264  ncr = LESS;
1265  }
1266  __ li(a0, Operand(Smi::FromInt(ncr)));
1267  __ push(a0);
1268  }
1269 
1270  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1271  // tagged as a small integer.
1272  __ InvokeBuiltin(native, JUMP_FUNCTION);
1273 
1274  __ bind(&miss);
1275  GenerateMiss(masm);
1276 }
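// A sketch of the Smi fast path at the top of GenerateGeneric (hypothetical
// helper, not V8 code): if both tagged words have the Smi tag bit clear, an
// arithmetic shift untags them and the sign of their difference is the
// comparison result, which is what the sra/sra/subu sequence computes.
#include <cstdint>

static bool SmiCompare(int32_t lhs_tagged, int32_t rhs_tagged, int32_t* result) {
  const int32_t kSmiTagMask = 1;
  if ((lhs_tagged | rhs_tagged) & kSmiTagMask) return false;  // not two Smis
  *result = (lhs_tagged >> 1) - (rhs_tagged >> 1);            // <0, 0 or >0
  return true;
}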
1277 
1278 
1279 void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
1280  __ mov(t9, ra);
1281  __ pop(ra);
1282  if (save_doubles_ == kSaveFPRegs) {
1283  __ PushSafepointRegistersAndDoubles();
1284  } else {
1285  __ PushSafepointRegisters();
1286  }
1287  __ Jump(t9);
1288 }
1289 
1290 
1291 void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
1292  __ mov(t9, ra);
1293  __ pop(ra);
1294  __ StoreToSafepointRegisterSlot(t9, t9);
1295  if (save_doubles_ == kSaveFPRegs) {
1296  __ PopSafepointRegistersAndDoubles();
1297  } else {
1298  __ PopSafepointRegisters();
1299  }
1300  __ Jump(t9);
1301 }
1302 
1303 
1304 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
1305  // We don't allow a GC during a store buffer overflow so there is no need to
1306  // store the registers in any particular way, but we do have to store and
1307  // restore them.
1308  __ MultiPush(kJSCallerSaved | ra.bit());
1309  if (save_doubles_ == kSaveFPRegs) {
1310  __ MultiPushFPU(kCallerSavedFPU);
1311  }
1312  const int argument_count = 1;
1313  const int fp_argument_count = 0;
1314  const Register scratch = a1;
1315 
1316  AllowExternalCallThatCantCauseGC scope(masm);
1317  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
1318  __ li(a0, Operand(ExternalReference::isolate_address(masm->isolate())));
1319  __ CallCFunction(
1320  ExternalReference::store_buffer_overflow_function(masm->isolate()),
1321  argument_count);
1322  if (save_doubles_ == kSaveFPRegs) {
1323  __ MultiPopFPU(kCallerSavedFPU);
1324  }
1325 
1326  __ MultiPop(kJSCallerSaved | ra.bit());
1327  __ Ret();
1328 }
1329 
1330 
1331 void MathPowStub::Generate(MacroAssembler* masm) {
1332  const Register base = a1;
1333  const Register exponent = a2;
1334  const Register heapnumbermap = t1;
1335  const Register heapnumber = v0;
1336  const DoubleRegister double_base = f2;
1337  const DoubleRegister double_exponent = f4;
1338  const DoubleRegister double_result = f0;
1339  const DoubleRegister double_scratch = f6;
1340  const FPURegister single_scratch = f8;
1341  const Register scratch = t5;
1342  const Register scratch2 = t3;
1343 
1344  Label call_runtime, done, int_exponent;
1345  if (exponent_type_ == ON_STACK) {
1346  Label base_is_smi, unpack_exponent;
1347  // The exponent and base are supplied as arguments on the stack.
1348  // This can only happen if the stub is called from non-optimized code.
1349  // Load input parameters from stack to double registers.
1350  __ lw(base, MemOperand(sp, 1 * kPointerSize));
1351  __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
1352 
1353  __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
1354 
1355  __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
1356  __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
1357  __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
1358 
1359  __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
1360  __ jmp(&unpack_exponent);
1361 
1362  __ bind(&base_is_smi);
1363  __ mtc1(scratch, single_scratch);
1364  __ cvt_d_w(double_base, single_scratch);
1365  __ bind(&unpack_exponent);
1366 
1367  __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
1368 
1369  __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
1370  __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
1371  __ ldc1(double_exponent,
1372  FieldMemOperand(exponent, HeapNumber::kValueOffset));
1373  } else if (exponent_type_ == TAGGED) {
1374  // Base is already in double_base.
1375  __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
1376 
1377  __ ldc1(double_exponent,
1378  FieldMemOperand(exponent, HeapNumber::kValueOffset));
1379  }
1380 
1381  if (exponent_type_ != INTEGER) {
1382  Label int_exponent_convert;
1383  // Detect integer exponents stored as double.
1384  __ EmitFPUTruncate(kRoundToMinusInf,
1385  scratch,
1386  double_exponent,
1387  at,
1388  double_scratch,
1389  scratch2,
1390  kCheckForInexactConversion);
1391  // scratch2 == 0 means there was no conversion error.
1392  __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
1393 
1394  if (exponent_type_ == ON_STACK) {
1395  // Detect square root case. Crankshaft detects constant +/-0.5 at
1396  // compile time and uses DoMathPowHalf instead. We then skip this check
1397  // for non-constant cases of +/-0.5 as these hardly occur.
1398  Label not_plus_half;
1399 
1400  // Test for 0.5.
1401  __ Move(double_scratch, 0.5);
1402  __ BranchF(USE_DELAY_SLOT,
1403  &not_plus_half,
1404  NULL,
1405  ne,
1406  double_exponent,
1407  double_scratch);
1408  // double_scratch can be overwritten in the delay slot.
1409  // Calculates square root of base. Check for the special case of
1410  // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
1411  __ Move(double_scratch, -V8_INFINITY);
1412  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
1413  __ neg_d(double_result, double_scratch);
1414 
1415  // Add +0 to convert -0 to +0.
1416  __ add_d(double_scratch, double_base, kDoubleRegZero);
1417  __ sqrt_d(double_result, double_scratch);
1418  __ jmp(&done);
1419 
1420  __ bind(&not_plus_half);
1421  __ Move(double_scratch, -0.5);
1422  __ BranchF(USE_DELAY_SLOT,
1423  &call_runtime,
1424  NULL,
1425  ne,
1426  double_exponent,
1427  double_scratch);
1428  // double_scratch can be overwritten in the delay slot.
1429  // Calculates square root of base. Check for the special case of
1430  // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
1431  __ Move(double_scratch, -V8_INFINITY);
1432  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
1433  __ Move(double_result, kDoubleRegZero);
1434 
1435  // Add +0 to convert -0 to +0.
1436  __ add_d(double_scratch, double_base, kDoubleRegZero);
1437  __ Move(double_result, 1);
1438  __ sqrt_d(double_scratch, double_scratch);
1439  __ div_d(double_result, double_result, double_scratch);
1440  __ jmp(&done);
1441  }
1442 
1443  __ push(ra);
1444  {
1445  AllowExternalCallThatCantCauseGC scope(masm);
1446  __ PrepareCallCFunction(0, 2, scratch2);
1447  __ MovToFloatParameters(double_base, double_exponent);
1448  __ CallCFunction(
1449  ExternalReference::power_double_double_function(masm->isolate()),
1450  0, 2);
1451  }
1452  __ pop(ra);
1453  __ MovFromFloatResult(double_result);
1454  __ jmp(&done);
1455 
1456  __ bind(&int_exponent_convert);
1457  }
1458 
1459  // Calculate power with integer exponent.
1460  __ bind(&int_exponent);
1461 
1462  // Get two copies of exponent in the registers scratch and exponent.
1463  if (exponent_type_ == INTEGER) {
1464  __ mov(scratch, exponent);
1465  } else {
1466  // Exponent has previously been stored into scratch as untagged integer.
1467  __ mov(exponent, scratch);
1468  }
1469 
1470  __ mov_d(double_scratch, double_base); // Back up base.
1471  __ Move(double_result, 1.0);
1472 
1473  // Get absolute value of exponent.
1474  Label positive_exponent;
1475  __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
1476  __ Subu(scratch, zero_reg, scratch);
1477  __ bind(&positive_exponent);
1478 
1479  Label while_true, no_carry, loop_end;
1480  __ bind(&while_true);
1481 
1482  __ And(scratch2, scratch, 1);
1483 
1484  __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
1485  __ mul_d(double_result, double_result, double_scratch);
1486  __ bind(&no_carry);
1487 
1488  __ sra(scratch, scratch, 1);
1489 
1490  __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
1491  __ mul_d(double_scratch, double_scratch, double_scratch);
1492 
1493  __ Branch(&while_true);
1494 
1495  __ bind(&loop_end);
1496 
1497  __ Branch(&done, ge, exponent, Operand(zero_reg));
1498  __ Move(double_scratch, 1.0);
1499  __ div_d(double_result, double_scratch, double_result);
1500  // Test whether result is zero. Bail out to check for subnormal result.
1501  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
1502  __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
1503 
1504  // double_exponent may not contain the exponent value if the input was a
1505  // smi. We set it with exponent value before bailing out.
1506  __ mtc1(exponent, single_scratch);
1507  __ cvt_d_w(double_exponent, single_scratch);
1508 
1509  // Returning or bailing out.
1510  Counters* counters = masm->isolate()->counters();
1511  if (exponent_type_ == ON_STACK) {
1512  // The arguments are still on the stack.
1513  __ bind(&call_runtime);
1514  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
1515 
1516  // The stub is called from non-optimized code, which expects the result
1517  // as heap number in exponent.
1518  __ bind(&done);
1519  __ AllocateHeapNumber(
1520  heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
1521  __ sdc1(double_result,
1522  FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
1523  ASSERT(heapnumber.is(v0));
1524  __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
1525  __ DropAndRet(2);
1526  } else {
1527  __ push(ra);
1528  {
1529  AllowExternalCallThatCantCauseGC scope(masm);
1530  __ PrepareCallCFunction(0, 2, scratch);
1531  __ MovToFloatParameters(double_base, double_exponent);
1532  __ CallCFunction(
1533  ExternalReference::power_double_double_function(masm->isolate()),
1534  0, 2);
1535  }
1536  __ pop(ra);
1537  __ MovFromFloatResult(double_result);
1538 
1539  __ bind(&done);
1540  __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
1541  __ Ret();
1542  }
1543 }
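// A sketch of the integer-exponent loop above (hypothetical helper, not V8
// code): square-and-multiply on the absolute exponent, then take the
// reciprocal for a negative exponent -- with the caveat the stub notes, that
// due to subnormals x^-y == (1/x)^y does not always hold, which is why the
// stub bails out to C when the final division underflows to zero.
static double PowIntExponent(double base, int exponent) {
  double result = 1.0;     // double_result
  double running = base;   // double_scratch
  int scratch = exponent < 0 ? -exponent : exponent;
  while (scratch != 0) {
    if (scratch & 1) result *= running;    // low bit set: multiply into result
    scratch >>= 1;
    if (scratch != 0) running *= running;  // square for the next bit
  }
  if (exponent < 0) result = 1.0 / result;
  return result;
}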
1544 
1545 
1546 bool CEntryStub::NeedsImmovableCode() {
1547  return true;
1548 }
1549 
1550 
1551 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
1561  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
1562 }
1563 
1564 
1565 void StoreRegistersStateStub::GenerateAheadOfTime(
1566  Isolate* isolate) {
1567  StoreRegistersStateStub stub1(kDontSaveFPRegs);
1568  stub1.GetCode(isolate);
1569  // Hydrogen code stubs need stub2 at snapshot time.
1570  StoreRegistersStateStub stub2(kSaveFPRegs);
1571  stub2.GetCode(isolate);
1572 }
1573 
1574 
1575 void RestoreRegistersStateStub::GenerateAheadOfTime(
1576  Isolate* isolate) {
1577  RestoreRegistersStateStub stub1(kDontSaveFPRegs);
1578  stub1.GetCode(isolate);
1579  // Hydrogen code stubs need stub2 at snapshot time.
1580  RestoreRegistersStateStub stub2(kSaveFPRegs);
1581  stub2.GetCode(isolate);
1582 }
1583 
1584 
1585 void CodeStub::GenerateFPStubs(Isolate* isolate) {
1586  SaveFPRegsMode mode = kSaveFPRegs;
1587  CEntryStub save_doubles(1, mode);
1588  StoreBufferOverflowStub stub(mode);
1589  // These stubs might already be in the snapshot, detect that and don't
1590  // regenerate, which would lead to code stub initialization state being messed
1591  // up.
1592  Code* save_doubles_code;
1593  if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
1594  save_doubles_code = *save_doubles.GetCode(isolate);
1595  }
1596  Code* store_buffer_overflow_code;
1597  if (!stub.FindCodeInCache(&store_buffer_overflow_code, isolate)) {
1598  store_buffer_overflow_code = *stub.GetCode(isolate);
1599  }
1600  isolate->set_fp_stubs_generated(true);
1601 }
1602 
1603 
1604 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
1605  CEntryStub stub(1, kDontSaveFPRegs);
1606  stub.GetCode(isolate);
1607 }
1608 
1609 
1610 void CEntryStub::GenerateCore(MacroAssembler* masm,
1611  Label* throw_normal_exception,
1612  Label* throw_termination_exception,
1613  bool do_gc,
1614  bool always_allocate) {
1615  // v0: result parameter for PerformGC, if any
1616  // s0: number of arguments including receiver (C callee-saved)
1617  // s1: pointer to the first argument (C callee-saved)
1618  // s2: pointer to builtin function (C callee-saved)
1619 
1620  Isolate* isolate = masm->isolate();
1621 
1622  if (do_gc) {
1623  // Move result passed in v0 into a0 to call PerformGC.
1624  __ mov(a0, v0);
1625  __ PrepareCallCFunction(2, 0, a1);
1626  __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
1627  __ CallCFunction(ExternalReference::perform_gc_function(isolate), 2, 0);
1628  }
1629 
1630  ExternalReference scope_depth =
1631  ExternalReference::heap_always_allocate_scope_depth(isolate);
1632  if (always_allocate) {
1633  __ li(a0, Operand(scope_depth));
1634  __ lw(a1, MemOperand(a0));
1635  __ Addu(a1, a1, Operand(1));
1636  __ sw(a1, MemOperand(a0));
1637  }
1638 
1639  // Prepare arguments for C routine.
1640  // a0 = argc
1641  __ mov(a0, s0);
1642  // a1 = argv (set in the delay slot after find_ra below).
1643 
1644  // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
1645  // also need to reserve the 4 argument slots on the stack.
1646 
1647  __ AssertStackIsAligned();
1648 
1649  __ li(a2, Operand(ExternalReference::isolate_address(isolate)));
1650 
1651  // To let the GC traverse the return address of the exit frames, we need to
1652  // know where the return address is. The CEntryStub is unmovable, so
1653  // we can store the address on the stack to be able to find it again and
1654  // we never have to restore it, because it will not change.
1655  { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
1656  // This branch-and-link sequence is needed to find the current PC on MIPS,
1657  // saved to the ra register.
1658  // Use masm-> here instead of the double-underscore macro since extra
1659  // coverage code can interfere with the proper calculation of ra.
1660  Label find_ra;
1661  masm->bal(&find_ra); // bal exposes branch delay slot.
1662  masm->mov(a1, s1);
1663  masm->bind(&find_ra);
1664 
1665  // Adjust the value in ra to point to the correct return location, 2nd
1666  // instruction past the real call into C code (the jalr(t9)), and push it.
1667  // This is the return address of the exit frame.
1668  const int kNumInstructionsToJump = 5;
1669  masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
1670  masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
1671  // Stack space reservation moved to the branch delay slot below.
1672  // Stack is still aligned.
1673 
1674  // Call the C routine.
1675  masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
1676  masm->jalr(t9);
1677  // Set up sp in the delay slot.
1678  masm->addiu(sp, sp, -kCArgsSlotsSize);
1679  // Make sure the stored 'ra' points to this position.
1680  ASSERT_EQ(kNumInstructionsToJump,
1681  masm->InstructionsGeneratedSince(&find_ra));
1682  }
1683 
1684  if (always_allocate) {
1685  // It's okay to clobber a2 and a3 here. v0 & v1 contain result.
1686  __ li(a2, Operand(scope_depth));
1687  __ lw(a3, MemOperand(a2));
1688  __ Subu(a3, a3, Operand(1));
1689  __ sw(a3, MemOperand(a2));
1690  }
1691 
1692  // Check for failure result.
1693  Label failure_returned;
1694  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
1695  __ addiu(a2, v0, 1);
1696  __ andi(t0, a2, kFailureTagMask);
1697  __ Branch(USE_DELAY_SLOT, &failure_returned, eq, t0, Operand(zero_reg));
1698  // Restore stack (remove arg slots) in branch delay slot.
1699  __ addiu(sp, sp, kCArgsSlotsSize);
1700 
1701 
1702  // Exit C frame and return.
1703  // v0:v1: result
1704  // sp: stack pointer
1705  // fp: frame pointer
1706  __ LeaveExitFrame(save_doubles_, s0, true, EMIT_RETURN);
1707 
1708  // Check if we should retry or throw exception.
1709  Label retry;
1710  __ bind(&failure_returned);
1712  __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
1713  __ Branch(&retry, eq, t0, Operand(zero_reg));
1714 
1715  // Retrieve the pending exception.
1716  __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1717  isolate)));
1718  __ lw(v0, MemOperand(t0));
1719 
1720  // Clear the pending exception.
1721  __ li(a3, Operand(isolate->factory()->the_hole_value()));
1722  __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1723  isolate)));
1724  __ sw(a3, MemOperand(t0));
1725 
1726  // Special handling of termination exceptions which are uncatchable
1727  // by JavaScript code.
1728  __ LoadRoot(t0, Heap::kTerminationExceptionRootIndex);
1729  __ Branch(throw_termination_exception, eq, v0, Operand(t0));
1730 
1731  // Handle normal exception.
1732  __ jmp(throw_normal_exception);
1733 
1734  __ bind(&retry);
1735  // Last failure (v0) will be moved to (a0) as the parameter when retrying.
1736 }
1737 
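// Editor's note: illustrative sketch, not part of code-stubs-mips.cc.
// The "addiu a2, v0, 1; andi t0, a2, kFailureTagMask" sequence above detects
// failure-tagged return values. Assuming the tagging scheme of this V8
// generation (2-bit failure tag 0b11, smi tag ...0, heap-object tag ...01),
// adding one clears the masked bits only for failure values:

#include <cstdint>

static const uint32_t kSketchFailureTagSize = 2;  // assumed tag width
static const uint32_t kSketchFailureTagMask = (1u << kSketchFailureTagSize) - 1;

static bool SketchIsFailureTagged(uint32_t raw_value) {
  // Failure (low bits 0b11): +1 gives 0b00 under the mask -> true.
  // Smi (...0) or heap object (...01): +1 leaves a non-zero masked value.
  return ((raw_value + 1) & kSketchFailureTagMask) == 0;
}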
1738 
1739 void CEntryStub::Generate(MacroAssembler* masm) {
1740  // Called from JavaScript; parameters are on stack as if calling JS function
1741  // s0: number of arguments including receiver
1742  // s1: size of arguments excluding receiver
1743  // s2: pointer to builtin function
1744  // fp: frame pointer (restored after C call)
1745  // sp: stack pointer (restored as callee's sp after C call)
1746  // cp: current context (C callee-saved)
1747 
1749 
1750  // NOTE: Invocations of builtins may return failure objects
1751  // instead of a proper result. The builtin entry handles
1752  // this by performing a garbage collection and retrying the
1753  // builtin once.
1754 
1755  // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
1756  // The reason for this is that these arguments would need to be saved anyway
1757  // so it's faster to set them up directly.
1758  // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
1759 
1760  // Compute the argv pointer in a callee-saved register.
1761  __ Addu(s1, sp, s1);
1762 
1763  // Enter the exit frame that transitions from JavaScript to C++.
1764  FrameScope scope(masm, StackFrame::MANUAL);
1765  __ EnterExitFrame(save_doubles_);
1766 
1767  // s0: number of arguments (C callee-saved)
1768  // s1: pointer to first argument (C callee-saved)
1769  // s2: pointer to builtin function (C callee-saved)
1770 
1771  Label throw_normal_exception;
1772  Label throw_termination_exception;
1773 
1774  // Call into the runtime system.
1775  GenerateCore(masm,
1776  &throw_normal_exception,
1777  &throw_termination_exception,
1778  false,
1779  false);
1780 
1781  // Do space-specific GC and retry runtime call.
1782  GenerateCore(masm,
1783  &throw_normal_exception,
1784  &throw_termination_exception,
1785  true,
1786  false);
1787 
1788  // Do full GC and retry runtime call one final time.
1789  Failure* failure = Failure::InternalError();
1790  __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
1791  GenerateCore(masm,
1792  &throw_normal_exception,
1793  &throw_termination_exception,
1794  true,
1795  true);
1796 
1797  { FrameScope scope(masm, StackFrame::MANUAL);
1798  __ PrepareCallCFunction(0, v0);
1799  __ CallCFunction(
1800  ExternalReference::out_of_memory_function(masm->isolate()), 0);
1801  }
1802 
1803  __ bind(&throw_termination_exception);
1804  __ ThrowUncatchable(v0);
1805 
1806  __ bind(&throw_normal_exception);
1807  __ Throw(v0);
1808 }
1809 
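// Editor's note: illustrative sketch, not part of code-stubs-mips.cc.
// Generate() emits three GenerateCore calls that form an escalating retry
// policy around the runtime call: plain call, then GC and retry, then full
// GC with always-allocate, and finally an out-of-memory report. A
// stand-alone model of that control flow (all names are hypothetical):

#include <functional>

enum class SketchOutcome { kSuccess, kRetryableFailure, kException };

static SketchOutcome SketchRunWithEscalatingGC(
    const std::function<SketchOutcome(bool do_gc, bool always_allocate)>&
        attempt,
    const std::function<void()>& report_out_of_memory) {
  SketchOutcome r = attempt(false, false);  // first GenerateCore call
  if (r != SketchOutcome::kRetryableFailure) return r;
  r = attempt(true, false);                 // GC, then retry
  if (r != SketchOutcome::kRetryableFailure) return r;
  r = attempt(true, true);                  // full GC + always-allocate
  if (r != SketchOutcome::kRetryableFailure) return r;
  report_out_of_memory();                   // still failing: report OOM
  return r;
}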
1810 
1811 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
1812  Label invoke, handler_entry, exit;
1813  Isolate* isolate = masm->isolate();
1814 
1815  // Registers:
1816  // a0: entry address
1817  // a1: function
1818  // a2: receiver
1819  // a3: argc
1820  //
1821  // Stack:
1822  // 4 args slots
1823  // args
1824 
1826 
1827  // Save callee saved registers on the stack.
1828  __ MultiPush(kCalleeSaved | ra.bit());
1829 
1830  // Save callee-saved FPU registers.
1831  __ MultiPushFPU(kCalleeSavedFPU);
1832  // Set up the reserved register for 0.0.
1833  __ Move(kDoubleRegZero, 0.0);
1834 
1835 
1836  // Load argv in s0 register.
1837  int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
1838  offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
1839 
1840  __ InitializeRootRegister();
1841  __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
1842 
1843  // We build an EntryFrame.
1844  __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
1845  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
1846  __ li(t2, Operand(Smi::FromInt(marker)));
1847  __ li(t1, Operand(Smi::FromInt(marker)));
1848  __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
1849  isolate)));
1850  __ lw(t0, MemOperand(t0));
1851  __ Push(t3, t2, t1, t0);
1852  // Set up frame pointer for the frame to be pushed.
1854 
1855  // Registers:
1856  // a0: entry_address
1857  // a1: function
1858  // a2: receiver_pointer
1859  // a3: argc
1860  // s0: argv
1861  //
1862  // Stack:
1863  // caller fp |
1864  // function slot | entry frame
1865  // context slot |
1866  // bad fp (0xff...f) |
1867  // callee saved registers + ra
1868  // 4 args slots
1869  // args
1870 
1871  // If this is the outermost JS call, set js_entry_sp value.
1872  Label non_outermost_js;
1873  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
1874  __ li(t1, Operand(ExternalReference(js_entry_sp)));
1875  __ lw(t2, MemOperand(t1));
1876  __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
1877  __ sw(fp, MemOperand(t1));
1878  __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1879  Label cont;
1880  __ b(&cont);
1881  __ nop(); // Branch delay slot nop.
1882  __ bind(&non_outermost_js);
1883  __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
1884  __ bind(&cont);
1885  __ push(t0);
1886 
1887  // Jump to a faked try block that does the invoke, with a faked catch
1888  // block that sets the pending exception.
1889  __ jmp(&invoke);
1890  __ bind(&handler_entry);
1891  handler_offset_ = handler_entry.pos();
1892  // Caught exception: Store result (exception) in the pending exception
1893  // field in the JSEnv and return a failure sentinel. Coming in here the
1894  // fp will be invalid because the PushTryHandler below sets it to 0 to
1895  // signal the existence of the JSEntry frame.
1896  __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1897  isolate)));
1898  __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
1899  __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
1900  __ b(&exit); // b exposes branch delay slot.
1901  __ nop(); // Branch delay slot nop.
1902 
1903  // Invoke: Link this frame into the handler chain. There's only one
1904  // handler block in this code object, so its index is 0.
1905  __ bind(&invoke);
1906  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
1907  // If an exception not caught by another handler occurs, this handler
1908  // returns control to the code after the bal(&invoke) above, which
1909  // restores all kCalleeSaved registers (including cp and fp) to their
1910  // saved values before returning a failure to C.
1911 
1912  // Clear any pending exceptions.
1913  __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
1914  __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1915  isolate)));
1916  __ sw(t1, MemOperand(t0));
1917 
1918  // Invoke the function by calling through JS entry trampoline builtin.
1919  // Notice that we cannot store a reference to the trampoline code directly in
1920  // this stub, because runtime stubs are not traversed when doing GC.
1921 
1922  // Registers:
1923  // a0: entry_address
1924  // a1: function
1925  // a2: receiver_pointer
1926  // a3: argc
1927  // s0: argv
1928  //
1929  // Stack:
1930  // handler frame
1931  // entry frame
1932  // callee saved registers + ra
1933  // 4 args slots
1934  // args
1935 
1936  if (is_construct) {
1937  ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
1938  isolate);
1939  __ li(t0, Operand(construct_entry));
1940  } else {
1941  ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
1942  __ li(t0, Operand(entry));
1943  }
1944  __ lw(t9, MemOperand(t0)); // Deref address.
1945 
1946  // Call JSEntryTrampoline.
1947  __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
1948  __ Call(t9);
1949 
1950  // Unlink this frame from the handler chain.
1951  __ PopTryHandler();
1952 
1953  __ bind(&exit); // v0 holds result
1954  // Check if the current stack frame is marked as the outermost JS frame.
1955  Label non_outermost_js_2;
1956  __ pop(t1);
1957  __ Branch(&non_outermost_js_2,
1958  ne,
1959  t1,
1960  Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1961  __ li(t1, Operand(ExternalReference(js_entry_sp)));
1962  __ sw(zero_reg, MemOperand(t1));
1963  __ bind(&non_outermost_js_2);
1964 
1965  // Restore the top frame descriptors from the stack.
1966  __ pop(t1);
1967  __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
1968  isolate)));
1969  __ sw(t1, MemOperand(t0));
1970 
1971  // Reset the stack to the callee saved registers.
1973 
1974  // Restore callee-saved fpu registers.
1975  __ MultiPopFPU(kCalleeSavedFPU);
1976 
1977  // Restore callee saved registers from the stack.
1978  __ MultiPop(kCalleeSaved | ra.bit());
1979  // Return.
1980  __ Jump(ra);
1981 }
1982 
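// Editor's note: illustrative sketch, not part of code-stubs-mips.cc.
// The js_entry_sp bookkeeping above tags only the outermost JS entry frame:
// the first entry records fp and pushes an OUTERMOST marker, nested entries
// push an INNER marker, and only the outermost exit clears js_entry_sp. A
// stand-alone model of that protocol (names are hypothetical):

#include <cstdint>

enum SketchEntryMarker { kSketchInnerJSEntry = 0, kSketchOutermostJSEntry = 1 };

static uintptr_t sketch_js_entry_sp = 0;  // models the kJSEntrySPAddress slot

static SketchEntryMarker SketchEnterJS(uintptr_t current_fp) {
  if (sketch_js_entry_sp == 0) {
    sketch_js_entry_sp = current_fp;  // outermost entry: remember the frame
    return kSketchOutermostJSEntry;
  }
  return kSketchInnerJSEntry;         // nested entry: leave the slot alone
}

static void SketchLeaveJS(SketchEntryMarker marker) {
  if (marker == kSketchOutermostJSEntry) sketch_js_entry_sp = 0;
}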
1983 
1984 // Uses registers a0 to t0.
1985 // Expected input (depending on whether args are in registers or on the stack):
1986 // * object: a0 or at sp + 1 * kPointerSize.
1987 // * function: a1 or at sp.
1988 //
1989 // An inlined call site may have been generated before calling this stub.
1990 // In this case the offset to the inline site to patch is passed on the stack,
1991 // in the safepoint slot for register t0.
1992 void InstanceofStub::Generate(MacroAssembler* masm) {
1993  // Call site inlining and patching implies arguments in registers.
1994  ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
1995  // ReturnTrueFalse is only implemented for inlined call sites.
1996  ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
1997 
1998  // Fixed register usage throughout the stub:
1999  const Register object = a0; // Object (lhs).
2000  Register map = a3; // Map of the object.
2001  const Register function = a1; // Function (rhs).
2002  const Register prototype = t0; // Prototype of the function.
2003  const Register inline_site = t5;
2004  const Register scratch = a2;
2005 
2006  const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize;
2007 
2008  Label slow, loop, is_instance, is_not_instance, not_js_object;
2009 
2010  if (!HasArgsInRegisters()) {
2011  __ lw(object, MemOperand(sp, 1 * kPointerSize));
2012  __ lw(function, MemOperand(sp, 0));
2013  }
2014 
2015  // Check that the left hand is a JS object and load map.
2016  __ JumpIfSmi(object, &not_js_object);
2017  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
2018 
2019  // If there is a call site cache don't look in the global cache, but do the
2020  // real lookup and update the call site cache.
2021  if (!HasCallSiteInlineCheck()) {
2022  Label miss;
2023  __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
2024  __ Branch(&miss, ne, function, Operand(at));
2025  __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
2026  __ Branch(&miss, ne, map, Operand(at));
2027  __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
2028  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2029 
2030  __ bind(&miss);
2031  }
2032 
2033  // Get the prototype of the function.
2034  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
2035 
2036  // Check that the function prototype is a JS object.
2037  __ JumpIfSmi(prototype, &slow);
2038  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
2039 
2040  // Update the global instanceof or call site inlined cache with the current
2041  // map and function. The cached answer will be set when it is known below.
2042  if (!HasCallSiteInlineCheck()) {
2043  __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
2044  __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
2045  } else {
2046  ASSERT(HasArgsInRegisters());
2047  // Patch the (relocated) inlined map check.
2048 
2049  // The offset was stored in t0 safepoint slot.
2050  // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
2051  __ LoadFromSafepointRegisterSlot(scratch, t0);
2052  __ Subu(inline_site, ra, scratch);
2053  // Get the map location in scratch and patch it.
2054  __ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch.
2055  __ sw(map, FieldMemOperand(scratch, Cell::kValueOffset));
2056  }
2057 
2058  // Register mapping: a3 is object map and t0 is function prototype.
2059  // Get prototype of object into a2.
2060  __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
2061 
2062  // We don't need map any more. Use it as a scratch register.
2063  Register scratch2 = map;
2064  map = no_reg;
2065 
2066  // Loop through the prototype chain looking for the function prototype.
2067  __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
2068  __ bind(&loop);
2069  __ Branch(&is_instance, eq, scratch, Operand(prototype));
2070  __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
2071  __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
2072  __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
2073  __ Branch(&loop);
2074 
2075  __ bind(&is_instance);
2076  ASSERT(Smi::FromInt(0) == 0);
2077  if (!HasCallSiteInlineCheck()) {
2078  __ mov(v0, zero_reg);
2079  __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
2080  } else {
2081  // Patch the call site to return true.
2082  __ LoadRoot(v0, Heap::kTrueValueRootIndex);
2083  __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
2084  // Get the boolean result location in scratch and patch it.
2085  __ PatchRelocatedValue(inline_site, scratch, v0);
2086 
2087  if (!ReturnTrueFalseObject()) {
2088  ASSERT_EQ(Smi::FromInt(0), 0);
2089  __ mov(v0, zero_reg);
2090  }
2091  }
2092  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2093 
2094  __ bind(&is_not_instance);
2095  if (!HasCallSiteInlineCheck()) {
2096  __ li(v0, Operand(Smi::FromInt(1)));
2097  __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
2098  } else {
2099  // Patch the call site to return false.
2100  __ LoadRoot(v0, Heap::kFalseValueRootIndex);
2101  __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
2102  // Get the boolean result location in scratch and patch it.
2103  __ PatchRelocatedValue(inline_site, scratch, v0);
2104 
2105  if (!ReturnTrueFalseObject()) {
2106  __ li(v0, Operand(Smi::FromInt(1)));
2107  }
2108  }
2109 
2110  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2111 
2112  Label object_not_null, object_not_null_or_smi;
2113  __ bind(&not_js_object);
2114  // Before null, smi and string value checks, check that the rhs is a function,
2115  // because for a non-function rhs an exception needs to be thrown.
2116  __ JumpIfSmi(function, &slow);
2117  __ GetObjectType(function, scratch2, scratch);
2118  __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
2119 
2120  // Null is not instance of anything.
2121  __ Branch(&object_not_null,
2122  ne,
2123  scratch,
2124  Operand(masm->isolate()->factory()->null_value()));
2125  __ li(v0, Operand(Smi::FromInt(1)));
2126  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2127 
2128  __ bind(&object_not_null);
2129  // Smi values are not instances of anything.
2130  __ JumpIfNotSmi(object, &object_not_null_or_smi);
2131  __ li(v0, Operand(Smi::FromInt(1)));
2132  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2133 
2134  __ bind(&object_not_null_or_smi);
2135  // String values are not instances of anything.
2136  __ IsObjectJSStringType(object, scratch, &slow);
2137  __ li(v0, Operand(Smi::FromInt(1)));
2138  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2139 
2140  // Slow-case. Tail call builtin.
2141  __ bind(&slow);
2142  if (!ReturnTrueFalseObject()) {
2143  if (HasArgsInRegisters()) {
2144  __ Push(a0, a1);
2145  }
2146  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
2147  } else {
2148  {
2149  FrameScope scope(masm, StackFrame::INTERNAL);
2150  __ Push(a0, a1);
2151  __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
2152  }
2153  __ mov(a0, v0);
2154  __ LoadRoot(v0, Heap::kTrueValueRootIndex);
2155  __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
2156  __ LoadRoot(v0, Heap::kFalseValueRootIndex);
2157  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2158  }
2159 }
2160 
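// Editor's note: illustrative sketch, not part of code-stubs-mips.cc.
// The loop in InstanceofStub::Generate is the ordinary prototype-chain walk
// behind `instanceof`: starting from the object's prototype, compare each
// link against the function's prototype until null terminates the chain.
// A minimal model (the stub itself encodes true/false as Smi 0/1 or as
// patched boolean objects):

struct SketchHeapObject {
  const SketchHeapObject* prototype;  // nullptr plays the role of null
};

static bool SketchIsInstance(const SketchHeapObject* object,
                             const SketchHeapObject* function_prototype) {
  for (const SketchHeapObject* p = object->prototype; p != nullptr;
       p = p->prototype) {
    if (p == function_prototype) return true;
  }
  return false;
}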
2161 
2162 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
2163  Label miss;
2164  Register receiver;
2165  if (kind() == Code::KEYED_LOAD_IC) {
2166  // ----------- S t a t e -------------
2167  // -- ra : return address
2168  // -- a0 : key
2169  // -- a1 : receiver
2170  // -----------------------------------
2171  __ Branch(&miss, ne, a0,
2172  Operand(masm->isolate()->factory()->prototype_string()));
2173  receiver = a1;
2174  } else {
2175  ASSERT(kind() == Code::LOAD_IC);
2176  // ----------- S t a t e -------------
2177  // -- a2 : name
2178  // -- ra : return address
2179  // -- a0 : receiver
2180  // -- sp[0] : receiver
2181  // -----------------------------------
2182  receiver = a0;
2183  }
2184 
2185  StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3, t0, &miss);
2186  __ bind(&miss);
2187  StubCompiler::TailCallBuiltin(
2189 }
2190 
2191 
2192 Register InstanceofStub::left() { return a0; }
2193 
2194 
2195 Register InstanceofStub::right() { return a1; }
2196 
2197 
2198 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
2199  // The displacement is the offset of the last parameter (if any)
2200  // relative to the frame pointer.
2201  const int kDisplacement =
2202  StandardFrameConstants::kCallerSPOffset - kPointerSize;
2203 
2204  // Check that the key is a smi.
2205  Label slow;
2206  __ JumpIfNotSmi(a1, &slow);
2207 
2208  // Check if the calling frame is an arguments adaptor frame.
2209  Label adaptor;
2212  __ Branch(&adaptor,
2213  eq,
2214  a3,
2216 
2217  // Check index (a1) against formal parameters count limit passed in
2218  // through register a0. Use unsigned comparison to get negative
2219  // check for free.
2220  __ Branch(&slow, hs, a1, Operand(a0));
2221 
2222  // Read the argument from the stack and return it.
2223  __ subu(a3, a0, a1);
2224  __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
2225  __ Addu(a3, fp, Operand(t3));
2226  __ Ret(USE_DELAY_SLOT);
2227  __ lw(v0, MemOperand(a3, kDisplacement));
2228 
2229  // Arguments adaptor case: Check index (a1) against actual arguments
2230  // limit found in the arguments adaptor frame. Use unsigned
2231  // comparison to get negative check for free.
2232  __ bind(&adaptor);
2234  __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
2235 
2236  // Read the argument from the adaptor frame and return it.
2237  __ subu(a3, a0, a1);
2238  __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
2239  __ Addu(a3, a2, Operand(t3));
2240  __ Ret(USE_DELAY_SLOT);
2241  __ lw(v0, MemOperand(a3, kDisplacement));
2242 
2243  // Slow-case: Handle non-smi or out-of-bounds access to arguments
2244  // by calling the runtime system.
2245  __ bind(&slow);
2246  __ push(a1);
2247  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
2248 }
2249 
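// Editor's note: illustrative sketch, not part of code-stubs-mips.cc.
// Both bounds checks above compare the index against the argument count with
// an unsigned comparison, so a negative index becomes a huge unsigned value
// and fails the same test -- no separate "index >= 0" check is needed. The
// same trick in plain C++:

#include <cstdint>

static bool SketchIndexInRange(int32_t index, int32_t count) {
  // A negative index casts to a value >= 0x80000000, which can never be
  // below a non-negative count, so one comparison covers both bounds.
  return static_cast<uint32_t>(index) < static_cast<uint32_t>(count);
}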
2250 
2251 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
2252  // sp[0] : number of parameters
2253  // sp[4] : receiver displacement
2254  // sp[8] : function
2255  // Check if the calling frame is an arguments adaptor frame.
2256  Label runtime;
2259  __ Branch(&runtime,
2260  ne,
2261  a2,
2263 
2264  // Patch the arguments.length and the parameters pointer in the current frame.
2266  __ sw(a2, MemOperand(sp, 0 * kPointerSize));
2267  __ sll(t3, a2, 1);
2268  __ Addu(a3, a3, Operand(t3));
2270  __ sw(a3, MemOperand(sp, 1 * kPointerSize));
2271 
2272  __ bind(&runtime);
2273  __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
2274 }
2275 
2276 
2277 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
2278  // Stack layout:
2279  // sp[0] : number of parameters (tagged)
2280  // sp[4] : address of receiver argument
2281  // sp[8] : function
2282  // Registers used over whole function:
2283  // t2 : allocated object (tagged)
2284  // t5 : mapped parameter count (tagged)
2285 
2286  __ lw(a1, MemOperand(sp, 0 * kPointerSize));
2287  // a1 = parameter count (tagged)
2288 
2289  // Check if the calling frame is an arguments adaptor frame.
2290  Label runtime;
2291  Label adaptor_frame, try_allocate;
2294  __ Branch(&adaptor_frame,
2295  eq,
2296  a2,
2298 
2299  // No adaptor, parameter count = argument count.
2300  __ mov(a2, a1);
2301  __ b(&try_allocate);
2302  __ nop(); // Branch delay slot nop.
2303 
2304  // We have an adaptor frame. Patch the parameters pointer.
2305  __ bind(&adaptor_frame);
2307  __ sll(t6, a2, 1);
2308  __ Addu(a3, a3, Operand(t6));
2309  __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
2310  __ sw(a3, MemOperand(sp, 1 * kPointerSize));
2311 
2312  // a1 = parameter count (tagged)
2313  // a2 = argument count (tagged)
2314  // Compute the mapped parameter count = min(a1, a2) in a1.
2315  Label skip_min;
2316  __ Branch(&skip_min, lt, a1, Operand(a2));
2317  __ mov(a1, a2);
2318  __ bind(&skip_min);
2319 
2320  __ bind(&try_allocate);
2321 
2322  // Compute the sizes of backing store, parameter map, and arguments object.
2323  // 1. Parameter map, has 2 extra words containing context and backing store.
2324  const int kParameterMapHeaderSize =
2326  // If there are no mapped parameters, we do not need the parameter_map.
2327  Label param_map_size;
2328  ASSERT_EQ(0, Smi::FromInt(0));
2329  __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
2330  __ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
2331  __ sll(t5, a1, 1);
2332  __ addiu(t5, t5, kParameterMapHeaderSize);
2333  __ bind(&param_map_size);
2334 
2335  // 2. Backing store.
2336  __ sll(t6, a2, 1);
2337  __ Addu(t5, t5, Operand(t6));
2338  __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
2339 
2340  // 3. Arguments object.
2341  __ Addu(t5, t5, Operand(Heap::kSloppyArgumentsObjectSize));
2342 
2343  // Do the allocation of all three objects in one go.
2344  __ Allocate(t5, v0, a3, t0, &runtime, TAG_OBJECT);
2345 
2346  // v0 = address of new object(s) (tagged)
2347  // a2 = argument count (tagged)
2348  // Get the arguments boilerplate from the current native context into t0.
2349  const int kNormalOffset =
2351  const int kAliasedOffset =
2353 
2356  Label skip2_ne, skip2_eq;
2357  __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
2358  __ lw(t0, MemOperand(t0, kNormalOffset));
2359  __ bind(&skip2_ne);
2360 
2361  __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
2362  __ lw(t0, MemOperand(t0, kAliasedOffset));
2363  __ bind(&skip2_eq);
2364 
2365  // v0 = address of new object (tagged)
2366  // a1 = mapped parameter count (tagged)
2367  // a2 = argument count (tagged)
2368  // t0 = address of boilerplate object (tagged)
2369  // Copy the JS object part.
2370  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
2371  __ lw(a3, FieldMemOperand(t0, i));
2372  __ sw(a3, FieldMemOperand(v0, i));
2373  }
2374 
2375  // Set up the callee in-object property.
2377  __ lw(a3, MemOperand(sp, 2 * kPointerSize));
2378  const int kCalleeOffset = JSObject::kHeaderSize +
2380  __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
2381 
2382  // Use the length (smi tagged) and set that as an in-object property too.
2384  const int kLengthOffset = JSObject::kHeaderSize +
2386  __ sw(a2, FieldMemOperand(v0, kLengthOffset));
2387 
2388  // Set up the elements pointer in the allocated arguments object.
2389  // If we allocated a parameter map, t0 will point there, otherwise
2390  // it will point to the backing store.
2391  __ Addu(t0, v0, Operand(Heap::kSloppyArgumentsObjectSize));
2393 
2394  // v0 = address of new object (tagged)
2395  // a1 = mapped parameter count (tagged)
2396  // a2 = argument count (tagged)
2397  // t0 = address of parameter map or backing store (tagged)
2398  // Initialize parameter map. If there are no mapped arguments, we're done.
2399  Label skip_parameter_map;
2400  Label skip3;
2401  __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
2402  // Move backing store address to a3, because it is
2403  // expected there when filling in the unmapped arguments.
2404  __ mov(a3, t0);
2405  __ bind(&skip3);
2406 
2407  __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
2408 
2409  __ LoadRoot(t2, Heap::kSloppyArgumentsElementsMapRootIndex);
2411  __ Addu(t2, a1, Operand(Smi::FromInt(2)));
2413  __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
2414  __ sll(t6, a1, 1);
2415  __ Addu(t2, t0, Operand(t6));
2416  __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
2417  __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
2418 
2419  // Copy the parameter slots and the holes in the arguments.
2420  // We need to fill in mapped_parameter_count slots. They index the context,
2421  // where parameters are stored in reverse order, at
2422  // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
2423  // The mapped parameters thus need to get indices
2424  // MIN_CONTEXT_SLOTS+parameter_count-1 ..
2425  // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
2426  // We loop from right to left.
2427  Label parameters_loop, parameters_test;
2428  __ mov(t2, a1);
2429  __ lw(t5, MemOperand(sp, 0 * kPointerSize));
2430  __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
2431  __ Subu(t5, t5, Operand(a1));
2432  __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
2433  __ sll(t6, t2, 1);
2434  __ Addu(a3, t0, Operand(t6));
2435  __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
2436 
2437  // t2 = loop variable (tagged)
2438  // a1 = mapping index (tagged)
2439  // a3 = address of backing store (tagged)
2440  // t0 = address of parameter map (tagged)
2441  // t1 = temporary scratch (a.o., for address calculation)
2442  // t3 = the hole value
2443  __ jmp(&parameters_test);
2444 
2445  __ bind(&parameters_loop);
2446  __ Subu(t2, t2, Operand(Smi::FromInt(1)));
2447  __ sll(t1, t2, 1);
2448  __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
2449  __ Addu(t6, t0, t1);
2450  __ sw(t5, MemOperand(t6));
2451  __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
2452  __ Addu(t6, a3, t1);
2453  __ sw(t3, MemOperand(t6));
2454  __ Addu(t5, t5, Operand(Smi::FromInt(1)));
2455  __ bind(&parameters_test);
2456  __ Branch(&parameters_loop, ne, t2, Operand(Smi::FromInt(0)));
2457 
2458  __ bind(&skip_parameter_map);
2459  // a2 = argument count (tagged)
2460  // a3 = address of backing store (tagged)
2461  // t1 = scratch
2462  // Copy arguments header and remaining slots (if there are any).
2463  __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
2466 
2467  Label arguments_loop, arguments_test;
2468  __ mov(t5, a1);
2469  __ lw(t0, MemOperand(sp, 1 * kPointerSize));
2470  __ sll(t6, t5, 1);
2471  __ Subu(t0, t0, Operand(t6));
2472  __ jmp(&arguments_test);
2473 
2474  __ bind(&arguments_loop);
2475  __ Subu(t0, t0, Operand(kPointerSize));
2476  __ lw(t2, MemOperand(t0, 0));
2477  __ sll(t6, t5, 1);
2478  __ Addu(t1, a3, Operand(t6));
2480  __ Addu(t5, t5, Operand(Smi::FromInt(1)));
2481 
2482  __ bind(&arguments_test);
2483  __ Branch(&arguments_loop, lt, t5, Operand(a2));
2484 
2485  // Return and remove the on-stack parameters.
2486  __ DropAndRet(3);
2487 
2488  // Do the runtime call to allocate the arguments object.
2489  // a2 = argument count (tagged)
2490  __ bind(&runtime);
2491  __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
2492  __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
2493 }
2494 
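// Editor's note: illustrative sketch, not part of code-stubs-mips.cc.
// GenerateNewSloppyFast allocates three objects in one go: an optional
// parameter map (a FixedArray with 2 extra words plus one slot per mapped
// parameter), the backing store (a FixedArray with one slot per argument),
// and the arguments object itself. A model of the byte-size computation,
// with hypothetical constants standing in for the V8 layout values:

static const int kSketchPointerSize = 4;                                  // MIPS32
static const int kSketchFixedArrayHeaderSize = 2 * kSketchPointerSize;    // assumed
static const int kSketchSloppyArgumentsObjectSize = 5 * kSketchPointerSize;  // assumed

static int SketchSloppyArgumentsAllocationSize(int parameter_count,
                                               int argument_count) {
  int mapped_count =
      parameter_count < argument_count ? parameter_count : argument_count;
  int parameter_map_size =
      mapped_count == 0
          ? 0
          : mapped_count * kSketchPointerSize +
                (kSketchFixedArrayHeaderSize + 2 * kSketchPointerSize);
  int backing_store_size =
      argument_count * kSketchPointerSize + kSketchFixedArrayHeaderSize;
  return parameter_map_size + backing_store_size +
         kSketchSloppyArgumentsObjectSize;
}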
2495 
2496 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
2497  // sp[0] : number of parameters
2498  // sp[4] : receiver displacement
2499  // sp[8] : function
2500  // Check if the calling frame is an arguments adaptor frame.
2501  Label adaptor_frame, try_allocate, runtime;
2504  __ Branch(&adaptor_frame,
2505  eq,
2506  a3,
2508 
2509  // Get the length from the frame.
2510  __ lw(a1, MemOperand(sp, 0));
2511  __ Branch(&try_allocate);
2512 
2513  // Patch the arguments.length and the parameters pointer.
2514  __ bind(&adaptor_frame);
2516  __ sw(a1, MemOperand(sp, 0));
2517  __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
2518  __ Addu(a3, a2, Operand(at));
2519 
2520  __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
2521  __ sw(a3, MemOperand(sp, 1 * kPointerSize));
2522 
2523  // Try the new space allocation. Start out with computing the size
2524  // of the arguments object and the elements array in words.
2525  Label add_arguments_object;
2526  __ bind(&try_allocate);
2527  __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
2528  __ srl(a1, a1, kSmiTagSize);
2529 
2530  __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
2531  __ bind(&add_arguments_object);
2532  __ Addu(a1, a1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
2533 
2534  // Do the allocation of both objects in one go.
2535  __ Allocate(a1, v0, a2, a3, &runtime,
2536  static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
2537 
2538  // Get the arguments boilerplate from the current native context.
2541  __ lw(t0, MemOperand(t0, Context::SlotOffset(
2543 
2544  // Copy the JS object part.
2545  __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
2546 
2547  // Get the length (smi tagged) and set that as an in-object property too.
2549  __ lw(a1, MemOperand(sp, 0 * kPointerSize));
2550  __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
2551  Heap::kArgumentsLengthIndex * kPointerSize));
2552 
2553  Label done;
2554  __ Branch(&done, eq, a1, Operand(zero_reg));
2555 
2556  // Get the parameters pointer from the stack.
2557  __ lw(a2, MemOperand(sp, 1 * kPointerSize));
2558 
2559  // Set up the elements pointer in the allocated arguments object and
2560  // initialize the header in the elements fixed array.
2561  __ Addu(t0, v0, Operand(Heap::kStrictArgumentsObjectSize));
2563  __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
2566  // Untag the length for the loop.
2567  __ srl(a1, a1, kSmiTagSize);
2568 
2569  // Copy the fixed array slots.
2570  Label loop;
2571  // Set up t0 to point to the first array slot.
2572  __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2573  __ bind(&loop);
2574  // Pre-decrement a2 with kPointerSize on each iteration.
2575  // Pre-decrement in order to skip receiver.
2576  __ Addu(a2, a2, Operand(-kPointerSize));
2577  __ lw(a3, MemOperand(a2));
2578  // Post-increment t0 with kPointerSize on each iteration.
2579  __ sw(a3, MemOperand(t0));
2580  __ Addu(t0, t0, Operand(kPointerSize));
2581  __ Subu(a1, a1, Operand(1));
2582  __ Branch(&loop, ne, a1, Operand(zero_reg));
2583 
2584  // Return and remove the on-stack parameters.
2585  __ bind(&done);
2586  __ DropAndRet(3);
2587 
2588  // Do the runtime call to allocate the arguments object.
2589  __ bind(&runtime);
2590  __ TailCallRuntime(Runtime::kHiddenNewStrictArgumentsFast, 3, 1);
2591 }
2592 
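// Editor's note: illustrative sketch, not part of code-stubs-mips.cc.
// GenerateNewStrict sizes its allocation in words (note SIZE_IN_WORDS in the
// Allocate call): the strict arguments object plus, only when there are
// arguments, a FixedArray header and one slot per argument. A model with
// hypothetical constants for the object sizes:

static const int kSketchFixedArrayHeaderWords = 2;       // map + length (assumed)
static const int kSketchStrictArgumentsObjectWords = 4;  // assumed

static int SketchStrictArgumentsSizeInWords(int argument_count) {
  int words = kSketchStrictArgumentsObjectWords;
  if (argument_count > 0) {
    words += kSketchFixedArrayHeaderWords + argument_count;
  }
  return words;
}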
2593 
2594 void RegExpExecStub::Generate(MacroAssembler* masm) {
2595  // Just jump directly to runtime if native RegExp is not selected at compile
2596  // time, or if regexp entry in generated code is turned off by a runtime
2597  // switch or at compilation.
2598 #ifdef V8_INTERPRETED_REGEXP
2599  __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
2600 #else // V8_INTERPRETED_REGEXP
2601 
2602  // Stack frame on entry.
2603  // sp[0]: last_match_info (expected JSArray)
2604  // sp[4]: previous index
2605  // sp[8]: subject string
2606  // sp[12]: JSRegExp object
2607 
2608  const int kLastMatchInfoOffset = 0 * kPointerSize;
2609  const int kPreviousIndexOffset = 1 * kPointerSize;
2610  const int kSubjectOffset = 2 * kPointerSize;
2611  const int kJSRegExpOffset = 3 * kPointerSize;
2612 
2613  Isolate* isolate = masm->isolate();
2614 
2615  Label runtime;
2616  // Allocation of registers for this function. These are in callee-saved
2617  // registers and will be preserved by the call to the native RegExp code, as
2618  // this code is called using the normal C calling convention. When calling
2619  // directly from generated code the native RegExp code will not do a GC and
2620  // therefore the contents of these registers are safe to use after the call.
2621  // MIPS - using s0..s2, since we are not using CEntry Stub.
2622  Register subject = s0;
2623  Register regexp_data = s1;
2624  Register last_match_info_elements = s2;
2625 
2626  // Ensure that a RegExp stack is allocated.
2627  ExternalReference address_of_regexp_stack_memory_address =
2628  ExternalReference::address_of_regexp_stack_memory_address(
2629  isolate);
2630  ExternalReference address_of_regexp_stack_memory_size =
2631  ExternalReference::address_of_regexp_stack_memory_size(isolate);
2632  __ li(a0, Operand(address_of_regexp_stack_memory_size));
2633  __ lw(a0, MemOperand(a0, 0));
2634  __ Branch(&runtime, eq, a0, Operand(zero_reg));
2635 
2636  // Check that the first argument is a JSRegExp object.
2637  __ lw(a0, MemOperand(sp, kJSRegExpOffset));
2638  STATIC_ASSERT(kSmiTag == 0);
2639  __ JumpIfSmi(a0, &runtime);
2640  __ GetObjectType(a0, a1, a1);
2641  __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
2642 
2643  // Check that the RegExp has been compiled (data contains a fixed array).
2644  __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
2645  if (FLAG_debug_code) {
2646  __ SmiTst(regexp_data, t0);
2647  __ Check(nz,
2648  kUnexpectedTypeForRegExpDataFixedArrayExpected,
2649  t0,
2650  Operand(zero_reg));
2651  __ GetObjectType(regexp_data, a0, a0);
2652  __ Check(eq,
2653  kUnexpectedTypeForRegExpDataFixedArrayExpected,
2654  a0,
2655  Operand(FIXED_ARRAY_TYPE));
2656  }
2657 
2658  // regexp_data: RegExp data (FixedArray)
2659  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2660  __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
2661  __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
2662 
2663  // regexp_data: RegExp data (FixedArray)
2664  // Check that the number of captures fit in the static offsets vector buffer.
2665  __ lw(a2,
2667  // Check (number_of_captures + 1) * 2 <= offsets vector size
2668  // Or number_of_captures * 2 <= offsets vector size - 2
2669  // Multiplying by 2 comes for free since a2 is smi-tagged.
2670  STATIC_ASSERT(kSmiTag == 0);
2673  __ Branch(
2674  &runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
2675 
2676  // Reset offset for possibly sliced string.
2677  __ mov(t0, zero_reg);
2678  __ lw(subject, MemOperand(sp, kSubjectOffset));
2679  __ JumpIfSmi(subject, &runtime);
2680  __ mov(a3, subject); // Make a copy of the original subject string.
2681  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2683  // subject: subject string
2684  // a3: subject string
2685  // a0: subject string instance type
2686  // regexp_data: RegExp data (FixedArray)
2687  // Handle subject string according to its encoding and representation:
2688  // (1) Sequential string? If yes, go to (5).
2689  // (2) Anything but sequential or cons? If yes, go to (6).
2690  // (3) Cons string. If the string is flat, replace subject with first string.
2691  // Otherwise bailout.
2692  // (4) Is subject external? If yes, go to (7).
2693  // (5) Sequential string. Load regexp code according to encoding.
2694  // (E) Carry on.
2696 
2697  // Deferred code at the end of the stub:
2698  // (6) Not a long external string? If yes, go to (8).
2699  // (7) External string. Make it, offset-wise, look like a sequential string.
2700  // Go to (5).
2701  // (8) Short external string or not a string? If yes, bail out to runtime.
2702  // (9) Sliced string. Replace subject with parent. Go to (4).
2703 
2704  Label seq_string /* 5 */, external_string /* 7 */,
2705  check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
2706  not_long_external /* 8 */;
2707 
2708  // (1) Sequential string? If yes, go to (5).
2709  __ And(a1,
2710  a0,
2711  Operand(kIsNotStringMask |
2715  __ Branch(&seq_string, eq, a1, Operand(zero_reg)); // Go to (5).
2716 
2717  // (2) Anything but sequential or cons? If yes, go to (6).
2722  // Go to (6).
2723  __ Branch(&not_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
2724 
2725  // (3) Cons string. Check that it's flat.
2726  // Replace subject with first string and reload instance type.
2727  __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
2728  __ LoadRoot(a1, Heap::kempty_stringRootIndex);
2729  __ Branch(&runtime, ne, a0, Operand(a1));
2730  __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
2731 
2732  // (4) Is subject external? If yes, go to (7).
2733  __ bind(&check_underlying);
2734  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2737  __ And(at, a0, Operand(kStringRepresentationMask));
2738  // The underlying external string is never a short external string.
2741  __ Branch(&external_string, ne, at, Operand(zero_reg)); // Go to (7).
2742 
2743  // (5) Sequential string. Load regexp code according to encoding.
2744  __ bind(&seq_string);
2745  // subject: sequential subject string (or look-alike, external string)
2746  // a3: original subject string
2747  // Load previous index and check range before a3 is overwritten. We have to
2748  // use a3 instead of subject here because subject might have been only made
2749  // to look like a sequential string when it actually is an external string.
2750  __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
2751  __ JumpIfNotSmi(a1, &runtime);
2753  __ Branch(&runtime, ls, a3, Operand(a1));
2754  __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
2755 
2759  __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII.
2760  __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
2761  __ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
2762  __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
2763  __ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
2764 
2765  // (E) Carry on. String handling is done.
2766  // t9: irregexp code
2767  // Check that the irregexp code has been generated for the actual string
2768  // encoding. If it has, the field contains a code object; otherwise it contains
2769  // a smi (code flushing support).
2770  __ JumpIfSmi(t9, &runtime);
2771 
2772  // a1: previous index
2773  // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
2774  // t9: code
2775  // subject: Subject string
2776  // regexp_data: RegExp data (FixedArray)
2777  // All checks done. Now push arguments for native regexp code.
2778  __ IncrementCounter(isolate->counters()->regexp_entry_native(),
2779  1, a0, a2);
2780 
2781  // Isolates: note we add an additional parameter here (isolate pointer).
2782  const int kRegExpExecuteArguments = 9;
2783  const int kParameterRegisters = 4;
2784  __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
2785 
2786  // Stack pointer now points to cell where return address is to be written.
2787  // Arguments are before that on the stack or in registers, meaning we
2788  // treat the return address as argument 5. Thus every argument after that
2789  // needs to be shifted back by 1. Since DirectCEntryStub will handle
2790  // allocating space for the C argument slots, we don't need to calculate
2791  // that into the argument positions on the stack. This is how the stack will
2792  // look (sp meaning the value of sp at this moment):
2793  // [sp + 5] - Argument 9
2794  // [sp + 4] - Argument 8
2795  // [sp + 3] - Argument 7
2796  // [sp + 2] - Argument 6
2797  // [sp + 1] - Argument 5
2798  // [sp + 0] - saved ra
2799 
2800  // Argument 9: Pass current isolate address.
2801  // CFunctionArgumentOperand handles MIPS stack argument slots.
2802  __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
2803  __ sw(a0, MemOperand(sp, 5 * kPointerSize));
2804 
2805  // Argument 8: Indicate that this is a direct call from JavaScript.
2806  __ li(a0, Operand(1));
2807  __ sw(a0, MemOperand(sp, 4 * kPointerSize));
2808 
2809  // Argument 7: Start (high end) of backtracking stack memory area.
2810  __ li(a0, Operand(address_of_regexp_stack_memory_address));
2811  __ lw(a0, MemOperand(a0, 0));
2812  __ li(a2, Operand(address_of_regexp_stack_memory_size));
2813  __ lw(a2, MemOperand(a2, 0));
2814  __ addu(a0, a0, a2);
2815  __ sw(a0, MemOperand(sp, 3 * kPointerSize));
2816 
2817  // Argument 6: Set the number of capture registers to zero to force global
2818  // regexps to behave as non-global. This does not affect non-global regexps.
2819  __ mov(a0, zero_reg);
2820  __ sw(a0, MemOperand(sp, 2 * kPointerSize));
2821 
2822  // Argument 5: static offsets vector buffer.
2823  __ li(a0, Operand(
2824  ExternalReference::address_of_static_offsets_vector(isolate)));
2825  __ sw(a0, MemOperand(sp, 1 * kPointerSize));
2826 
2827  // For arguments 4 and 3 get string length, calculate start of string data
2828  // and calculate the shift of the index (0 for ASCII and 1 for two byte).
2829  __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
2830  __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
2831  // Load the length from the original subject string from the previous stack
2832  // frame. Therefore we have to use fp, which points exactly to two pointer
2833  // sizes below the previous sp. (Because creating a new stack frame pushes
2834  // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
2835  __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
2836  // If slice offset is not 0, load the length from the original sliced string.
2837  // Argument 4, a3: End of string data
2838  // Argument 3, a2: Start of string data
2839  // Prepare start and end index of the input.
2840  __ sllv(t1, t0, a3);
2841  __ addu(t0, t2, t1);
2842  __ sllv(t1, a1, a3);
2843  __ addu(a2, t0, t1);
2844 
2845  __ lw(t2, FieldMemOperand(subject, String::kLengthOffset));
2846  __ sra(t2, t2, kSmiTagSize);
2847  __ sllv(t1, t2, a3);
2848  __ addu(a3, t0, t1);
2849  // Argument 2 (a1): Previous index.
2850  // Already there
2851 
2852  // Argument 1 (a0): Subject string.
2853  __ mov(a0, subject);
2854 
2855  // Locate the code entry and call it.
2856  __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
2857  DirectCEntryStub stub;
2858  stub.GenerateCall(masm, t9);
2859 
2860  __ LeaveExitFrame(false, no_reg, true);
2861 
2862  // v0: result
2863  // subject: subject string (callee saved)
2864  // regexp_data: RegExp data (callee saved)
2865  // last_match_info_elements: Last match info elements (callee saved)
2866  // Check the result.
2867  Label success;
2868  __ Branch(&success, eq, v0, Operand(1));
2869  // We expect exactly one result since we force the called regexp to behave
2870  // as non-global.
2871  Label failure;
2872  __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
2873  // If not an exception, it can only be a retry. Handle that in the runtime system.
2874  __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
2875  // Result must now be exception. If there is no pending exception already, a
2876  // stack overflow (on the backtrack stack) was detected in RegExp code but
2877  // the exception has not been created yet. Handle that in the runtime system.
2878  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
2879  __ li(a1, Operand(isolate->factory()->the_hole_value()));
2880  __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
2881  isolate)));
2882  __ lw(v0, MemOperand(a2, 0));
2883  __ Branch(&runtime, eq, v0, Operand(a1));
2884 
2885  __ sw(a1, MemOperand(a2, 0)); // Clear pending exception.
2886 
2887  // Check if the exception is a termination. If so, throw as uncatchable.
2888  __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
2889  Label termination_exception;
2890  __ Branch(&termination_exception, eq, v0, Operand(a0));
2891 
2892  __ Throw(v0);
2893 
2894  __ bind(&termination_exception);
2895  __ ThrowUncatchable(v0);
2896 
2897  __ bind(&failure);
2898  // For failure and exception return null.
2899  __ li(v0, Operand(isolate->factory()->null_value()));
2900  __ DropAndRet(4);
2901 
2902  // Process the result from the native regexp code.
2903  __ bind(&success);
2904  __ lw(a1,
2906  // Calculate number of capture registers (number_of_captures + 1) * 2.
2907  // Multiplying by 2 comes for free since a1 is smi-tagged.
2908  STATIC_ASSERT(kSmiTag == 0);
2910  __ Addu(a1, a1, Operand(2)); // a1 was a smi.
2911 
2912  __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
2913  __ JumpIfSmi(a0, &runtime);
2914  __ GetObjectType(a0, a2, a2);
2915  __ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE));
2916  // Check that the JSArray is in fast case.
2917  __ lw(last_match_info_elements,
2919  __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
2920  __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
2921  __ Branch(&runtime, ne, a0, Operand(at));
2922  // Check that the last match info has space for the capture registers and the
2923  // additional information.
2924  __ lw(a0,
2925  FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
2926  __ Addu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
2927  __ sra(at, a0, kSmiTagSize);
2928  __ Branch(&runtime, gt, a2, Operand(at));
2929 
2930  // a1: number of capture registers
2931  // subject: subject string
2932  // Store the capture count.
2933  __ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi.
2934  __ sw(a2, FieldMemOperand(last_match_info_elements,
2936  // Store last subject and last input.
2937  __ sw(subject,
2938  FieldMemOperand(last_match_info_elements,
2940  __ mov(a2, subject);
2941  __ RecordWriteField(last_match_info_elements,
2943  subject,
2944  t3,
2946  kDontSaveFPRegs);
2947  __ mov(subject, a2);
2948  __ sw(subject,
2949  FieldMemOperand(last_match_info_elements,
2951  __ RecordWriteField(last_match_info_elements,
2953  subject,
2954  t3,
2956  kDontSaveFPRegs);
2957 
2958  // Get the static offsets vector filled by the native regexp code.
2959  ExternalReference address_of_static_offsets_vector =
2960  ExternalReference::address_of_static_offsets_vector(isolate);
2961  __ li(a2, Operand(address_of_static_offsets_vector));
2962 
2963  // a1: number of capture registers
2964  // a2: offsets vector
2965  Label next_capture, done;
2966  // Capture register counter starts from number of capture registers and
2967  // counts down until wrapping after zero.
2968  __ Addu(a0,
2969  last_match_info_elements,
2971  __ bind(&next_capture);
2972  __ Subu(a1, a1, Operand(1));
2973  __ Branch(&done, lt, a1, Operand(zero_reg));
2974  // Read the value from the static offsets vector buffer.
2975  __ lw(a3, MemOperand(a2, 0));
2976  __ addiu(a2, a2, kPointerSize);
2977  // Store the smi value in the last match info.
2978  __ sll(a3, a3, kSmiTagSize); // Convert to Smi.
2979  __ sw(a3, MemOperand(a0, 0));
2980  __ Branch(&next_capture, USE_DELAY_SLOT);
2981  __ addiu(a0, a0, kPointerSize); // In branch delay slot.
2982 
2983  __ bind(&done);
2984 
2985  // Return last match info.
2986  __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
2987  __ DropAndRet(4);
2988 
2989  // Do the runtime call to execute the regexp.
2990  __ bind(&runtime);
2991  __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
2992 
2993  // Deferred code for string handling.
2994  // (6) Not a long external string? If yes, go to (8).
2995  __ bind(&not_seq_nor_cons);
2996  // Go to (8).
2997  __ Branch(&not_long_external, gt, a1, Operand(kExternalStringTag));
2998 
2999  // (7) External string. Make it, offset-wise, look like a sequential string.
3000  __ bind(&external_string);
3001  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
3003  if (FLAG_debug_code) {
3004  // Assert that we do not have a cons or slice (indirect strings) here.
3005  // Sequential strings have already been ruled out.
3006  __ And(at, a0, Operand(kIsIndirectStringMask));
3007  __ Assert(eq,
3008  kExternalStringExpectedButNotFound,
3009  at,
3010  Operand(zero_reg));
3011  }
3012  __ lw(subject,
3014  // Move the pointer so that offset-wise, it looks like a sequential string.
3016  __ Subu(subject,
3017  subject,
3019  __ jmp(&seq_string); // Go to (5).
3020 
3021  // (8) Short external string or not a string? If yes, bail out to runtime.
3022  __ bind(&not_long_external);
3024  __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
3025  __ Branch(&runtime, ne, at, Operand(zero_reg));
3026 
3027  // (9) Sliced string. Replace subject with parent. Go to (4).
3028  // Load offset into t0 and replace subject string with parent.
3030  __ sra(t0, t0, kSmiTagSize);
3031  __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
3032  __ jmp(&check_underlying); // Go to (4).
3033 #endif // V8_INTERPRETED_REGEXP
3034 }
3035 
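// Editor's note: illustrative sketch, not part of code-stubs-mips.cc.
// Arguments 3 and 4 of the native RegExp call are raw character pointers.
// As computed above, the slice offset, the previous index and the string
// length are each scaled by the character size (shift 0 for one-byte, 1 for
// two-byte strings) and added to the start of the character payload. A model
// of that address arithmetic (names are hypothetical):

#include <cstdint>

struct SketchRegExpInput {
  const uint8_t* from;  // argument 3: first character to scan
  const uint8_t* to;    // argument 4: one past the last character
};

static SketchRegExpInput SketchComputeRegExpInput(const uint8_t* payload_start,
                                                  int slice_offset,
                                                  int previous_index,
                                                  int length,
                                                  int char_size_shift) {
  const uint8_t* start = payload_start + (slice_offset << char_size_shift);
  SketchRegExpInput input;
  input.from = start + (previous_index << char_size_shift);
  input.to = start + (length << char_size_shift);
  return input;
}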
3036 
3037 static void GenerateRecordCallTarget(MacroAssembler* masm) {
3038  // Cache the called function in a feedback vector slot. Cache states
3039  // are uninitialized, monomorphic (indicated by a JSFunction), and
3040  // megamorphic.
3041  // a0 : number of arguments to the construct function
3042  // a1 : the function to call
3043  // a2 : Feedback vector
3044  // a3 : slot in feedback vector (Smi)
3045  Label initialize, done, miss, megamorphic, not_array_function;
3046 
3048  masm->isolate()->heap()->megamorphic_symbol());
3050  masm->isolate()->heap()->uninitialized_symbol());
3051 
3052  // Load the cache state into t0.
3053  __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
3054  __ Addu(t0, a2, Operand(t0));
3056 
3057  // A monomorphic cache hit or an already megamorphic state: invoke the
3058  // function without changing the state.
3059  __ Branch(&done, eq, t0, Operand(a1));
3060 
3061  if (!FLAG_pretenuring_call_new) {
3062  // If we came here, we need to see if we are the array function.
3063  // If we didn't have a matching function, and we didn't find the megamorphic
3064  // sentinel, then we have in the slot either some other function or an
3065  // AllocationSite. Do a map check on the object in t0.
3066  __ lw(t1, FieldMemOperand(t0, 0));
3067  __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
3068  __ Branch(&miss, ne, t1, Operand(at));
3069 
3070  // Make sure the function is the Array() function
3071  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
3072  __ Branch(&megamorphic, ne, a1, Operand(t0));
3073  __ jmp(&done);
3074  }
3075 
3076  __ bind(&miss);
3077 
3078  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
3079  // megamorphic.
3080  __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex);
3081  __ Branch(&initialize, eq, t0, Operand(at));
3082  // MegamorphicSentinel is an immortal immovable object (undefined) so no
3083  // write-barrier is needed.
3084  __ bind(&megamorphic);
3085  __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
3086  __ Addu(t0, a2, Operand(t0));
3087  __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
3089  __ jmp(&done);
3090 
3091  // An uninitialized cache is patched with the function.
3092  __ bind(&initialize);
3093  if (!FLAG_pretenuring_call_new) {
3094  // Make sure the function is the Array() function.
3095  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
3096  __ Branch(&not_array_function, ne, a1, Operand(t0));
3097 
3098  // The target function is the Array constructor;
3099  // create an AllocationSite if we don't already have it, and store it in the
3100  // slot.
3101  {
3102  FrameScope scope(masm, StackFrame::INTERNAL);
3103  const RegList kSavedRegs =
3104  1 << 4 | // a0
3105  1 << 5 | // a1
3106  1 << 6 | // a2
3107  1 << 7; // a3
3108 
3109  // Arguments register must be smi-tagged to call out.
3110  __ SmiTag(a0);
3111  __ MultiPush(kSavedRegs);
3112 
3113  CreateAllocationSiteStub create_stub;
3114  __ CallStub(&create_stub);
3115 
3116  __ MultiPop(kSavedRegs);
3117  __ SmiUntag(a0);
3118  }
3119  __ Branch(&done);
3120 
3121  __ bind(&not_array_function);
3122  }
3123 
3124  __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
3125  __ Addu(t0, a2, Operand(t0));
3126  __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
3127  __ sw(a1, MemOperand(t0, 0));
3128 
3129  __ Push(t0, a2, a1);
3130  __ RecordWrite(a2, t0, a1, kRAHasNotBeenSaved, kDontSaveFPRegs,
3132  __ Pop(t0, a2, a1);
3133 
3134  __ bind(&done);
3135 }
3136 
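// Editor's note: illustrative sketch, not part of code-stubs-mips.cc.
// GenerateRecordCallTarget maintains a small per-call-site state machine in
// the feedback vector slot: uninitialized -> monomorphic (a JSFunction, or an
// AllocationSite when the target is the Array function) -> megamorphic on any
// mismatch, with megamorphic as a terminal state. A model of the transitions:

enum class SketchFeedbackState { kUninitialized, kMonomorphic, kMegamorphic };

static SketchFeedbackState SketchRecordCallTarget(
    SketchFeedbackState state, bool same_target_as_recorded) {
  switch (state) {
    case SketchFeedbackState::kUninitialized:
      return SketchFeedbackState::kMonomorphic;       // remember this target
    case SketchFeedbackState::kMonomorphic:
      return same_target_as_recorded
                 ? SketchFeedbackState::kMonomorphic  // cache hit, unchanged
                 : SketchFeedbackState::kMegamorphic; // mismatch: generalize
    case SketchFeedbackState::kMegamorphic:
    default:
      return SketchFeedbackState::kMegamorphic;       // terminal state
  }
}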
3137 
3138 void CallFunctionStub::Generate(MacroAssembler* masm) {
3139  // a1 : the function to call
3140  // a2 : feedback vector
3141  // a3 : (only if a2 is not the megamorphic symbol) slot in feedback
3142  // vector (Smi)
3143  Label slow, non_function, wrap, cont;
3144 
3145  if (NeedsChecks()) {
3146  // Check that the function is really a JavaScript function.
3147  // a1: pushed function (to be verified)
3148  __ JumpIfSmi(a1, &non_function);
3149 
3150  // Goto slow case if we do not have a function.
3151  __ GetObjectType(a1, t0, t0);
3152  __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
3153 
3154  if (RecordCallTarget()) {
3155  GenerateRecordCallTarget(masm);
3156  // Type information was updated. Because we may call Array, which
3157  // expects either undefined or an AllocationSite in a2, we need
3158  // to set a2 to undefined.
3159  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
3160  }
3161  }
3162 
3163  // Fast-case: Invoke the function now.
3164  // a1: pushed function
3165  ParameterCount actual(argc_);
3166 
3167  if (CallAsMethod()) {
3168  if (NeedsChecks()) {
3169  // Do not transform the receiver for strict mode functions and natives.
3170  __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3171  __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kCompilerHintsOffset));
3172  int32_t strict_mode_function_mask =
3173  1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
3174  int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
3175  __ And(at, t0, Operand(strict_mode_function_mask | native_mask));
3176  __ Branch(&cont, ne, at, Operand(zero_reg));
3177  }
3178 
3179  // Compute the receiver in sloppy mode.
3180  __ lw(a3, MemOperand(sp, argc_ * kPointerSize));
3181 
3182  if (NeedsChecks()) {
3183  __ JumpIfSmi(a3, &wrap);
3184  __ GetObjectType(a3, t0, t0);
3185  __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
3186  } else {
3187  __ jmp(&wrap);
3188  }
3189 
3190  __ bind(&cont);
3191  }
3192  __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
3193 
3194  if (NeedsChecks()) {
3195  // Slow-case: Non-function called.
3196  __ bind(&slow);
3197  if (RecordCallTarget()) {
3198  // If there is a call target cache, mark it megamorphic in the
3199  // non-function case. MegamorphicSentinel is an immortal immovable
3200  // object (megamorphic symbol) so no write barrier is needed.
3201  ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
3202  masm->isolate()->heap()->megamorphic_symbol());
3203  __ sll(t1, a3, kPointerSizeLog2 - kSmiTagSize);
3204  __ Addu(t1, a2, Operand(t1));
3205  __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
3206  __ sw(at, FieldMemOperand(t1, FixedArray::kHeaderSize));
3207  }
3208  // Check for function proxy.
3209  __ Branch(&non_function, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
3210  __ push(a1); // Put proxy as additional argument.
3211  __ li(a0, Operand(argc_ + 1, RelocInfo::NONE32));
3212  __ li(a2, Operand(0, RelocInfo::NONE32));
3213  __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
3214  {
3215  Handle<Code> adaptor =
3216  masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
3217  __ Jump(adaptor, RelocInfo::CODE_TARGET);
3218  }
3219 
3220  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
3221  // of the original receiver from the call site).
3222  __ bind(&non_function);
3223  __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
3224  __ li(a0, Operand(argc_)); // Set up the number of arguments.
3225  __ li(a2, Operand(0, RelocInfo::NONE32));
3226  __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
3227  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
3228  RelocInfo::CODE_TARGET);
3229  }
3230 
3231  if (CallAsMethod()) {
3232  __ bind(&wrap);
3233  // Wrap the receiver and patch it back onto the stack.
3234  { FrameScope frame_scope(masm, StackFrame::INTERNAL);
3235  __ Push(a1, a3);
3236  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
3237  __ pop(a1);
3238  }
3239  __ mov(a0, v0);
3240  __ sw(a0, MemOperand(sp, argc_ * kPointerSize));
3241  __ jmp(&cont);
3242  }
3243 }
3244 
3245 
3246 void CallConstructStub::Generate(MacroAssembler* masm) {
3247  // a0 : number of arguments
3248  // a1 : the function to call
3249  // a2 : feedback vector
3250  // a3 : (only if a2 is not undefined) slot in feedback vector (Smi)
3251  Label slow, non_function_call;
3252 
3253  // Check that the function is not a smi.
3254  __ JumpIfSmi(a1, &non_function_call);
3255  // Check that the function is a JSFunction.
3256  __ GetObjectType(a1, t0, t0);
3257  __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
3258 
3259  if (RecordCallTarget()) {
3260  GenerateRecordCallTarget(masm);
3261 
3262  __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
3263  __ Addu(t1, a2, at);
3264  if (FLAG_pretenuring_call_new) {
3265  // Put the AllocationSite from the feedback vector into a2.
3266  // By adding kPointerSize we encode that we know the AllocationSite
3267  // entry is at the feedback vector slot given by a3 + 1.
3268  __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize + kPointerSize));
3269  } else {
3270  Label feedback_register_initialized;
3271  // Put the AllocationSite from the feedback vector into a2, or undefined.
3274  __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
3275  __ Branch(&feedback_register_initialized, eq, t1, Operand(at));
3276  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
3277  __ bind(&feedback_register_initialized);
3278  }
3279 
3280  __ AssertUndefinedOrAllocationSite(a2, t1);
3281  }
3282 
3283  // Jump to the function-specific construct stub.
3284  Register jmp_reg = t0;
3285  __ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3286  __ lw(jmp_reg, FieldMemOperand(jmp_reg,
3287  SharedFunctionInfo::kConstructStubOffset));
3288  __ Addu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
3289  __ Jump(at);
3290 
3291  // a0: number of arguments
3292  // a1: called object
3293  // t0: object type
3294  Label do_call;
3295  __ bind(&slow);
3296  __ Branch(&non_function_call, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
3297  __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
3298  __ jmp(&do_call);
3299 
3300  __ bind(&non_function_call);
3301  __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
3302  __ bind(&do_call);
3303  // Set expected number of arguments to zero (not changing a0).
3304  __ li(a2, Operand(0, RelocInfo::NONE32));
3305  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
3306  RelocInfo::CODE_TARGET);
3307 }
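The feedback-slot addressing used in both call stubs above is the same arithmetic: untag the Smi slot index, scale it by the pointer size, and add the FixedArray header; with --pretenuring-call-new the AllocationSite sits one slot further, as the comment in CallConstructStub notes. A small sketch of that computation, with the 32-bit MIPS constants written out as assumptions:

#include <cstdint>

// Assumed 32-bit layout: 4-byte pointers, 1-bit Smi tag, 8-byte FixedArray
// header (map + length). The heap-object tag is ignored here for simplicity.
constexpr int kPointerSize = 4;
constexpr int kPointerSizeLog2 = 2;
constexpr int kSmiTagSize = 1;
constexpr int kFixedArrayHeaderSize = 8;

// feedback_vector is the start address of the FixedArray; smi_slot is the
// tagged Smi index as it sits in register a3.
uintptr_t FeedbackSlotAddress(uintptr_t feedback_vector, uint32_t smi_slot,
                              bool pretenuring_call_new) {
  // sll t0, a3, kPointerSizeLog2 - kSmiTagSize  ==  (slot >> 1) * 4.
  uintptr_t scaled = static_cast<uintptr_t>(smi_slot)
                     << (kPointerSizeLog2 - kSmiTagSize);
  uintptr_t addr = feedback_vector + scaled + kFixedArrayHeaderSize;
  // "By adding kPointerSize we encode that ... the AllocationSite entry is at
  // the feedback vector slot given by a3 + 1."
  if (pretenuring_call_new) addr += kPointerSize;
  return addr;
}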
3308 
3309 
3310 // StringCharCodeAtGenerator.
3311 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
3312  Label flat_string;
3313  Label ascii_string;
3314  Label got_char_code;
3315  Label sliced_string;
3316 
3317  ASSERT(!t0.is(index_));
3318  ASSERT(!t0.is(result_));
3319  ASSERT(!t0.is(object_));
3320 
3321  // If the receiver is a smi trigger the non-string case.
3322  __ JumpIfSmi(object_, receiver_not_string_);
3323 
3324  // Fetch the instance type of the receiver into result register.
3325  __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3326  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3327  // If the receiver is not a string trigger the non-string case.
3328  __ And(t0, result_, Operand(kIsNotStringMask));
3329  __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
3330 
3331  // If the index is non-smi trigger the non-smi case.
3332  __ JumpIfNotSmi(index_, &index_not_smi_);
3333 
3334  __ bind(&got_smi_index_);
3335 
3336  // Check for index out of range.
3337  __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
3338  __ Branch(index_out_of_range_, ls, t0, Operand(index_));
3339 
3340  __ sra(index_, index_, kSmiTagSize);
3341  __ sra(index_, index_, kSmiTagSize);
3342  StringCharLoadGenerator::Generate(masm,
3343  object_,
3344  index_,
3345  result_,
3346  &call_runtime_);
3347 
3348  __ sll(result_, result_, kSmiTagSize);
3349  __ bind(&exit_);
3350 }
3351 
3352 
3353 void StringCharCodeAtGenerator::GenerateSlow(
3354  MacroAssembler* masm,
3355  const RuntimeCallHelper& call_helper) {
3356  __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
3357 
3358  // Index is not a smi.
3359  __ bind(&index_not_smi_);
3360  // If index is a heap number, try converting it to an integer.
3361  __ CheckMap(index_,
3362  result_,
3363  Heap::kHeapNumberMapRootIndex,
3364  index_not_number_,
3365  DONT_DO_SMI_CHECK);
3366  call_helper.BeforeCall(masm);
3367  // Consumed by runtime conversion function:
3368  __ Push(object_, index_);
3369  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
3370  __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
3371  } else {
3372  ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
3373  // NumberToSmi discards numbers that are not exact integers.
3374  __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
3375  }
3376 
3377  // Save the conversion result before the pop instructions below
3378  // have a chance to overwrite it.
3379 
3380  __ Move(index_, v0);
3381  __ pop(object_);
3382  // Reload the instance type.
3383  __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3384  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3385  call_helper.AfterCall(masm);
3386  // If index is still not a smi, it must be out of range.
3387  __ JumpIfNotSmi(index_, index_out_of_range_);
3388  // Otherwise, return to the fast path.
3389  __ Branch(&got_smi_index_);
3390 
3391  // Call runtime. We get here when the receiver is a string and the
3392  // index is a number, but the code of getting the actual character
3393  // is too complex (e.g., when the string needs to be flattened).
3394  __ bind(&call_runtime_);
3395  call_helper.BeforeCall(masm);
3396  __ sll(index_, index_, kSmiTagSize);
3397  __ Push(object_, index_);
3398  __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
3399 
3400  __ Move(result_, v0);
3401 
3402  call_helper.AfterCall(masm);
3403  __ jmp(&exit_);
3404 
3405  __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
3406 }
3407 
3408 
3409 // -------------------------------------------------------------------------
3410 // StringCharFromCodeGenerator
3411 
3412 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
3413  // Fast case of Heap::LookupSingleCharacterStringFromCode.
3414 
3415  ASSERT(!t0.is(result_));
3416  ASSERT(!t0.is(code_));
3417 
3418  STATIC_ASSERT(kSmiTag == 0);
3421  __ And(t0,
3422  code_,
3423  Operand(kSmiTagMask |
3424  ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
3425  __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
3426 
3427  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
3428  // At this point code register contains smi tagged ASCII char code.
3429  STATIC_ASSERT(kSmiTag == 0);
3430  __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
3431  __ Addu(result_, result_, t0);
3432  __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
3433  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
3434  __ Branch(&slow_case_, eq, result_, Operand(t0));
3435  __ bind(&exit_);
3436 }
3437 
3438 
3439 void StringCharFromCodeGenerator::GenerateSlow(
3440  MacroAssembler* masm,
3441  const RuntimeCallHelper& call_helper) {
3442  __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
3443 
3444  __ bind(&slow_case_);
3445  call_helper.BeforeCall(masm);
3446  __ push(code_);
3447  __ CallRuntime(Runtime::kCharFromCode, 1);
3448  __ Move(result_, v0);
3449 
3450  call_helper.AfterCall(masm);
3451  __ Branch(&exit_);
3452 
3453  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
3454 }
3455 
3456 
3457 enum CopyCharactersFlags {
3458  COPY_ASCII = 1,
3459  DEST_ALWAYS_ALIGNED = 2
3460 };
3461 
3462 
3463 void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
3464  Register dest,
3465  Register src,
3466  Register count,
3467  Register scratch1,
3468  Register scratch2,
3469  Register scratch3,
3470  Register scratch4,
3471  Register scratch5,
3472  int flags) {
3473  bool ascii = (flags & COPY_ASCII) != 0;
3474  bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
3475 
3476  if (dest_always_aligned && FLAG_debug_code) {
3477  // Check that destination is actually word aligned if the flag says
3478  // that it is.
3479  __ And(scratch4, dest, Operand(kPointerAlignmentMask));
3480  __ Check(eq,
3481  kDestinationOfCopyNotAligned,
3482  scratch4,
3483  Operand(zero_reg));
3484  }
3485 
3486  const int kReadAlignment = 4;
3487  const int kReadAlignmentMask = kReadAlignment - 1;
3488  // Ensure that reading an entire aligned word containing the last character
3489  // of a string will not read outside the allocated area (because we pad up
3490  // to kObjectAlignment).
3491  STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
3492  // Assumes word reads and writes are little endian.
3493  // Nothing to do for zero characters.
3494  Label done;
3495 
3496  if (!ascii) {
3497  __ addu(count, count, count);
3498  }
3499  __ Branch(&done, eq, count, Operand(zero_reg));
3500 
3501  Label byte_loop;
3502  // Must copy at least eight bytes, otherwise just do it one byte at a time.
3503  __ Subu(scratch1, count, Operand(8));
3504  __ Addu(count, dest, Operand(count));
3505  Register limit = count; // Read until src equals this.
3506  __ Branch(&byte_loop, lt, scratch1, Operand(zero_reg));
3507 
3508  if (!dest_always_aligned) {
3509  // Align dest by byte copying. Copies between zero and three bytes.
3510  __ And(scratch4, dest, Operand(kReadAlignmentMask));
3511  Label dest_aligned;
3512  __ Branch(&dest_aligned, eq, scratch4, Operand(zero_reg));
3513  Label aligned_loop;
3514  __ bind(&aligned_loop);
3515  __ lbu(scratch1, MemOperand(src));
3516  __ addiu(src, src, 1);
3517  __ sb(scratch1, MemOperand(dest));
3518  __ addiu(dest, dest, 1);
3519  __ addiu(scratch4, scratch4, 1);
3520  __ Branch(&aligned_loop, le, scratch4, Operand(kReadAlignmentMask));
3521  __ bind(&dest_aligned);
3522  }
3523 
3524  Label simple_loop;
3525 
3526  __ And(scratch4, src, Operand(kReadAlignmentMask));
3527  __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg));
3528 
3529  // Loop for src/dst that are not aligned the same way.
3530  // This loop uses lwl and lwr instructions. These instructions
3531  // depend on the endianness, and the implementation assumes little-endian.
3532  {
3533  Label loop;
3534  __ bind(&loop);
3535  __ lwr(scratch1, MemOperand(src));
3536  __ Addu(src, src, Operand(kReadAlignment));
3537  __ lwl(scratch1, MemOperand(src, -1));
3538  __ sw(scratch1, MemOperand(dest));
3539  __ Addu(dest, dest, Operand(kReadAlignment));
3540  __ Subu(scratch2, limit, dest);
3541  __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
3542  }
3543 
3544  __ Branch(&byte_loop);
3545 
3546  // Simple loop.
3547  // Copy words from src to dest, until less than four bytes left.
3548  // Both src and dest are word aligned.
3549  __ bind(&simple_loop);
3550  {
3551  Label loop;
3552  __ bind(&loop);
3553  __ lw(scratch1, MemOperand(src));
3554  __ Addu(src, src, Operand(kReadAlignment));
3555  __ sw(scratch1, MemOperand(dest));
3556  __ Addu(dest, dest, Operand(kReadAlignment));
3557  __ Subu(scratch2, limit, dest);
3558  __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
3559  }
3560 
3561  // Copy bytes from src to dest until dest hits limit.
3562  __ bind(&byte_loop);
3563  // Test if dest has already reached the limit.
3564  __ Branch(&done, ge, dest, Operand(limit));
3565  __ lbu(scratch1, MemOperand(src));
3566  __ addiu(src, src, 1);
3567  __ sb(scratch1, MemOperand(dest));
3568  __ addiu(dest, dest, 1);
3569  __ Branch(&byte_loop);
3570 
3571  __ bind(&done);
3572 }
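GenerateCopyCharactersLong falls back to a byte loop for copies shorter than eight bytes; otherwise it aligns the destination byte by byte, copies whole words (using the lwl/lwr pair when the source is unaligned), and finishes the tail a byte at a time. A plain C++ sketch of the same strategy, assuming memcpy as a stand-in for the unaligned word loads:

#include <cstddef>
#include <cstdint>
#include <cstring>

// chars is the number of characters; two-byte strings double the byte count,
// mirroring the `if (!ascii) count += count` above.
void CopyCharactersLong(uint8_t* dest, const uint8_t* src, size_t chars,
                        bool ascii) {
  size_t count = ascii ? chars : chars * 2;
  if (count == 0) return;
  uint8_t* limit = dest + count;

  if (count >= 8) {
    // Align the destination by copying up to three leading bytes.
    while (reinterpret_cast<uintptr_t>(dest) & 3) { *dest++ = *src++; }
    // Word loop; a 4-byte memcpy tolerates an unaligned source the way the
    // lwl/lwr pair does in the stub.
    while (limit - dest >= 4) {
      uint32_t word;
      std::memcpy(&word, src, 4);
      std::memcpy(dest, &word, 4);
      src += 4;
      dest += 4;
    }
  }
  // Tail bytes (or the whole copy when it was shorter than eight bytes).
  while (dest < limit) { *dest++ = *src++; }
}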
3573 
3574 
3575 void StringHelper::GenerateHashInit(MacroAssembler* masm,
3576  Register hash,
3577  Register character) {
3578  // hash = seed + character + ((seed + character) << 10);
3579  __ LoadRoot(hash, Heap::kHashSeedRootIndex);
3580  // Untag smi seed and add the character.
3581  __ SmiUntag(hash);
3582  __ addu(hash, hash, character);
3583  __ sll(at, hash, 10);
3584  __ addu(hash, hash, at);
3585  // hash ^= hash >> 6;
3586  __ srl(at, hash, 6);
3587  __ xor_(hash, hash, at);
3588 }
3589 
3590 
3591 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
3592  Register hash,
3593  Register character) {
3594  // hash += character;
3595  __ addu(hash, hash, character);
3596  // hash += hash << 10;
3597  __ sll(at, hash, 10);
3598  __ addu(hash, hash, at);
3599  // hash ^= hash >> 6;
3600  __ srl(at, hash, 6);
3601  __ xor_(hash, hash, at);
3602 }
3603 
3604 
3605 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
3606  Register hash) {
3607  // hash += hash << 3;
3608  __ sll(at, hash, 3);
3609  __ addu(hash, hash, at);
3610  // hash ^= hash >> 11;
3611  __ srl(at, hash, 11);
3612  __ xor_(hash, hash, at);
3613  // hash += hash << 15;
3614  __ sll(at, hash, 15);
3615  __ addu(hash, hash, at);
3616 
3617  __ li(at, Operand(String::kHashBitMask));
3618  __ and_(hash, hash, at);
3619 
3620  // if (hash == 0) hash = 27;
3621  __ ori(at, zero_reg, StringHasher::kZeroHash);
3622  __ Movz(hash, at, hash);
3623 }
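The three hash helpers above are the MIPS pieces of V8's running string hash: GenerateHashInit seeds it, GenerateHashAddCharacter folds in each further character, and GenerateHashGetHash finalizes and clamps it. Taken together they compute the following, shown as a standalone C++ sketch; it assumes a non-empty string, and the 30-bit mask is an assumption standing in for String::kHashBitMask:

#include <cstddef>
#include <cstdint>

// Assumed stand-ins for String::kHashBitMask and StringHasher::kZeroHash.
constexpr uint32_t kHashBitMask = (1u << 30) - 1;
constexpr uint32_t kZeroHash = 27;  // "if (hash == 0) hash = 27" above

uint32_t StringHash(const uint8_t* chars, size_t length, uint32_t seed) {
  // GenerateHashInit: hash = seed + c + ((seed + c) << 10); hash ^= hash >> 6.
  uint32_t hash = seed + chars[0];
  hash += hash << 10;
  hash ^= hash >> 6;
  // GenerateHashAddCharacter for every remaining character.
  for (size_t i = 1; i < length; i++) {
    hash += chars[i];
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  // GenerateHashGetHash: finalize and clamp to the hash field width.
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= kHashBitMask;
  return hash == 0 ? kZeroHash : hash;
}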
3624 
3625 
3626 void SubStringStub::Generate(MacroAssembler* masm) {
3627  Label runtime;
3628  // Stack frame on entry.
3629  // ra: return address
3630  // sp[0]: to
3631  // sp[4]: from
3632  // sp[8]: string
3633 
3634  // This stub is called from the native-call %_SubString(...), so
3635  // nothing can be assumed about the arguments. It is tested that:
3636  // "string" is a sequential string,
3637  // both "from" and "to" are smis, and
3638  // 0 <= from <= to <= string.length.
3639  // If any of these assumptions fail, we call the runtime system.
3640 
3641  const int kToOffset = 0 * kPointerSize;
3642  const int kFromOffset = 1 * kPointerSize;
3643  const int kStringOffset = 2 * kPointerSize;
3644 
3645  __ lw(a2, MemOperand(sp, kToOffset));
3646  __ lw(a3, MemOperand(sp, kFromOffset));
3647  STATIC_ASSERT(kFromOffset == kToOffset + 4);
3648  STATIC_ASSERT(kSmiTag == 0);
3650 
3651  // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
3652  // safe in this case.
3653  __ UntagAndJumpIfNotSmi(a2, a2, &runtime);
3654  __ UntagAndJumpIfNotSmi(a3, a3, &runtime);
3655  // Both a2 and a3 are untagged integers.
3656 
3657  __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0.
3658 
3659  __ Branch(&runtime, gt, a3, Operand(a2)); // Fail if from > to.
3660  __ Subu(a2, a2, a3);
3661 
3662  // Make sure first argument is a string.
3663  __ lw(v0, MemOperand(sp, kStringOffset));
3664  __ JumpIfSmi(v0, &runtime);
3665  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
3666  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3667  __ And(t0, a1, Operand(kIsNotStringMask));
3668 
3669  __ Branch(&runtime, ne, t0, Operand(zero_reg));
3670 
3671  Label single_char;
3672  __ Branch(&single_char, eq, a2, Operand(1));
3673 
3674  // Short-cut for the case of trivial substring.
3675  Label return_v0;
3676  // v0: original string
3677  // a2: result string length
3678  __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
3679  __ sra(t0, t0, 1);
3680  // Return original string.
3681  __ Branch(&return_v0, eq, a2, Operand(t0));
3682  // Longer than original string's length or negative: unsafe arguments.
3683  __ Branch(&runtime, hi, a2, Operand(t0));
3684  // Shorter than original string's length: an actual substring.
3685 
3686  // Deal with different string types: update the index if necessary
3687  // and put the underlying string into t1.
3688  // v0: original string
3689  // a1: instance type
3690  // a2: length
3691  // a3: from index (untagged)
3692  Label underlying_unpacked, sliced_string, seq_or_external_string;
3693  // If the string is not indirect, it can only be sequential or external.
3696  __ And(t0, a1, Operand(kIsIndirectStringMask));
3697  __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
3698  // t0 is used as a scratch register and can be overwritten in either case.
3699  __ And(t0, a1, Operand(kSlicedNotConsMask));
3700  __ Branch(&sliced_string, ne, t0, Operand(zero_reg));
3701  // Cons string. Check whether it is flat, then fetch first part.
3702  __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
3703  __ LoadRoot(t0, Heap::kempty_stringRootIndex);
3704  __ Branch(&runtime, ne, t1, Operand(t0));
3705  __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
3706  // Update instance type.
3707  __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
3708  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3709  __ jmp(&underlying_unpacked);
3710 
3711  __ bind(&sliced_string);
3712  // Sliced string. Fetch parent and correct start index by offset.
3713  __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
3714  __ lw(t0, FieldMemOperand(v0, SlicedString::kOffsetOffset));
3715  __ sra(t0, t0, 1); // Add offset to index.
3716  __ Addu(a3, a3, t0);
3717  // Update instance type.
3718  __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
3719  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
3720  __ jmp(&underlying_unpacked);
3721 
3722  __ bind(&seq_or_external_string);
3723  // Sequential or external string. Just move string to the expected register.
3724  __ mov(t1, v0);
3725 
3726  __ bind(&underlying_unpacked);
3727 
3728  if (FLAG_string_slices) {
3729  Label copy_routine;
3730  // t1: underlying subject string
3731  // a1: instance type of underlying subject string
3732  // a2: length
3733  // a3: adjusted start index (untagged)
3734  // Short slice. Copy instead of slicing.
3735  __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
3736  // Allocate new sliced string. At this point we do not reload the instance
3737  // type including the string encoding because we simply rely on the info
3738  // provided by the original string. It does not matter if the original
3739  // string's encoding is wrong because we always have to recheck encoding of
3740  // the newly created string's parent anyways due to externalized strings.
3741  Label two_byte_slice, set_slice_header;
3744  __ And(t0, a1, Operand(kStringEncodingMask));
3745  __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
3746  __ AllocateAsciiSlicedString(v0, a2, t2, t3, &runtime);
3747  __ jmp(&set_slice_header);
3748  __ bind(&two_byte_slice);
3749  __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
3750  __ bind(&set_slice_header);
3751  __ sll(a3, a3, 1);
3752  __ sw(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
3753  __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
3754  __ jmp(&return_v0);
3755 
3756  __ bind(&copy_routine);
3757  }
3758 
3759  // t1: underlying subject string
3760  // a1: instance type of underlying subject string
3761  // a2: length
3762  // a3: adjusted start index (untagged)
3763  Label two_byte_sequential, sequential_string, allocate_result;
3766  __ And(t0, a1, Operand(kExternalStringTag));
3767  __ Branch(&sequential_string, eq, t0, Operand(zero_reg));
3768 
3769  // Handle external string.
3770  // Rule out short external strings.
3772  __ And(t0, a1, Operand(kShortExternalStringTag));
3773  __ Branch(&runtime, ne, t0, Operand(zero_reg));
3774  __ lw(t1, FieldMemOperand(t1, ExternalString::kResourceDataOffset));
3775  // t1 already points to the first character of underlying string.
3776  __ jmp(&allocate_result);
3777 
3778  __ bind(&sequential_string);
3779  // Locate first character of underlying subject string.
3781  __ Addu(t1, t1, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3782 
3783  __ bind(&allocate_result);
3784  // Sequential ASCII string. Allocate the result.
3786  __ And(t0, a1, Operand(kStringEncodingMask));
3787  __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
3788 
3789  // Allocate and copy the resulting ASCII string.
3790  __ AllocateAsciiString(v0, a2, t0, t2, t3, &runtime);
3791 
3792  // Locate first character of substring to copy.
3793  __ Addu(t1, t1, a3);
3794 
3795  // Locate first character of result.
3796  __ Addu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3797 
3798  // v0: result string
3799  // a1: first character of result string
3800  // a2: result string length
3801  // t1: first character of substring to copy
3803  StringHelper::GenerateCopyCharactersLong(
3804  masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
3805  __ jmp(&return_v0);
3806 
3807  // Allocate and copy the resulting two-byte string.
3808  __ bind(&two_byte_sequential);
3809  __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime);
3810 
3811  // Locate first character of substring to copy.
3812  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
3813  __ sll(t0, a3, 1);
3814  __ Addu(t1, t1, t0);
3815  // Locate first character of result.
3816  __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3817 
3818  // v0: result string.
3819  // a1: first character of result.
3820  // a2: result length.
3821  // t1: first character of substring to copy.
3823  StringHelper::GenerateCopyCharactersLong(
3824  masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
3825 
3826  __ bind(&return_v0);
3827  Counters* counters = masm->isolate()->counters();
3828  __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
3829  __ DropAndRet(3);
3830 
3831  // Just jump to runtime to create the sub string.
3832  __ bind(&runtime);
3833  __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
3834 
3835  __ bind(&single_char);
3836  // v0: original string
3837  // a1: instance type
3838  // a2: length
3839  // a3: from index (untagged)
3840  __ SmiTag(a3, a3);
3841  StringCharAtGenerator generator(
3842  v0, a3, a2, v0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
3843  generator.GenerateFast(masm);
3844  __ DropAndRet(3);
3845  generator.SkipSlow(masm, &runtime);
3846 }
3847 
3848 
3849 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
3850  Register left,
3851  Register right,
3852  Register scratch1,
3853  Register scratch2,
3854  Register scratch3) {
3855  Register length = scratch1;
3856 
3857  // Compare lengths.
3858  Label strings_not_equal, check_zero_length;
3859  __ lw(length, FieldMemOperand(left, String::kLengthOffset));
3860  __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
3861  __ Branch(&check_zero_length, eq, length, Operand(scratch2));
3862  __ bind(&strings_not_equal);
3863  ASSERT(is_int16(NOT_EQUAL));
3864  __ Ret(USE_DELAY_SLOT);
3865  __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
3866 
3867  // Check if the length is zero.
3868  Label compare_chars;
3869  __ bind(&check_zero_length);
3870  STATIC_ASSERT(kSmiTag == 0);
3871  __ Branch(&compare_chars, ne, length, Operand(zero_reg));
3872  ASSERT(is_int16(EQUAL));
3873  __ Ret(USE_DELAY_SLOT);
3874  __ li(v0, Operand(Smi::FromInt(EQUAL)));
3875 
3876  // Compare characters.
3877  __ bind(&compare_chars);
3878 
3879  GenerateAsciiCharsCompareLoop(masm,
3880  left, right, length, scratch2, scratch3, v0,
3881  &strings_not_equal);
3882 
3883  // Characters are equal.
3884  __ Ret(USE_DELAY_SLOT);
3885  __ li(v0, Operand(Smi::FromInt(EQUAL)));
3886 }
3887 
3888 
3889 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
3890  Register left,
3891  Register right,
3892  Register scratch1,
3893  Register scratch2,
3894  Register scratch3,
3895  Register scratch4) {
3896  Label result_not_equal, compare_lengths;
3897  // Find minimum length and length difference.
3898  __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
3899  __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
3900  __ Subu(scratch3, scratch1, Operand(scratch2));
3901  Register length_delta = scratch3;
3902  __ slt(scratch4, scratch2, scratch1);
3903  __ Movn(scratch1, scratch2, scratch4);
3904  Register min_length = scratch1;
3905  STATIC_ASSERT(kSmiTag == 0);
3906  __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
3907 
3908  // Compare loop.
3909  GenerateAsciiCharsCompareLoop(masm,
3910  left, right, min_length, scratch2, scratch4, v0,
3911  &result_not_equal);
3912 
3913  // Compare lengths - strings up to min-length are equal.
3914  __ bind(&compare_lengths);
3915  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
3916  // Use length_delta as result if it's zero.
3917  __ mov(scratch2, length_delta);
3918  __ mov(scratch4, zero_reg);
3919  __ mov(v0, zero_reg);
3920 
3921  __ bind(&result_not_equal);
3922  // Conditionally update the result based either on length_delta or
3923  // the last comparison performed in the loop above.
3924  Label ret;
3925  __ Branch(&ret, eq, scratch2, Operand(scratch4));
3926  __ li(v0, Operand(Smi::FromInt(GREATER)));
3927  __ Branch(&ret, gt, scratch2, Operand(scratch4));
3928  __ li(v0, Operand(Smi::FromInt(LESS)));
3929  __ bind(&ret);
3930  __ Ret();
3931 }
3932 
3933 
3934 void StringCompareStub::GenerateAsciiCharsCompareLoop(
3935  MacroAssembler* masm,
3936  Register left,
3937  Register right,
3938  Register length,
3939  Register scratch1,
3940  Register scratch2,
3941  Register scratch3,
3942  Label* chars_not_equal) {
3943  // Change index to run from -length to -1 by adding length to string
3944  // start. This means that loop ends when index reaches zero, which
3945  // doesn't need an additional compare.
3946  __ SmiUntag(length);
3947  __ Addu(scratch1, length,
3948  Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3949  __ Addu(left, left, Operand(scratch1));
3950  __ Addu(right, right, Operand(scratch1));
3951  __ Subu(length, zero_reg, length);
3952  Register index = length; // index = -length;
3953 
3954 
3955  // Compare loop.
3956  Label loop;
3957  __ bind(&loop);
3958  __ Addu(scratch3, left, index);
3959  __ lbu(scratch1, MemOperand(scratch3));
3960  __ Addu(scratch3, right, index);
3961  __ lbu(scratch2, MemOperand(scratch3));
3962  __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
3963  __ Addu(index, index, 1);
3964  __ Branch(&loop, ne, index, Operand(zero_reg));
3965 }
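The compare loop above avoids a separate end-of-loop test by biasing both string pointers past their character data and running the index from -length up to zero, exactly as its leading comment describes. The same trick in C++ form, as a sketch rather than the stub's exact register usage:

#include <cstddef>
#include <cstdint>

// Returns true when the first `length` one-byte characters differ,
// mirroring the jump to `chars_not_equal` in the loop above.
bool AsciiCharsDiffer(const uint8_t* left, const uint8_t* right, size_t length) {
  // Bias the pointers so that index 0 is one past the last character.
  const uint8_t* left_end = left + length;
  const uint8_t* right_end = right + length;
  // The index runs from -length to -1; reaching 0 terminates the loop
  // without an extra comparison against `length`.
  for (ptrdiff_t index = -static_cast<ptrdiff_t>(length); index != 0; index++) {
    if (left_end[index] != right_end[index]) return true;
  }
  return false;
}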
3966 
3967 
3968 void StringCompareStub::Generate(MacroAssembler* masm) {
3969  Label runtime;
3970 
3971  Counters* counters = masm->isolate()->counters();
3972 
3973  // Stack frame on entry.
3974  // sp[0]: right string
3975  // sp[4]: left string
3976  __ lw(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
3977  __ lw(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
3978 
3979  Label not_same;
3980  __ Branch(&not_same, ne, a0, Operand(a1));
3981  STATIC_ASSERT(EQUAL == 0);
3982  STATIC_ASSERT(kSmiTag == 0);
3983  __ li(v0, Operand(Smi::FromInt(EQUAL)));
3984  __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
3985  __ DropAndRet(2);
3986 
3987  __ bind(&not_same);
3988 
3989  // Check that both objects are sequential ASCII strings.
3990  __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
3991 
3992  // Compare flat ASCII strings natively. Remove arguments from stack first.
3993  __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
3994  __ Addu(sp, sp, Operand(2 * kPointerSize));
3995  GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
3996 
3997  __ bind(&runtime);
3998  __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
3999 }
4000 
4001 
4002 void ArrayPushStub::Generate(MacroAssembler* masm) {
4003  Register receiver = a0;
4004  Register scratch = a1;
4005 
4006  int argc = arguments_count();
4007 
4008  if (argc == 0) {
4009  // Nothing to do, just return the length.
4010  __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
4011  __ DropAndRet(argc + 1);
4012  return;
4013  }
4014 
4015  Isolate* isolate = masm->isolate();
4016 
4017  if (argc != 1) {
4018  __ TailCallExternalReference(
4019  ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
4020  return;
4021  }
4022 
4023  Label call_builtin, attempt_to_grow_elements, with_write_barrier;
4024 
4025  Register elements = t2;
4026  Register end_elements = t1;
4027  // Get the elements array of the object.
4028  __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
4029 
4030  if (IsFastSmiOrObjectElementsKind(elements_kind())) {
4031  // Check that the elements are in fast mode and writable.
4032  __ CheckMap(elements,
4033  scratch,
4034  Heap::kFixedArrayMapRootIndex,
4035  &call_builtin,
4036  DONT_DO_SMI_CHECK);
4037  }
4038 
4039  // Get the array's length into scratch and calculate new length.
4040  __ lw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
4041  __ Addu(scratch, scratch, Operand(Smi::FromInt(argc)));
4042 
4043  // Get the elements' length.
4044  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
4045 
4046  const int kEndElementsOffset =
4047  FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
4048 
4049  if (IsFastSmiOrObjectElementsKind(elements_kind())) {
4050  // Check if we could survive without allocation.
4051  __ Branch(&attempt_to_grow_elements, gt, scratch, Operand(t0));
4052 
4053  // Check if value is a smi.
4054  __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
4055  __ JumpIfNotSmi(t0, &with_write_barrier);
4056 
4057  // Store the value.
4058  // We may need a register containing the address end_elements below,
4059  // so write back the value in end_elements.
4060  __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize);
4061  __ Addu(end_elements, elements, end_elements);
4062  __ Addu(end_elements, end_elements, kEndElementsOffset);
4063  __ sw(t0, MemOperand(end_elements));
4064  } else {
4065  // Check if we could survive without allocation.
4066  __ Branch(&call_builtin, gt, scratch, Operand(t0));
4067 
4068  __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
4069  __ StoreNumberToDoubleElements(t0, scratch, elements, a3, t1, a2,
4070  &call_builtin, argc * kDoubleSize);
4071  }
4072 
4073  // Save new length.
4074  __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
4075  __ mov(v0, scratch);
4076  __ DropAndRet(argc + 1);
4077 
4078  if (IsFastDoubleElementsKind(elements_kind())) {
4079  __ bind(&call_builtin);
4080  __ TailCallExternalReference(
4081  ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
4082  return;
4083  }
4084 
4085  __ bind(&with_write_barrier);
4086 
4087  if (IsFastSmiElementsKind(elements_kind())) {
4088  if (FLAG_trace_elements_transitions) __ jmp(&call_builtin);
4089 
4090  __ lw(t3, FieldMemOperand(t0, HeapObject::kMapOffset));
4091  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4092  __ Branch(&call_builtin, eq, t3, Operand(at));
4093 
4094  ElementsKind target_kind = IsHoleyElementsKind(elements_kind())
4095  ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
4099  const int header_size = FixedArrayBase::kHeaderSize;
4100  // Verify that the object can be transitioned in place.
4101  const int origin_offset = header_size + elements_kind() * kPointerSize;
4102  __ lw(a2, FieldMemOperand(receiver, origin_offset));
4104  __ Branch(&call_builtin, ne, a2, Operand(at));
4105 
4106 
4107  const int target_offset = header_size + target_kind * kPointerSize;
4108  __ lw(a3, FieldMemOperand(a3, target_offset));
4109  __ mov(a2, receiver);
4112  }
4113 
4114  // Save new length.
4115  __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
4116 
4117  // Store the value.
4118  // We may need a register containing the address end_elements below, so write
4119  // back the value in end_elements.
4120  __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize);
4121  __ Addu(end_elements, elements, end_elements);
4122  __ Addu(end_elements, end_elements, kEndElementsOffset);
4123  __ sw(t0, MemOperand(end_elements));
4124 
4125  __ RecordWrite(elements,
4126  end_elements,
4127  t0,
4128  kRAHasNotBeenSaved,
4129  kDontSaveFPRegs,
4130  EMIT_REMEMBERED_SET,
4131  OMIT_SMI_CHECK);
4132  __ mov(v0, scratch);
4133  __ DropAndRet(argc + 1);
4134 
4135  __ bind(&attempt_to_grow_elements);
4136  // scratch: array's length + 1.
4137 
4138  if (!FLAG_inline_new) {
4139  __ bind(&call_builtin);
4140  __ TailCallExternalReference(
4141  ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
4142  return;
4143  }
4144 
4145  __ lw(a2, MemOperand(sp, (argc - 1) * kPointerSize));
4146  // Growing elements that are SMI-only requires special handling in case the
4147  // new element is non-Smi. For now, delegate to the builtin.
4148  if (IsFastSmiElementsKind(elements_kind())) {
4149  __ JumpIfNotSmi(a2, &call_builtin);
4150  }
4151 
4152  // We could be lucky and the elements array could be at the top of new-space.
4153  // In this case we can just grow it in place by moving the allocation pointer
4154  // up.
4155  ExternalReference new_space_allocation_top =
4156  ExternalReference::new_space_allocation_top_address(isolate);
4157  ExternalReference new_space_allocation_limit =
4158  ExternalReference::new_space_allocation_limit_address(isolate);
4159 
4160  const int kAllocationDelta = 4;
4161  ASSERT(kAllocationDelta >= argc);
4162  // Load top and check if it is the end of elements.
4163  __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize);
4164  __ Addu(end_elements, elements, end_elements);
4165  __ Addu(end_elements, end_elements, Operand(kEndElementsOffset));
4166  __ li(t0, Operand(new_space_allocation_top));
4167  __ lw(a3, MemOperand(t0));
4168  __ Branch(&call_builtin, ne, a3, Operand(end_elements));
4169 
4170  __ li(t3, Operand(new_space_allocation_limit));
4171  __ lw(t3, MemOperand(t3));
4172  __ Addu(a3, a3, Operand(kAllocationDelta * kPointerSize));
4173  __ Branch(&call_builtin, hi, a3, Operand(t3));
4174 
4175  // We fit and could grow elements.
4176  // Update new_space_allocation_top.
4177  __ sw(a3, MemOperand(t0));
4178  // Push the argument.
4179  __ sw(a2, MemOperand(end_elements));
4180  // Fill the rest with holes.
4181  __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
4182  for (int i = 1; i < kAllocationDelta; i++) {
4183  __ sw(a3, MemOperand(end_elements, i * kPointerSize));
4184  }
4185 
4186  // Update elements' and array's sizes.
4187  __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
4188  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
4189  __ Addu(t0, t0, Operand(Smi::FromInt(kAllocationDelta)));
4190  __ sw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
4191 
4192  // Elements are in new space, so write barrier is not required.
4193  __ mov(v0, scratch);
4194  __ DropAndRet(argc + 1);
4195 
4196  __ bind(&call_builtin);
4197  __ TailCallExternalReference(
4198  ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
4199 }
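The attempt_to_grow_elements path relies on the elements backing store being the most recent new-space allocation: if its end coincides with the allocation top and bumping the top by kAllocationDelta slots still fits under the limit, the store is grown in place by moving the top pointer. A sketch of that check with hypothetical names and assumed 32-bit constants:

#include <cstdint>

constexpr int kPointerSize = 4;      // 32-bit MIPS assumption
constexpr int kAllocationDelta = 4;  // grow by four slots, as above

// Returns the new allocation top, or 0 when the fast path cannot be used and
// the caller must fall back to the builtin.
uintptr_t TryGrowElementsInPlace(uintptr_t elements_end,  // just past the store
                                 uintptr_t allocation_top,
                                 uintptr_t allocation_limit) {
  // Only the most recent allocation can be extended.
  if (allocation_top != elements_end) return 0;
  uintptr_t new_top = allocation_top + kAllocationDelta * kPointerSize;
  // The bump must still fit inside the current new-space page.
  if (new_top > allocation_limit) return 0;
  return new_top;  // caller writes new_top back and fills the slots with holes
}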
4200 
4201 
4202 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
4203  // ----------- S t a t e -------------
4204  // -- a1 : left
4205  // -- a0 : right
4206  // -- ra : return address
4207  // -----------------------------------
4208  Isolate* isolate = masm->isolate();
4209 
4210  // Load a2 with the allocation site. We stick an undefined dummy value here
4211  // and replace it with the real allocation site later when we instantiate this
4212  // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
4213  __ li(a2, handle(isolate->heap()->undefined_value()));
4214 
4215  // Make sure that we actually patched the allocation site.
4216  if (FLAG_debug_code) {
4217  __ And(at, a2, Operand(kSmiTagMask));
4218  __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
4219  __ lw(t0, FieldMemOperand(a2, HeapObject::kMapOffset));
4220  __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
4221  __ Assert(eq, kExpectedAllocationSite, t0, Operand(at));
4222  }
4223 
4224  // Tail call into the stub that handles binary operations with allocation
4225  // sites.
4226  BinaryOpWithAllocationSiteStub stub(state_);
4227  __ TailCallStub(&stub);
4228 }
4229 
4230 
4231 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
4232  ASSERT(state_ == CompareIC::SMI);
4233  Label miss;
4234  __ Or(a2, a1, a0);
4235  __ JumpIfNotSmi(a2, &miss);
4236 
4237  if (GetCondition() == eq) {
4238  // For equality we do not care about the sign of the result.
4239  __ Ret(USE_DELAY_SLOT);
4240  __ Subu(v0, a0, a1);
4241  } else {
4242  // Untag before subtracting to avoid handling overflow.
4243  __ SmiUntag(a1);
4244  __ SmiUntag(a0);
4245  __ Ret(USE_DELAY_SLOT);
4246  __ Subu(v0, a1, a0);
4247  }
4248 
4249  __ bind(&miss);
4250  GenerateMiss(masm);
4251 }
4252 
4253 
4254 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
4255  ASSERT(state_ == CompareIC::NUMBER);
4256 
4257  Label generic_stub;
4258  Label unordered, maybe_undefined1, maybe_undefined2;
4259  Label miss;
4260 
4261  if (left_ == CompareIC::SMI) {
4262  __ JumpIfNotSmi(a1, &miss);
4263  }
4264  if (right_ == CompareIC::SMI) {
4265  __ JumpIfNotSmi(a0, &miss);
4266  }
4267 
4268  // Inlining the double comparison and falling back to the general compare
4269  // stub if NaN is involved.
4270  // Load left and right operand.
4271  Label done, left, left_smi, right_smi;
4272  __ JumpIfSmi(a0, &right_smi);
4273  __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
4274  DONT_DO_SMI_CHECK);
4275  __ Subu(a2, a0, Operand(kHeapObjectTag));
4276  __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
4277  __ Branch(&left);
4278  __ bind(&right_smi);
4279  __ SmiUntag(a2, a0); // Can't clobber a0 yet.
4280  FPURegister single_scratch = f6;
4281  __ mtc1(a2, single_scratch);
4282  __ cvt_d_w(f2, single_scratch);
4283 
4284  __ bind(&left);
4285  __ JumpIfSmi(a1, &left_smi);
4286  __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
4287  DONT_DO_SMI_CHECK);
4288  __ Subu(a2, a1, Operand(kHeapObjectTag));
4289  __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
4290  __ Branch(&done);
4291  __ bind(&left_smi);
4292  __ SmiUntag(a2, a1); // Can't clobber a1 yet.
4293  single_scratch = f8;
4294  __ mtc1(a2, single_scratch);
4295  __ cvt_d_w(f0, single_scratch);
4296 
4297  __ bind(&done);
4298 
4299  // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
4300  Label fpu_eq, fpu_lt;
4301  // Test if equal, and also handle the unordered/NaN case.
4302  __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
4303 
4304  // Test if less (unordered case is already handled).
4305  __ BranchF(&fpu_lt, NULL, lt, f0, f2);
4306 
4307  // Otherwise it's greater, so just fall thru, and return.
4308  ASSERT(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
4309  __ Ret(USE_DELAY_SLOT);
4310  __ li(v0, Operand(GREATER));
4311 
4312  __ bind(&fpu_eq);
4313  __ Ret(USE_DELAY_SLOT);
4314  __ li(v0, Operand(EQUAL));
4315 
4316  __ bind(&fpu_lt);
4317  __ Ret(USE_DELAY_SLOT);
4318  __ li(v0, Operand(LESS));
4319 
4320  __ bind(&unordered);
4321  __ bind(&generic_stub);
4322  ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
4323  CompareIC::GENERIC);
4324  __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
4325 
4326  __ bind(&maybe_undefined1);
4327  if (Token::IsOrderedRelationalCompareOp(op_)) {
4328  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4329  __ Branch(&miss, ne, a0, Operand(at));
4330  __ JumpIfSmi(a1, &unordered);
4331  __ GetObjectType(a1, a2, a2);
4332  __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
4333  __ jmp(&unordered);
4334  }
4335 
4336  __ bind(&maybe_undefined2);
4337  if (Token::IsOrderedRelationalCompareOp(op_)) {
4338  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4339  __ Branch(&unordered, eq, a1, Operand(at));
4340  }
4341 
4342  __ bind(&miss);
4343  GenerateMiss(masm);
4344 }
4345 
4346 
4347 void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
4348  ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
4349  Label miss;
4350 
4351  // Registers containing left and right operands respectively.
4352  Register left = a1;
4353  Register right = a0;
4354  Register tmp1 = a2;
4355  Register tmp2 = a3;
4356 
4357  // Check that both operands are heap objects.
4358  __ JumpIfEitherSmi(left, right, &miss);
4359 
4360  // Check that both operands are internalized strings.
4361  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
4362  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
4363  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
4364  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
4366  __ Or(tmp1, tmp1, Operand(tmp2));
4367  __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
4368  __ Branch(&miss, ne, at, Operand(zero_reg));
4369 
4370  // Make sure a0 is non-zero. At this point input operands are
4371  // guaranteed to be non-zero.
4372  ASSERT(right.is(a0));
4373  STATIC_ASSERT(EQUAL == 0);
4374  STATIC_ASSERT(kSmiTag == 0);
4375  __ mov(v0, right);
4376  // Internalized strings are compared by identity.
4377  __ Ret(ne, left, Operand(right));
4378  ASSERT(is_int16(EQUAL));
4379  __ Ret(USE_DELAY_SLOT);
4380  __ li(v0, Operand(Smi::FromInt(EQUAL)));
4381 
4382  __ bind(&miss);
4383  GenerateMiss(masm);
4384 }
4385 
4386 
4387 void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
4388  ASSERT(state_ == CompareIC::UNIQUE_NAME);
4389  ASSERT(GetCondition() == eq);
4390  Label miss;
4391 
4392  // Registers containing left and right operands respectively.
4393  Register left = a1;
4394  Register right = a0;
4395  Register tmp1 = a2;
4396  Register tmp2 = a3;
4397 
4398  // Check that both operands are heap objects.
4399  __ JumpIfEitherSmi(left, right, &miss);
4400 
4401  // Check that both operands are unique names. This leaves the instance
4402  // types loaded in tmp1 and tmp2.
4403  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
4404  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
4405  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
4406  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
4407 
4408  __ JumpIfNotUniqueName(tmp1, &miss);
4409  __ JumpIfNotUniqueName(tmp2, &miss);
4410 
4411  // Use a0 as result
4412  __ mov(v0, a0);
4413 
4414  // Unique names are compared by identity.
4415  Label done;
4416  __ Branch(&done, ne, left, Operand(right));
4417  // Make sure a0 is non-zero. At this point input operands are
4418  // guaranteed to be non-zero.
4419  ASSERT(right.is(a0));
4420  STATIC_ASSERT(EQUAL == 0);
4421  STATIC_ASSERT(kSmiTag == 0);
4422  __ li(v0, Operand(Smi::FromInt(EQUAL)));
4423  __ bind(&done);
4424  __ Ret();
4425 
4426  __ bind(&miss);
4427  GenerateMiss(masm);
4428 }
4429 
4430 
4431 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
4432  ASSERT(state_ == CompareIC::STRING);
4433  Label miss;
4434 
4435  bool equality = Token::IsEqualityOp(op_);
4436 
4437  // Registers containing left and right operands respectively.
4438  Register left = a1;
4439  Register right = a0;
4440  Register tmp1 = a2;
4441  Register tmp2 = a3;
4442  Register tmp3 = t0;
4443  Register tmp4 = t1;
4444  Register tmp5 = t2;
4445 
4446  // Check that both operands are heap objects.
4447  __ JumpIfEitherSmi(left, right, &miss);
4448 
4449  // Check that both operands are strings. This leaves the instance
4450  // types loaded in tmp1 and tmp2.
4451  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
4452  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
4453  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
4454  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
4456  __ Or(tmp3, tmp1, tmp2);
4457  __ And(tmp5, tmp3, Operand(kIsNotStringMask));
4458  __ Branch(&miss, ne, tmp5, Operand(zero_reg));
4459 
4460  // Fast check for identical strings.
4461  Label left_ne_right;
4462  STATIC_ASSERT(EQUAL == 0);
4463  STATIC_ASSERT(kSmiTag == 0);
4464  __ Branch(&left_ne_right, ne, left, Operand(right));
4465  __ Ret(USE_DELAY_SLOT);
4466  __ mov(v0, zero_reg); // In the delay slot.
4467  __ bind(&left_ne_right);
4468 
4469  // Handle not identical strings.
4470 
4471  // Check that both strings are internalized strings. If they are, we're done
4472  // because we already know they are not identical. We know they are both
4473  // strings.
4474  if (equality) {
4475  ASSERT(GetCondition() == eq);
4477  __ Or(tmp3, tmp1, Operand(tmp2));
4478  __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
4479  Label is_symbol;
4480  __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
4481  // Make sure a0 is non-zero. At this point input operands are
4482  // guaranteed to be non-zero.
4483  ASSERT(right.is(a0));
4484  __ Ret(USE_DELAY_SLOT);
4485  __ mov(v0, a0); // In the delay slot.
4486  __ bind(&is_symbol);
4487  }
4488 
4489  // Check that both strings are sequential ASCII.
4490  Label runtime;
4491  __ JumpIfBothInstanceTypesAreNotSequentialAscii(
4492  tmp1, tmp2, tmp3, tmp4, &runtime);
4493 
4494  // Compare flat ASCII strings. Returns when done.
4495  if (equality) {
4496  StringCompareStub::GenerateFlatAsciiStringEquals(
4497  masm, left, right, tmp1, tmp2, tmp3);
4498  } else {
4499  StringCompareStub::GenerateCompareFlatAsciiStrings(
4500  masm, left, right, tmp1, tmp2, tmp3, tmp4);
4501  }
4502 
4503  // Handle more complex cases in runtime.
4504  __ bind(&runtime);
4505  __ Push(left, right);
4506  if (equality) {
4507  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
4508  } else {
4509  __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
4510  }
4511 
4512  __ bind(&miss);
4513  GenerateMiss(masm);
4514 }
4515 
4516 
4517 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
4518  ASSERT(state_ == CompareIC::OBJECT);
4519  Label miss;
4520  __ And(a2, a1, Operand(a0));
4521  __ JumpIfSmi(a2, &miss);
4522 
4523  __ GetObjectType(a0, a2, a2);
4524  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
4525  __ GetObjectType(a1, a2, a2);
4526  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
4527 
4528  ASSERT(GetCondition() == eq);
4529  __ Ret(USE_DELAY_SLOT);
4530  __ subu(v0, a0, a1);
4531 
4532  __ bind(&miss);
4533  GenerateMiss(masm);
4534 }
4535 
4536 
4537 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
4538  Label miss;
4539  __ And(a2, a1, a0);
4540  __ JumpIfSmi(a2, &miss);
4541  __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
4542  __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
4543  __ Branch(&miss, ne, a2, Operand(known_map_));
4544  __ Branch(&miss, ne, a3, Operand(known_map_));
4545 
4546  __ Ret(USE_DELAY_SLOT);
4547  __ subu(v0, a0, a1);
4548 
4549  __ bind(&miss);
4550  GenerateMiss(masm);
4551 }
4552 
4553 
4554 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
4555  {
4556  // Call the runtime system in a fresh internal frame.
4557  ExternalReference miss =
4558  ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
4559  FrameScope scope(masm, StackFrame::INTERNAL);
4560  __ Push(a1, a0);
4561  __ Push(ra, a1, a0);
4562  __ li(t0, Operand(Smi::FromInt(op_)));
4563  __ addiu(sp, sp, -kPointerSize);
4564  __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
4565  __ sw(t0, MemOperand(sp)); // In the delay slot.
4566  // Compute the entry point of the rewritten stub.
4567  __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
4568  // Restore registers.
4569  __ Pop(a1, a0, ra);
4570  }
4571  __ Jump(a2);
4572 }
4573 
4574 
4575 void DirectCEntryStub::Generate(MacroAssembler* masm) {
4576  // Make room for arguments to fit the C calling convention. Most of the callers
4577  // of DirectCEntryStub::GenerateCall are using EnterExitFrame/LeaveExitFrame
4578  // so they handle stack restoring and we don't have to do that here.
4579  // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
4580  // kCArgsSlotsSize stack space after the call.
4581  __ Subu(sp, sp, Operand(kCArgsSlotsSize));
4582  // Place the return address on the stack, making the call
4583  // GC safe. The RegExp backend also relies on this.
4584  __ sw(ra, MemOperand(sp, kCArgsSlotsSize));
4585  __ Call(t9); // Call the C++ function.
4586  __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
4587 
4588  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
4589  // In case of an error the return address may point to a memory area
4590  // filled with kZapValue by the GC.
4591  // Dereference the address and check for this.
4592  __ lw(t0, MemOperand(t9));
4593  __ Assert(ne, kReceivedInvalidReturnAddress, t0,
4594  Operand(reinterpret_cast<uint32_t>(kZapValue)));
4595  }
4596  __ Jump(t9);
4597 }
4598 
4599 
4600 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
4601  Register target) {
4602  intptr_t loc =
4603  reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
4604  __ Move(t9, target);
4605  __ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
4606  __ Call(ra);
4607 }
4608 
4609 
4610 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
4611  Label* miss,
4612  Label* done,
4613  Register receiver,
4614  Register properties,
4615  Handle<Name> name,
4616  Register scratch0) {
4617  ASSERT(name->IsUniqueName());
4618  // If names of slots in range from 1 to kProbes - 1 for the hash value are
4619  // not equal to the name and kProbes-th slot is not used (its name is the
4620  // undefined value), it guarantees the hash table doesn't contain the
4621  // property. It's true even if some slots represent deleted properties
4622  // (their names are the hole value).
4623  for (int i = 0; i < kInlinedProbes; i++) {
4624  // scratch0 points to properties hash.
4625  // Compute the masked index: (hash + i + i * i) & mask.
4626  Register index = scratch0;
4627  // Capacity is smi 2^n.
4628  __ lw(index, FieldMemOperand(properties, kCapacityOffset));
4629  __ Subu(index, index, Operand(1));
4630  __ And(index, index, Operand(
4631  Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
4632 
4633  // Scale the index by multiplying by the entry size.
4635  __ sll(at, index, 1);
4636  __ Addu(index, index, at);
4637 
4638  Register entity_name = scratch0;
4639  // Having undefined at this place means the name is not contained.
4640  ASSERT_EQ(kSmiTagSize, 1);
4641  Register tmp = properties;
4642  __ sll(scratch0, index, 1);
4643  __ Addu(tmp, properties, scratch0);
4644  __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
4645 
4646  ASSERT(!tmp.is(entity_name));
4647  __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
4648  __ Branch(done, eq, entity_name, Operand(tmp));
4649 
4650  // Load the hole ready for use below:
4651  __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
4652 
4653  // Stop if found the property.
4654  __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));
4655 
4656  Label good;
4657  __ Branch(&good, eq, entity_name, Operand(tmp));
4658 
4659  // Check if the entry name is not a unique name.
4660  __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
4661  __ lbu(entity_name,
4662  FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
4663  __ JumpIfNotUniqueName(entity_name, miss);
4664  __ bind(&good);
4665 
4666  // Restore the properties.
4667  __ lw(properties,
4668  FieldMemOperand(receiver, JSObject::kPropertiesOffset));
4669  }
4670 
4671  const int spill_mask =
4672  (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
4673  a2.bit() | a1.bit() | a0.bit() | v0.bit());
4674 
4675  __ MultiPush(spill_mask);
4676  __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
4677  __ li(a1, Operand(Handle<Name>(name)));
4678  NameDictionaryLookupStub stub(NEGATIVE_LOOKUP);
4679  __ CallStub(&stub);
4680  __ mov(at, v0);
4681  __ MultiPop(spill_mask);
4682 
4683  __ Branch(done, eq, at, Operand(zero_reg));
4684  __ Branch(miss, ne, at, Operand(zero_reg));
4685 }
4686 
4687 
4688 // Probe the name dictionary in the |elements| register. Jump to the
4689 // |done| label if a property with the given name is found. Jump to
4690 // the |miss| label otherwise.
4691 // If lookup was successful |scratch2| will be equal to elements + 4 * index.
4692 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
4693  Label* miss,
4694  Label* done,
4695  Register elements,
4696  Register name,
4697  Register scratch1,
4698  Register scratch2) {
4699  ASSERT(!elements.is(scratch1));
4700  ASSERT(!elements.is(scratch2));
4701  ASSERT(!name.is(scratch1));
4702  ASSERT(!name.is(scratch2));
4703 
4704  __ AssertName(name);
4705 
4706  // Compute the capacity mask.
4707  __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
4708  __ sra(scratch1, scratch1, kSmiTagSize); // convert smi to int
4709  __ Subu(scratch1, scratch1, Operand(1));
4710 
4711  // Generate an unrolled loop that performs a few probes before
4712  // giving up. Measurements done on Gmail indicate that 2 probes
4713  // cover ~93% of loads from dictionaries.
4714  for (int i = 0; i < kInlinedProbes; i++) {
4715  // Compute the masked index: (hash + i + i * i) & mask.
4716  __ lw(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
4717  if (i > 0) {
4718  // Add the probe offset (i + i * i) left shifted to avoid right shifting
4719  // the hash in a separate instruction. The value hash + i + i * i is right
4720  // shifted in the following and instruction.
4721  ASSERT(NameDictionary::GetProbeOffset(i) <
4722  1 << (32 - Name::kHashFieldOffset));
4723  __ Addu(scratch2, scratch2, Operand(
4724  NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4725  }
4726  __ srl(scratch2, scratch2, Name::kHashShift);
4727  __ And(scratch2, scratch1, scratch2);
4728 
4729  // Scale the index by multiplying by the element size.
4731  // scratch2 = scratch2 * 3.
4732 
4733  __ sll(at, scratch2, 1);
4734  __ Addu(scratch2, scratch2, at);
4735 
4736  // Check if the key is identical to the name.
4737  __ sll(at, scratch2, 2);
4738  __ Addu(scratch2, elements, at);
4739  __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
4740  __ Branch(done, eq, name, Operand(at));
4741  }
4742 
4743  const int spill_mask =
4744  (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
4745  a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
4746  ~(scratch1.bit() | scratch2.bit());
4747 
4748  __ MultiPush(spill_mask);
4749  if (name.is(a0)) {
4750  ASSERT(!elements.is(a1));
4751  __ Move(a1, name);
4752  __ Move(a0, elements);
4753  } else {
4754  __ Move(a0, elements);
4755  __ Move(a1, name);
4756  }
4757  NameDictionaryLookupStub stub(POSITIVE_LOOKUP);
4758  __ CallStub(&stub);
4759  __ mov(scratch2, a2);
4760  __ mov(at, v0);
4761  __ MultiPop(spill_mask);
4762 
4763  __ Branch(done, ne, at, Operand(zero_reg));
4764  __ Branch(miss, eq, at, Operand(zero_reg));
4765 }
4766 
4767 
4768 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
4769  // This stub overrides SometimesSetsUpAFrame() to return false. That means
4770  // we cannot call anything that could cause a GC from this stub.
4771  // Registers:
4772  // result: NameDictionary to probe
4773  // a1: key
4774  // dictionary: NameDictionary to probe.
4775  // index: will hold an index of entry if lookup is successful.
4776  // might alias with result_.
4777  // Returns:
4778  // result_ is zero if lookup failed, non zero otherwise.
4779 
4780  Register result = v0;
4781  Register dictionary = a0;
4782  Register key = a1;
4783  Register index = a2;
4784  Register mask = a3;
4785  Register hash = t0;
4786  Register undefined = t1;
4787  Register entry_key = t2;
4788 
4789  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
4790 
4791  __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
4792  __ sra(mask, mask, kSmiTagSize);
4793  __ Subu(mask, mask, Operand(1));
4794 
4795  __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
4796 
4797  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
4798 
4799  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
4800  // Compute the masked index: (hash + i + i * i) & mask.
4801  // Capacity is smi 2^n.
4802  if (i > 0) {
4803  // Add the probe offset (i + i * i) left shifted to avoid right shifting
4804  // the hash in a separate instruction. The value hash + i + i * i is right
4805  // shifted in the following and instruction.
4806  ASSERT(NameDictionary::GetProbeOffset(i) <
4807  1 << (32 - Name::kHashFieldOffset));
4808  __ Addu(index, hash, Operand(
4809  NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4810  } else {
4811  __ mov(index, hash);
4812  }
4813  __ srl(index, index, Name::kHashShift);
4814  __ And(index, mask, index);
4815 
4816  // Scale the index by multiplying by the entry size.
4818  // index *= 3.
4819  __ mov(at, index);
4820  __ sll(index, index, 1);
4821  __ Addu(index, index, at);
4822 
4823 
4824  ASSERT_EQ(kSmiTagSize, 1);
4825  __ sll(index, index, 2);
4826  __ Addu(index, index, dictionary);
4827  __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
4828 
4829  // Having undefined at this place means the name is not contained.
4830  __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
4831 
4832  // Stop if we found the property.
4833  __ Branch(&in_dictionary, eq, entry_key, Operand(key));
4834 
4835  if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
4836  // Check if the entry name is not a unique name.
4837  __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
4838  __ lbu(entry_key,
4839  FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
4840  __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
4841  }
4842  }
4843 
4844  __ bind(&maybe_in_dictionary);
4845  // If we are doing negative lookup then probing failure should be
4846  // treated as a lookup success. For positive lookup probing failure
4847  // should be treated as lookup failure.
4848  if (mode_ == POSITIVE_LOOKUP) {
4849  __ Ret(USE_DELAY_SLOT);
4850  __ mov(result, zero_reg);
4851  }
4852 
4853  __ bind(&in_dictionary);
4854  __ Ret(USE_DELAY_SLOT);
4855  __ li(result, 1);
4856 
4857  __ bind(&not_in_dictionary);
4858  __ Ret(USE_DELAY_SLOT);
4859  __ mov(result, zero_reg);
4860 }
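
Aside (not part of code-stubs-mips.cc): a standalone C++ sketch of the probing scheme generated above. The dictionary capacity is a power of two, the probe sequence is (hash + i + i*i) & mask, and each probe index is scaled by a three-word entry size in the real code; the names and layout below are illustrative, not V8's.

#include <cassert>
#include <cstdint>
#include <vector>

// Probe offsets i + i*i for i = 0, 1, 2, ... as in NameDictionary::GetProbeOffset.
static uint32_t ProbeOffset(uint32_t i) { return i + i * i; }

// Returns the entry index, or -1 if 'key_hash' is not present.
// 'keys' holds one hash per entry; 0 plays the role of the undefined sentinel.
int Lookup(const std::vector<uint32_t>& keys, uint32_t key_hash) {
  uint32_t capacity = static_cast<uint32_t>(keys.size());
  assert((capacity & (capacity - 1)) == 0);       // power of two, so a mask works
  uint32_t mask = capacity - 1;
  for (uint32_t i = 0; i < capacity; ++i) {
    uint32_t index = (key_hash + ProbeOffset(i)) & mask;   // masked probe
    // The generated code scales 'index' by 3 (sll by 1, then add) to address
    // 3-word entries; one slot per entry is enough to show the idea here.
    if (keys[index] == 0) return -1;              // undefined => not present
    if (keys[index] == key_hash) return static_cast<int>(index);
  }
  return -1;
}

int main() {
  std::vector<uint32_t> keys(8, 0);
  keys[(1234u + ProbeOffset(0)) & 7] = 1234u;     // place a key on its home slot
  assert(Lookup(keys, 1234u) >= 0);
  assert(Lookup(keys, 5678u) == -1);
  return 0;
}
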
4861 
4862 
4863 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
4864  Isolate* isolate) {
4865  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
4866  stub1.GetCode(isolate);
4867  // Hydrogen code stubs need stub2 at snapshot time.
4868  StoreBufferOverflowStub stub2(kSaveFPRegs);
4869  stub2.GetCode(isolate);
4870 }
4871 
4872 
4873 bool CodeStub::CanUseFPRegisters() {
4874  return true; // FPU is a base requirement for V8.
4875 }
4876 
4877 
4878 // Takes the input in 3 registers: address_, value_, and object_. A pointer to
4879 // the value has just been written into the object, now this stub makes sure
4880 // we keep the GC informed. The word in the object where the value has been
4881 // written is in the address register.
4882 void RecordWriteStub::Generate(MacroAssembler* masm) {
4883  Label skip_to_incremental_noncompacting;
4884  Label skip_to_incremental_compacting;
4885 
4886  // The first two branch+nop instructions are generated with labels so as to
4887  // get the offset fixed up correctly by the bind(Label*) call. We patch it
4888  // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
4889  // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
4890  // incremental heap marking.
4891  // See RecordWriteStub::Patch for details.
4892  __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
4893  __ nop();
4894  __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
4895  __ nop();
4896 
4897  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
4898  __ RememberedSetHelper(object_,
4899  address_,
4900  value_,
4901  save_fp_regs_mode_,
4902  MacroAssembler::kReturnAtEnd);
4903  }
4904  __ Ret();
4905 
4906  __ bind(&skip_to_incremental_noncompacting);
4907  GenerateIncremental(masm, INCREMENTAL);
4908 
4909  __ bind(&skip_to_incremental_compacting);
4910  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
4911 
4912  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
4913  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
4914 
4915  PatchBranchIntoNop(masm, 0);
4916  PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
4917 }
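
Aside (not part of code-stubs-mips.cc): a tiny C++ model, with made-up names, of what the two patchable branch+nop slots above buy us. While incremental marking is off, both branches behave as nops and the stub only updates the remembered set; activating incremental marking patches one branch so the same stub body jumps to the incremental path instead, without regenerating code.

#include <cassert>
#include <string>

enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

// Models RecordWriteStub::Generate: two patchable branches followed by the
// store-buffer-only fall-through path.
std::string RunStub(Mode patched_mode) {
  if (patched_mode == INCREMENTAL)             // first branch patched to "taken"
    return "GenerateIncremental(INCREMENTAL)";
  if (patched_mode == INCREMENTAL_COMPACTION)  // second branch patched to "taken"
    return "GenerateIncremental(INCREMENTAL_COMPACTION)";
  return "RememberedSetHelper; Ret";           // both branches act as nops
}

int main() {
  assert(RunStub(STORE_BUFFER_ONLY) == "RememberedSetHelper; Ret");
  assert(RunStub(INCREMENTAL) == "GenerateIncremental(INCREMENTAL)");
  return 0;
}
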
4918 
4919 
4920 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
4921  regs_.Save(masm);
4922 
4923  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
4924  Label dont_need_remembered_set;
4925 
4926  __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
4927  __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
4928  regs_.scratch0(),
4929  &dont_need_remembered_set);
4930 
4931  __ CheckPageFlag(regs_.object(),
4932  regs_.scratch0(),
4933  1 << MemoryChunk::SCAN_ON_SCAVENGE,
4934  ne,
4935  &dont_need_remembered_set);
4936 
4937  // First notify the incremental marker if necessary, then update the
4938  // remembered set.
4939  CheckNeedsToInformIncrementalMarker(
4940  masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
4941  InformIncrementalMarker(masm);
4942  regs_.Restore(masm);
4943  __ RememberedSetHelper(object_,
4944  address_,
4945  value_,
4946  save_fp_regs_mode_,
4947  MacroAssembler::kReturnAtEnd);
4948 
4949  __ bind(&dont_need_remembered_set);
4950  }
4951 
4952  CheckNeedsToInformIncrementalMarker(
4953  masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
4954  InformIncrementalMarker(masm);
4955  regs_.Restore(masm);
4956  __ Ret();
4957 }
4958 
4959 
4960 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
4961  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
4962  int argument_count = 3;
4963  __ PrepareCallCFunction(argument_count, regs_.scratch0());
4964  Register address =
4965  a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
4966  ASSERT(!address.is(regs_.object()));
4967  ASSERT(!address.is(a0));
4968  __ Move(address, regs_.address());
4969  __ Move(a0, regs_.object());
4970  __ Move(a1, address);
4971  __ li(a2, Operand(ExternalReference::isolate_address(masm->isolate())));
4972 
4973  AllowExternalCallThatCantCauseGC scope(masm);
4974  __ CallCFunction(
4975  ExternalReference::incremental_marking_record_write_function(
4976  masm->isolate()),
4977  argument_count);
4978  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
4979 }
4980 
4981 
4982 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
4983  MacroAssembler* masm,
4984  OnNoNeedToInformIncrementalMarker on_no_need,
4985  Mode mode) {
4986  Label on_black;
4987  Label need_incremental;
4988  Label need_incremental_pop_scratch;
4989 
4990  __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
4991  __ lw(regs_.scratch1(),
4992  MemOperand(regs_.scratch0(),
4993  MemoryChunk::kWriteBarrierCounterOffset));
4994  __ Subu(regs_.scratch1(), regs_.scratch1(), Operand(1));
4995  __ sw(regs_.scratch1(),
4996  MemOperand(regs_.scratch0(),
4997  MemoryChunk::kWriteBarrierCounterOffset));
4998  __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
4999 
5000  // Let's look at the color of the object: If it is not black we don't have
5001  // to inform the incremental marker.
5002  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
5003 
5004  regs_.Restore(masm);
5005  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
5006  __ RememberedSetHelper(object_,
5007  address_,
5008  value_,
5009  save_fp_regs_mode_,
5010  MacroAssembler::kReturnAtEnd);
5011  } else {
5012  __ Ret();
5013  }
5014 
5015  __ bind(&on_black);
5016 
5017  // Get the value from the slot.
5018  __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
5019 
5020  if (mode == INCREMENTAL_COMPACTION) {
5021  Label ensure_not_white;
5022 
5023  __ CheckPageFlag(regs_.scratch0(), // Contains value.
5024  regs_.scratch1(), // Scratch.
5025  MemoryChunk::kEvacuationCandidateMask,
5026  eq,
5027  &ensure_not_white);
5028 
5029  __ CheckPageFlag(regs_.object(),
5030  regs_.scratch1(), // Scratch.
5031  MemoryChunk::kSkipEvacuationSlotsRecordingMask,
5032  eq,
5033  &need_incremental);
5034 
5035  __ bind(&ensure_not_white);
5036  }
5037 
5038  // We need extra registers for this, so we push the object and the address
5039  // register temporarily.
5040  __ Push(regs_.object(), regs_.address());
5041  __ EnsureNotWhite(regs_.scratch0(), // The value.
5042  regs_.scratch1(), // Scratch.
5043  regs_.object(), // Scratch.
5044  regs_.address(), // Scratch.
5045  &need_incremental_pop_scratch);
5046  __ Pop(regs_.object(), regs_.address());
5047 
5048  regs_.Restore(masm);
5049  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
5050  __ RememberedSetHelper(object_,
5051  address_,
5052  value_,
5053  save_fp_regs_mode_,
5055  } else {
5056  __ Ret();
5057  }
5058 
5059  __ bind(&need_incremental_pop_scratch);
5060  __ Pop(regs_.object(), regs_.address());
5061 
5062  __ bind(&need_incremental);
5063 
5064  // Fall through when we need to inform the incremental marker.
5065 }
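
Aside (not part of code-stubs-mips.cc): the counter check at the top of CheckNeedsToInformIncrementalMarker reads as a per-page throttle: each recorded write decrements the page's write-barrier counter, and only an underflow forces the slow path. A minimal sketch, with an assumed starting budget rather than V8's real initial value:

#include <cstdio>

struct Page {
  int write_barrier_counter;   // decremented once per recorded write
};

// Mirrors: Subu(counter, counter, 1); Branch(&need_incremental, lt, counter, 0).
bool MustInformIncrementalMarker(Page* page) {
  page->write_barrier_counter -= 1;
  return page->write_barrier_counter < 0;
}

int main() {
  Page page = { 3 };           // assumed budget for illustration
  for (int i = 0; i < 5; ++i) {
    std::printf("write %d -> inform marker: %s\n", i,
                MustInformIncrementalMarker(&page) ? "yes" : "no");
  }
  return 0;
}
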
5066 
5067 
5068 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
5069  // ----------- S t a t e -------------
5070  // -- a0 : element value to store
5071  // -- a3 : element index as smi
5072  // -- sp[0] : array literal index in function as smi
5073  // -- sp[4] : array literal
5074  // clobbers a1, a2, t0
5075  // -----------------------------------
5076 
5077  Label element_done;
5078  Label double_elements;
5079  Label smi_element;
5080  Label slow_elements;
5081  Label fast_elements;
5082 
5083  // Get array literal index, array literal and its map.
5084  __ lw(t0, MemOperand(sp, 0 * kPointerSize));
5085  __ lw(a1, MemOperand(sp, 1 * kPointerSize));
5086  __ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset));
5087 
5088  __ CheckFastElements(a2, t1, &double_elements);
5089  // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
5090  __ JumpIfSmi(a0, &smi_element);
5091  __ CheckFastSmiElements(a2, t1, &fast_elements);
5092 
5093  // Storing into the array literal requires an elements transition. Call into
5094  // the runtime.
5095  __ bind(&slow_elements);
5096  // call.
5097  __ Push(a1, a3, a0);
5098  __ lw(t1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
5099  __ lw(t1, FieldMemOperand(t1, JSFunction::kLiteralsOffset));
5100  __ Push(t1, t0);
5101  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
5102 
5103  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
5104  __ bind(&fast_elements);
5105  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
5106  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
5107  __ Addu(t2, t1, t2);
5108  __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
5109  __ sw(a0, MemOperand(t2, 0));
5110  // Update the write barrier for the array store.
5111  __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
5112  EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
5113  __ Ret(USE_DELAY_SLOT);
5114  __ mov(v0, a0);
5115 
5116  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
5117  // and value is Smi.
5118  __ bind(&smi_element);
5119  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
5120  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
5121  __ Addu(t2, t1, t2);
5122  __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
5123  __ Ret(USE_DELAY_SLOT);
5124  __ mov(v0, a0);
5125 
5126  // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
5127  __ bind(&double_elements);
5128  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
5129  __ StoreNumberToDoubleElements(a0, a3, t1, t3, t5, a2, &slow_elements);
5130  __ Ret(USE_DELAY_SLOT);
5131  __ mov(v0, a0);
5132 }
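
Aside (not part of code-stubs-mips.cc): the fast-elements store above turns a smi-tagged index into a byte offset with a single shift by kPointerSizeLog2 - kSmiTagSize, then adds the untagged FixedArray header offset. A minimal C++ sketch; the header size used here is an illustrative constant for the sketch, not the authoritative layout.

#include <cassert>
#include <cstdint>

const int kSmiTagSize = 1;
const int kPointerSizeLog2 = 2;          // 32-bit MIPS: 4-byte pointers
const int kHeapObjectTag = 1;
const int kFixedArrayHeaderSize = 8;     // illustrative value for the sketch

int32_t ElementByteOffset(int32_t smi_index) {
  // smi_index == (index << kSmiTagSize); one shift both untags and scales it.
  return (smi_index << (kPointerSizeLog2 - kSmiTagSize)) +
         (kFixedArrayHeaderSize - kHeapObjectTag);
}

int main() {
  int index = 3;
  int32_t smi = index << kSmiTagSize;
  assert(ElementByteOffset(smi) ==
         index * 4 + kFixedArrayHeaderSize - kHeapObjectTag);
  return 0;
}
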
5133 
5134 
5135 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
5136  CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
5137  __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
5138  int parameter_count_offset =
5139  StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
5140  __ lw(a1, MemOperand(fp, parameter_count_offset));
5141  if (function_mode_ == JS_FUNCTION_STUB_MODE) {
5142  __ Addu(a1, a1, Operand(1));
5143  }
5144  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
5145  __ sll(a1, a1, kPointerSizeLog2);
5146  __ Ret(USE_DELAY_SLOT);
5147  __ Addu(sp, sp, a1);
5148 }
5149 
5150 
5151 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
5152  if (masm->isolate()->function_entry_hook() != NULL) {
5153  ProfileEntryHookStub stub;
5154  __ push(ra);
5155  __ CallStub(&stub);
5156  __ pop(ra);
5157  }
5158 }
5159 
5160 
5161 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
5162  // The entry hook is a "push ra" instruction, followed by a call.
5163  // Note: on MIPS a "push" is 2 instructions.
5164  const int32_t kReturnAddressDistanceFromFunctionStart =
5165  Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
5166 
5167  // This should contain all kJSCallerSaved registers.
5168  const RegList kSavedRegs =
5169  kJSCallerSaved | // Caller saved registers.
5170  s5.bit(); // Saved stack pointer.
5171 
5172  // We also save ra, so the count here is one higher than the mask indicates.
5173  const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
5174 
5175  // Save all caller-save registers as this may be called from anywhere.
5176  __ MultiPush(kSavedRegs | ra.bit());
5177 
5178  // Compute the function's address for the first argument.
5179  __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
5180 
5181  // The caller's return address is above the saved temporaries.
5182  // Grab that for the second argument to the hook.
5183  __ Addu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
5184 
5185  // Align the stack if necessary.
5186  int frame_alignment = masm->ActivationFrameAlignment();
5187  if (frame_alignment > kPointerSize) {
5188  __ mov(s5, sp);
5189  ASSERT(IsPowerOf2(frame_alignment));
5190  __ And(sp, sp, Operand(-frame_alignment));
5191  }
5192  __ Subu(sp, sp, kCArgsSlotsSize);
5193 #if defined(V8_HOST_ARCH_MIPS)
5194  int32_t entry_hook =
5195  reinterpret_cast<int32_t>(masm->isolate()->function_entry_hook());
5196  __ li(t9, Operand(entry_hook));
5197 #else
5198  // Under the simulator we need to indirect the entry hook through a
5199  // trampoline function at a known address.
5200  // It additionally takes an isolate as a third parameter.
5201  __ li(a2, Operand(ExternalReference::isolate_address(masm->isolate())));
5202 
5203  ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
5204  __ li(t9, Operand(ExternalReference(&dispatcher,
5205  ExternalReference::BUILTIN_CALL,
5206  masm->isolate())));
5207 #endif
5208  // Call C function through t9 to conform ABI for PIC.
5209  __ Call(t9);
5210 
5211  // Restore the stack pointer if needed.
5212  if (frame_alignment > kPointerSize) {
5213  __ mov(sp, s5);
5214  } else {
5215  __ Addu(sp, sp, kCArgsSlotsSize);
5216  }
5217 
5218  // Also pop ra to get Ret(0).
5219  __ MultiPop(kSavedRegs | ra.bit());
5220  __ Ret();
5221 }
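
Aside (not part of code-stubs-mips.cc): the frame-alignment trick above relies on AND-ing the stack pointer with the negation of a power-of-two alignment, which rounds it down; the original value is stashed (in s5) so it can be restored after the call. A minimal standalone sketch:

#include <cassert>
#include <cstdint>

uintptr_t AlignDown(uintptr_t sp, uintptr_t alignment) {
  assert((alignment & (alignment - 1)) == 0);   // must be a power of two
  // Same effect as the generated "And(sp, sp, Operand(-frame_alignment))":
  // -alignment in two's complement equals ~(alignment - 1).
  return sp & ~(alignment - 1);
}

int main() {
  uintptr_t sp = 0x7fff1234;
  uintptr_t saved = sp;        // plays the role of s5 in the stub
  sp = AlignDown(sp, 8);
  assert(sp % 8 == 0 && sp <= saved);
  sp = saved;                  // restore after the call, as the stub does
  assert(sp == 0x7fff1234);
  return 0;
}
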
5222 
5223 
5224 template<class T>
5225 static void CreateArrayDispatch(MacroAssembler* masm,
5226  AllocationSiteOverrideMode mode) {
5227  if (mode == DISABLE_ALLOCATION_SITES) {
5228  T stub(GetInitialFastElementsKind(), mode);
5229  __ TailCallStub(&stub);
5230  } else if (mode == DONT_OVERRIDE) {
5231  int last_index = GetSequenceIndexFromFastElementsKind(
5232  TERMINAL_FAST_ELEMENTS_KIND);
5233  for (int i = 0; i <= last_index; ++i) {
5234  ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
5235  T stub(kind);
5236  __ TailCallStub(&stub, eq, a3, Operand(kind));
5237  }
5238 
5239  // If we reached this point there is a problem.
5240  __ Abort(kUnexpectedElementsKindInArrayConstructor);
5241  } else {
5242  UNREACHABLE();
5243  }
5244 }
5245 
5246 
5247 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
5248  AllocationSiteOverrideMode mode) {
5249  // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
5250  // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
5251  // a0 - number of arguments
5252  // a1 - constructor?
5253  // sp[0] - last argument
5254  Label normal_sequence;
5255  if (mode == DONT_OVERRIDE) {
5256  ASSERT(FAST_SMI_ELEMENTS == 0);
5257  ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
5258  ASSERT(FAST_ELEMENTS == 2);
5259  ASSERT(FAST_HOLEY_ELEMENTS == 3);
5260  ASSERT(FAST_DOUBLE_ELEMENTS == 4);
5261  ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
5262 
5263  // is the low bit set? If so, we are holey and that is good.
5264  __ And(at, a3, Operand(1));
5265  __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
5266  }
5267 
5268  // look at the first argument
5269  __ lw(t1, MemOperand(sp, 0));
5270  __ Branch(&normal_sequence, eq, t1, Operand(zero_reg));
5271 
5272  if (mode == DISABLE_ALLOCATION_SITES) {
5273  ElementsKind initial = GetInitialFastElementsKind();
5274  ElementsKind holey_initial = GetHoleyElementsKind(initial);
5275 
5276  ArraySingleArgumentConstructorStub stub_holey(holey_initial,
5277  DISABLE_ALLOCATION_SITES);
5278  __ TailCallStub(&stub_holey);
5279 
5280  __ bind(&normal_sequence);
5281  ArraySingleArgumentConstructorStub stub(initial,
5282  DISABLE_ALLOCATION_SITES);
5283  __ TailCallStub(&stub);
5284  } else if (mode == DONT_OVERRIDE) {
5285  // We are going to create a holey array, but our kind is non-holey.
5286  // Fix kind and retry (only if we have an allocation site in the slot).
5287  __ Addu(a3, a3, Operand(1));
5288 
5289  if (FLAG_debug_code) {
5290  __ lw(t1, FieldMemOperand(a2, 0));
5291  __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
5292  __ Assert(eq, kExpectedAllocationSite, t1, Operand(at));
5293  }
5294 
5295  // Save the resulting elements kind in type info. We can't just store a3
5296  // in the AllocationSite::transition_info field because elements kind is
5297  // restricted to a portion of the field...upper bits need to be left alone.
5299  __ lw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
5300  __ Addu(t0, t0, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
5301  __ sw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
5302 
5303 
5304  __ bind(&normal_sequence);
5305  int last_index = GetSequenceIndexFromFastElementsKind(
5306  TERMINAL_FAST_ELEMENTS_KIND);
5307  for (int i = 0; i <= last_index; ++i) {
5308  ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
5309  ArraySingleArgumentConstructorStub stub(kind);
5310  __ TailCallStub(&stub, eq, a3, Operand(kind));
5311  }
5312 
5313  // If we reached this point there is a problem.
5314  __ Abort(kUnexpectedElementsKindInArrayConstructor);
5315  } else {
5316  UNREACHABLE();
5317  }
5318 }
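
Aside (not part of code-stubs-mips.cc): the dispatch above relies on the fast elements kinds alternating packed/holey, so the low bit of the kind says "holey" and adding 1 moves a packed kind to its holey variant. A minimal C++ sketch using only the kind values asserted in the code above:

#include <cassert>

enum ElementsKind {
  FAST_SMI_ELEMENTS = 0,
  FAST_HOLEY_SMI_ELEMENTS = 1,
  FAST_ELEMENTS = 2,
  FAST_HOLEY_ELEMENTS = 3
};

bool IsHoley(ElementsKind kind) { return (kind & 1) != 0; }

ElementsKind ToHoley(ElementsKind kind) {
  return static_cast<ElementsKind>(kind + 1);  // mirrors __ Addu(a3, a3, Operand(1))
}

int main() {
  assert(!IsHoley(FAST_SMI_ELEMENTS));
  assert(ToHoley(FAST_SMI_ELEMENTS) == FAST_HOLEY_SMI_ELEMENTS);
  assert(IsHoley(ToHoley(FAST_ELEMENTS)));
  return 0;
}
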
5319 
5320 
5321 template<class T>
5322 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
5323  int to_index = GetSequenceIndexFromFastElementsKind(
5324  TERMINAL_FAST_ELEMENTS_KIND);
5325  for (int i = 0; i <= to_index; ++i) {
5326  ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
5327  T stub(kind);
5328  stub.GetCode(isolate);
5329  if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
5330  T stub1(kind, DISABLE_ALLOCATION_SITES);
5331  stub1.GetCode(isolate);
5332  }
5333  }
5334 }
5335 
5336 
5337 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
5338  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
5339  isolate);
5340  ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
5341  isolate);
5342  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
5343  isolate);
5344 }
5345 
5346 
5347 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
5348  Isolate* isolate) {
5349  ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
5350  for (int i = 0; i < 2; i++) {
5351  // For internal arrays we only need a few things.
5352  InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
5353  stubh1.GetCode(isolate);
5354  InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
5355  stubh2.GetCode(isolate);
5356  InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
5357  stubh3.GetCode(isolate);
5358  }
5359 }
5360 
5361 
5362 void ArrayConstructorStub::GenerateDispatchToArrayStub(
5363  MacroAssembler* masm,
5364  AllocationSiteOverrideMode mode) {
5365  if (argument_count_ == ANY) {
5366  Label not_zero_case, not_one_case;
5367  __ And(at, a0, a0);
5368  __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
5369  CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
5370 
5371  __ bind(&not_zero_case);
5372  __ Branch(&not_one_case, gt, a0, Operand(1));
5373  CreateArrayDispatchOneArgument(masm, mode);
5374 
5375  __ bind(&not_one_case);
5376  CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
5377  } else if (argument_count_ == NONE) {
5378  CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
5379  } else if (argument_count_ == ONE) {
5380  CreateArrayDispatchOneArgument(masm, mode);
5381  } else if (argument_count_ == MORE_THAN_ONE) {
5382  CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
5383  } else {
5384  UNREACHABLE();
5385  }
5386 }
5387 
5388 
5389 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
5390  // ----------- S t a t e -------------
5391  // -- a0 : argc (only if argument_count_ == ANY)
5392  // -- a1 : constructor
5393  // -- a2 : AllocationSite or undefined
5394  // -- sp[0] : return address
5395  // -- sp[4] : last argument
5396  // -----------------------------------
5397 
5398  if (FLAG_debug_code) {
5399  // The array construct code is only set for the global and natives
5400  // builtin Array functions, which always have maps.
5401 
5402  // Initial map for the builtin Array function should be a map.
5403  __ lw(t0, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
5404  // Will both indicate a NULL and a Smi.
5405  __ SmiTst(t0, at);
5406  __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
5407  at, Operand(zero_reg));
5408  __ GetObjectType(t0, t0, t1);
5409  __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
5410  t1, Operand(MAP_TYPE));
5411 
5412  // We should either have undefined in a2 or a valid AllocationSite
5413  __ AssertUndefinedOrAllocationSite(a2, t0);
5414  }
5415 
5416  Label no_info;
5417  // Get the elements kind and case on that.
5418  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5419  __ Branch(&no_info, eq, a2, Operand(at));
5420 
5421  __ lw(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
5422  __ SmiUntag(a3);
5424  __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
5425  GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
5426 
5427  __ bind(&no_info);
5428  GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
5429 }
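
Aside (not part of code-stubs-mips.cc): the kind read above is a bit-field decode: the AllocationSite's transition info is a smi whose low bits hold the elements kind, so the stub smi-untags it and masks with ElementsKindBits::kMask. A minimal C++ sketch; the mask width used here is an assumed value for illustration, not the real field layout.

#include <cassert>
#include <cstdint>

const int kSmiTagSize = 1;
const uint32_t kElementsKindMask = 0x1f;        // assumed width for illustration

uint32_t DecodeElementsKind(uint32_t transition_info_smi) {
  uint32_t untagged = transition_info_smi >> kSmiTagSize;  // SmiUntag
  return untagged & kElementsKindMask;                     // And with kMask
}

int main() {
  uint32_t kind = 3;                            // e.g. FAST_HOLEY_ELEMENTS
  uint32_t smi = (kind | 0x40) << kSmiTagSize;  // upper bits hold other state
  assert(DecodeElementsKind(smi) == kind);
  return 0;
}
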
5430 
5431 
5432 void InternalArrayConstructorStub::GenerateCase(
5433  MacroAssembler* masm, ElementsKind kind) {
5434 
5435  InternalArrayNoArgumentConstructorStub stub0(kind);
5436  __ TailCallStub(&stub0, lo, a0, Operand(1));
5437 
5438  InternalArrayNArgumentsConstructorStub stubN(kind);
5439  __ TailCallStub(&stubN, hi, a0, Operand(1));
5440 
5441  if (IsFastPackedElementsKind(kind)) {
5442  // We might need to create a holey array;
5443  // look at the first argument.
5444  __ lw(at, MemOperand(sp, 0));
5445 
5446  InternalArraySingleArgumentConstructorStub
5447  stub1_holey(GetHoleyElementsKind(kind));
5448  __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
5449  }
5450 
5451  InternalArraySingleArgumentConstructorStub stub1(kind);
5452  __ TailCallStub(&stub1);
5453 }
5454 
5455 
5456 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
5457  // ----------- S t a t e -------------
5458  // -- a0 : argc
5459  // -- a1 : constructor
5460  // -- sp[0] : return address
5461  // -- sp[4] : last argument
5462  // -----------------------------------
5463 
5464  if (FLAG_debug_code) {
5465  // The array construct code is only set for the global and natives
5466  // builtin Array functions, which always have maps.
5467 
5468  // Initial map for the builtin Array function should be a map.
5469  __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
5470  // Will both indicate a NULL and a Smi.
5471  __ SmiTst(a3, at);
5472  __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
5473  at, Operand(zero_reg));
5474  __ GetObjectType(a3, a3, t0);
5475  __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
5476  t0, Operand(MAP_TYPE));
5477  }
5478 
5479  // Figure out the right elements kind.
5480  __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
5481 
5482  // Load the map's "bit field 2" into a3. We only need the first byte,
5483  // but the following bit field extraction takes care of that anyway.
5484  __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
5485  // Retrieve elements_kind from bit field 2.
5486  __ Ext(a3, a3, Map::kElementsKindShift, Map::kElementsKindBitCount);
5487 
5488  if (FLAG_debug_code) {
5489  Label done;
5490  __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
5491  __ Assert(
5492  eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray,
5493  a3, Operand(FAST_HOLEY_ELEMENTS));
5494  __ bind(&done);
5495  }
5496 
5497  Label fast_elements_case;
5498  __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
5499  GenerateCase(masm, FAST_HOLEY_ELEMENTS);
5500 
5501  __ bind(&fast_elements_case);
5502  GenerateCase(masm, FAST_ELEMENTS);
5503 }
5504 
5505 
5506 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
5507  // ----------- S t a t e -------------
5508  // -- a0 : callee
5509  // -- t0 : call_data
5510  // -- a2 : holder
5511  // -- a1 : api_function_address
5512  // -- cp : context
5513  // --
5514  // -- sp[0] : last argument
5515  // -- ...
5516  // -- sp[(argc - 1)* 4] : first argument
5517  // -- sp[argc * 4] : receiver
5518  // -----------------------------------
5519 
5520  Register callee = a0;
5521  Register call_data = t0;
5522  Register holder = a2;
5523  Register api_function_address = a1;
5524  Register context = cp;
5525 
5526  int argc = ArgumentBits::decode(bit_field_);
5527  bool is_store = IsStoreBits::decode(bit_field_);
5528  bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
5529 
5530  typedef FunctionCallbackArguments FCA;
5531 
5532  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
5533  STATIC_ASSERT(FCA::kCalleeIndex == 5);
5534  STATIC_ASSERT(FCA::kDataIndex == 4);
5535  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
5536  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
5537  STATIC_ASSERT(FCA::kIsolateIndex == 1);
5538  STATIC_ASSERT(FCA::kHolderIndex == 0);
5539  STATIC_ASSERT(FCA::kArgsLength == 7);
5540 
5541  Isolate* isolate = masm->isolate();
5542 
5543  // Save context, callee and call data.
5544  __ Push(context, callee, call_data);
5545  // Load context from callee.
5546  __ lw(context, FieldMemOperand(callee, JSFunction::kContextOffset));
5547 
5548  Register scratch = call_data;
5549  if (!call_data_undefined) {
5550  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
5551  }
5552  // Push return value and default return value.
5553  __ Push(scratch, scratch);
5554  __ li(scratch,
5555  Operand(ExternalReference::isolate_address(isolate)));
5556  // Push isolate and holder.
5557  __ Push(scratch, holder);
5558 
5559  // Prepare arguments.
5560  __ mov(scratch, sp);
5561 
5562  // Allocate the v8::Arguments structure in the arguments' space since
5563  // it's not controlled by GC.
5564  const int kApiStackSpace = 4;
5565 
5566  FrameScope frame_scope(masm, StackFrame::MANUAL);
5567  __ EnterExitFrame(false, kApiStackSpace);
5568 
5569  ASSERT(!api_function_address.is(a0) && !scratch.is(a0));
5570  // a0 = FunctionCallbackInfo&
5571  // The arguments structure is after the return address.
5572  __ Addu(a0, sp, Operand(1 * kPointerSize));
5573  // FunctionCallbackInfo::implicit_args_
5574  __ sw(scratch, MemOperand(a0, 0 * kPointerSize));
5575  // FunctionCallbackInfo::values_
5576  __ Addu(at, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
5577  __ sw(at, MemOperand(a0, 1 * kPointerSize));
5578  // FunctionCallbackInfo::length_ = argc
5579  __ li(at, Operand(argc));
5580  __ sw(at, MemOperand(a0, 2 * kPointerSize));
5581  // FunctionCallbackInfo::is_construct_call = 0
5582  __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
5583 
5584  const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
5585  Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
5586  ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
5587  ApiFunction thunk_fun(thunk_address);
5588  ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
5589  masm->isolate());
5590 
5591  AllowExternalCallThatCantCauseGC scope(masm);
5592  MemOperand context_restore_operand(
5593  fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
5594  // Stores return the first js argument.
5595  int return_value_offset = 0;
5596  if (is_store) {
5597  return_value_offset = 2 + FCA::kArgsLength;
5598  } else {
5599  return_value_offset = 2 + FCA::kReturnValueOffset;
5600  }
5601  MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
5602 
5603  __ CallApiFunctionAndReturn(api_function_address,
5604  thunk_ref,
5605  kStackUnwindSpace,
5606  return_value_operand,
5607  &context_restore_operand);
5608 }
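
Aside (not part of code-stubs-mips.cc): the four stores into the exit-frame slot above lay out the words of the callback info: a pointer to the implicit arguments, a pointer to the last JS argument slot (arguments are addressed downwards from it), the argument count, and an is_construct_call flag of zero. A minimal C++ sketch with made-up types, using the FCA::kArgsLength value asserted in the code:

#include <cassert>
#include <cstdint>

struct FunctionCallbackInfoWords {
  intptr_t* implicit_args;   // sw(scratch, MemOperand(a0, 0 * kPointerSize))
  intptr_t* values;          // sw(at, MemOperand(a0, 1 * kPointerSize))
  int32_t length;            // sw(at, MemOperand(a0, 2 * kPointerSize))
  int32_t is_construct_call; // sw(zero_reg, MemOperand(a0, 3 * kPointerSize))
};

int main() {
  const int kArgsLength = 7;        // FCA::kArgsLength from the code above
  const int argc = 2;
  intptr_t frame[16] = {0};         // stand-in for the stack area at 'scratch'
  FunctionCallbackInfoWords info;
  info.implicit_args = frame;
  info.values = frame + (kArgsLength - 1 + argc);  // Addu(at, scratch, ...)
  info.length = argc;
  info.is_construct_call = 0;
  assert(info.values - info.implicit_args == kArgsLength - 1 + argc);
  return 0;
}
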
5609 
5610 
5611 void CallApiGetterStub::Generate(MacroAssembler* masm) {
5612  // ----------- S t a t e -------------
5613  // -- sp[0] : name
5614  // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
5615  // -- ...
5616  // -- a2 : api_function_address
5617  // -----------------------------------
5618 
5619  Register api_function_address = a2;
5620 
5621  __ mov(a0, sp); // a0 = Handle<Name>
5622  __ Addu(a1, a0, Operand(1 * kPointerSize)); // a1 = PCA
5623 
5624  const int kApiStackSpace = 1;
5625  FrameScope frame_scope(masm, StackFrame::MANUAL);
5626  __ EnterExitFrame(false, kApiStackSpace);
5627 
5628  // Create PropertyAccessorInfo instance on the stack above the exit frame with
5629  // a1 (internal::Object** args_) as the data.
5630  __ sw(a1, MemOperand(sp, 1 * kPointerSize));
5631  __ Addu(a1, sp, Operand(1 * kPointerSize)); // a1 = AccessorInfo&
5632 
5633  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
5634 
5635  Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
5636  ExternalReference::Type thunk_type =
5637  ExternalReference::PROFILING_GETTER_CALL;
5638  ApiFunction thunk_fun(thunk_address);
5639  ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
5640  masm->isolate());
5641  __ CallApiFunctionAndReturn(api_function_address,
5642  thunk_ref,
5643  kStackUnwindSpace,
5644  MemOperand(fp, 6 * kPointerSize),
5645  NULL);
5646 }
5647 
5648 
5649 #undef __
5650 
5651 } } // namespace v8::internal
5652 
5653 #endif // V8_TARGET_ARCH_MIPS