v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine.
code-stubs-arm.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if V8_TARGET_ARCH_ARM
31 
32 #include "bootstrapper.h"
33 #include "code-stubs.h"
34 #include "regexp-macro-assembler.h"
35 #include "stub-cache.h"
36 
37 namespace v8 {
38 namespace internal {
39 
40 
41 void FastNewClosureStub::InitializeInterfaceDescriptor(
42  Isolate* isolate,
43  CodeStubInterfaceDescriptor* descriptor) {
44  static Register registers[] = { r2 };
45  descriptor->register_param_count_ = 1;
46  descriptor->register_params_ = registers;
47  descriptor->deoptimization_handler_ =
48  Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
49 }
50 
51 
52 void FastNewContextStub::InitializeInterfaceDescriptor(
53  Isolate* isolate,
54  CodeStubInterfaceDescriptor* descriptor) {
55  static Register registers[] = { r1 };
56  descriptor->register_param_count_ = 1;
57  descriptor->register_params_ = registers;
58  descriptor->deoptimization_handler_ = NULL;
59 }
60 
61 
62 void ToNumberStub::InitializeInterfaceDescriptor(
63  Isolate* isolate,
64  CodeStubInterfaceDescriptor* descriptor) {
65  static Register registers[] = { r0 };
66  descriptor->register_param_count_ = 1;
67  descriptor->register_params_ = registers;
68  descriptor->deoptimization_handler_ = NULL;
69 }
70 
71 
72 void NumberToStringStub::InitializeInterfaceDescriptor(
73  Isolate* isolate,
74  CodeStubInterfaceDescriptor* descriptor) {
75  static Register registers[] = { r0 };
76  descriptor->register_param_count_ = 1;
77  descriptor->register_params_ = registers;
78  descriptor->deoptimization_handler_ =
79  Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
80 }
81 
82 
83 void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
84  Isolate* isolate,
85  CodeStubInterfaceDescriptor* descriptor) {
86  static Register registers[] = { r3, r2, r1 };
87  descriptor->register_param_count_ = 3;
88  descriptor->register_params_ = registers;
89  descriptor->deoptimization_handler_ =
90  Runtime::FunctionForId(
91  Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
92 }
93 
94 
95 void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
96  Isolate* isolate,
97  CodeStubInterfaceDescriptor* descriptor) {
98  static Register registers[] = { r3, r2, r1, r0 };
99  descriptor->register_param_count_ = 4;
100  descriptor->register_params_ = registers;
101  descriptor->deoptimization_handler_ =
102  Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
103 }
104 
105 
106 void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
107  Isolate* isolate,
108  CodeStubInterfaceDescriptor* descriptor) {
109  static Register registers[] = { r2, r3 };
110  descriptor->register_param_count_ = 2;
111  descriptor->register_params_ = registers;
112  descriptor->deoptimization_handler_ = NULL;
113 }
114 
115 
116 void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
117  Isolate* isolate,
118  CodeStubInterfaceDescriptor* descriptor) {
119  static Register registers[] = { r1, r0 };
120  descriptor->register_param_count_ = 2;
121  descriptor->register_params_ = registers;
122  descriptor->deoptimization_handler_ =
123  FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
124 }
125 
126 
127 void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
128  Isolate* isolate,
129  CodeStubInterfaceDescriptor* descriptor) {
130  static Register registers[] = { r1, r0 };
131  descriptor->register_param_count_ = 2;
132  descriptor->register_params_ = registers;
133  descriptor->deoptimization_handler_ =
134  FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
135 }
136 
137 
138 void RegExpConstructResultStub::InitializeInterfaceDescriptor(
139  Isolate* isolate,
140  CodeStubInterfaceDescriptor* descriptor) {
141  static Register registers[] = { r2, r1, r0 };
142  descriptor->register_param_count_ = 3;
143  descriptor->register_params_ = registers;
144  descriptor->deoptimization_handler_ =
145  Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
146 }
147 
148 
149 void LoadFieldStub::InitializeInterfaceDescriptor(
150  Isolate* isolate,
151  CodeStubInterfaceDescriptor* descriptor) {
152  static Register registers[] = { r0 };
153  descriptor->register_param_count_ = 1;
154  descriptor->register_params_ = registers;
155  descriptor->deoptimization_handler_ = NULL;
156 }
157 
158 
159 void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
160  Isolate* isolate,
161  CodeStubInterfaceDescriptor* descriptor) {
162  static Register registers[] = { r1 };
163  descriptor->register_param_count_ = 1;
164  descriptor->register_params_ = registers;
165  descriptor->deoptimization_handler_ = NULL;
166 }
167 
168 
169 void StringLengthStub::InitializeInterfaceDescriptor(
170  Isolate* isolate,
171  CodeStubInterfaceDescriptor* descriptor) {
172  static Register registers[] = { r0, r2 };
173  descriptor->register_param_count_ = 2;
174  descriptor->register_params_ = registers;
175  descriptor->deoptimization_handler_ = NULL;
176 }
177 
178 
179 void KeyedStringLengthStub::InitializeInterfaceDescriptor(
180  Isolate* isolate,
181  CodeStubInterfaceDescriptor* descriptor) {
182  static Register registers[] = { r1, r0 };
183  descriptor->register_param_count_ = 2;
184  descriptor->register_params_ = registers;
185  descriptor->deoptimization_handler_ = NULL;
186 }
187 
188 
189 void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
190  Isolate* isolate,
191  CodeStubInterfaceDescriptor* descriptor) {
192  static Register registers[] = { r2, r1, r0 };
193  descriptor->register_param_count_ = 3;
194  descriptor->register_params_ = registers;
195  descriptor->deoptimization_handler_ =
196  FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
197 }
198 
199 
200 void TransitionElementsKindStub::InitializeInterfaceDescriptor(
201  Isolate* isolate,
202  CodeStubInterfaceDescriptor* descriptor) {
203  static Register registers[] = { r0, r1 };
204  descriptor->register_param_count_ = 2;
205  descriptor->register_params_ = registers;
206  Address entry =
207  Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
208  descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
209 }
210 
211 
212 void CompareNilICStub::InitializeInterfaceDescriptor(
213  Isolate* isolate,
214  CodeStubInterfaceDescriptor* descriptor) {
215  static Register registers[] = { r0 };
216  descriptor->register_param_count_ = 1;
217  descriptor->register_params_ = registers;
218  descriptor->deoptimization_handler_ =
219  FUNCTION_ADDR(CompareNilIC_Miss);
220  descriptor->SetMissHandler(
221  ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
222 }
223 
224 
225 static void InitializeArrayConstructorDescriptor(
226  Isolate* isolate,
227  CodeStubInterfaceDescriptor* descriptor,
228  int constant_stack_parameter_count) {
229  // register state
230  // r0 -- number of arguments
231  // r1 -- function
232  // r2 -- allocation site with elements kind
233  static Register registers_variable_args[] = { r1, r2, r0 };
234  static Register registers_no_args[] = { r1, r2 };
235 
236  if (constant_stack_parameter_count == 0) {
237  descriptor->register_param_count_ = 2;
238  descriptor->register_params_ = registers_no_args;
239  } else {
240  // stack param count needs (constructor pointer, and single argument)
241  descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
242  descriptor->stack_parameter_count_ = r0;
243  descriptor->register_param_count_ = 3;
244  descriptor->register_params_ = registers_variable_args;
245  }
246 
247  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
248  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
249  descriptor->deoptimization_handler_ =
250  Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
251 }
252 
253 
254 static void InitializeInternalArrayConstructorDescriptor(
255  Isolate* isolate,
256  CodeStubInterfaceDescriptor* descriptor,
257  int constant_stack_parameter_count) {
258  // register state
259  // r0 -- number of arguments
260  // r1 -- constructor function
261  static Register registers_variable_args[] = { r1, r0 };
262  static Register registers_no_args[] = { r1 };
263 
264  if (constant_stack_parameter_count == 0) {
265  descriptor->register_param_count_ = 1;
266  descriptor->register_params_ = registers_no_args;
267  } else {
268  // stack param count needs (constructor pointer, and single argument)
269  descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
270  descriptor->stack_parameter_count_ = r0;
271  descriptor->register_param_count_ = 2;
272  descriptor->register_params_ = registers_variable_args;
273  }
274 
275  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
276  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
277  descriptor->deoptimization_handler_ =
278  Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
279 }
280 
281 
282 void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
283  Isolate* isolate,
284  CodeStubInterfaceDescriptor* descriptor) {
285  InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
286 }
287 
288 
289 void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
290  Isolate* isolate,
291  CodeStubInterfaceDescriptor* descriptor) {
292  InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
293 }
294 
295 
296 void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
297  Isolate* isolate,
298  CodeStubInterfaceDescriptor* descriptor) {
299  InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
300 }
301 
302 
303 void ToBooleanStub::InitializeInterfaceDescriptor(
304  Isolate* isolate,
305  CodeStubInterfaceDescriptor* descriptor) {
306  static Register registers[] = { r0 };
307  descriptor->register_param_count_ = 1;
308  descriptor->register_params_ = registers;
309  descriptor->deoptimization_handler_ =
310  FUNCTION_ADDR(ToBooleanIC_Miss);
311  descriptor->SetMissHandler(
312  ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
313 }
314 
315 
316 void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
317  Isolate* isolate,
318  CodeStubInterfaceDescriptor* descriptor) {
319  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
320 }
321 
322 
323 void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
324  Isolate* isolate,
325  CodeStubInterfaceDescriptor* descriptor) {
326  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
327 }
328 
329 
330 void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
331  Isolate* isolate,
332  CodeStubInterfaceDescriptor* descriptor) {
333  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
334 }
335 
336 
337 void StoreGlobalStub::InitializeInterfaceDescriptor(
338  Isolate* isolate,
339  CodeStubInterfaceDescriptor* descriptor) {
340  static Register registers[] = { r1, r2, r0 };
341  descriptor->register_param_count_ = 3;
342  descriptor->register_params_ = registers;
343  descriptor->deoptimization_handler_ =
344  FUNCTION_ADDR(StoreIC_MissFromStubFailure);
345 }
346 
347 
348 void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
349  Isolate* isolate,
350  CodeStubInterfaceDescriptor* descriptor) {
351  static Register registers[] = { r0, r3, r1, r2 };
352  descriptor->register_param_count_ = 4;
353  descriptor->register_params_ = registers;
354  descriptor->deoptimization_handler_ =
355  FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
356 }
357 
358 
359 void BinaryOpICStub::InitializeInterfaceDescriptor(
360  Isolate* isolate,
361  CodeStubInterfaceDescriptor* descriptor) {
362  static Register registers[] = { r1, r0 };
363  descriptor->register_param_count_ = 2;
364  descriptor->register_params_ = registers;
365  descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
366  descriptor->SetMissHandler(
367  ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
368 }
369 
370 
371 void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
372  Isolate* isolate,
373  CodeStubInterfaceDescriptor* descriptor) {
374  static Register registers[] = { r2, r1, r0 };
375  descriptor->register_param_count_ = 3;
376  descriptor->register_params_ = registers;
377  descriptor->deoptimization_handler_ =
378  FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
379 }
380 
381 
382 void StringAddStub::InitializeInterfaceDescriptor(
383  Isolate* isolate,
384  CodeStubInterfaceDescriptor* descriptor) {
385  static Register registers[] = { r1, r0 };
386  descriptor->register_param_count_ = 2;
387  descriptor->register_params_ = registers;
388  descriptor->deoptimization_handler_ =
389  Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
390 }
391 
392 
393 void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
394  static PlatformCallInterfaceDescriptor default_descriptor =
395  PlatformCallInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
396 
397  static PlatformCallInterfaceDescriptor noInlineDescriptor =
398  PlatformCallInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
399 
400  {
401  CallInterfaceDescriptor* descriptor =
402  isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
403  static Register registers[] = { r1, // JSFunction
404  cp, // context
405  r0, // actual number of arguments
406  r2, // expected number of arguments
407  };
408  static Representation representations[] = {
409  Representation::Tagged(), // JSFunction
410  Representation::Tagged(), // context
411  Representation::Integer32(), // actual number of arguments
412  Representation::Integer32(), // expected number of arguments
413  };
414  descriptor->register_param_count_ = 4;
415  descriptor->register_params_ = registers;
416  descriptor->param_representations_ = representations;
417  descriptor->platform_specific_descriptor_ = &default_descriptor;
418  }
419  {
420  CallInterfaceDescriptor* descriptor =
421  isolate->call_descriptor(Isolate::KeyedCall);
422  static Register registers[] = { cp, // context
423  r2, // key
424  };
425  static Representation representations[] = {
426  Representation::Tagged(), // context
427  Representation::Tagged(), // key
428  };
429  descriptor->register_param_count_ = 2;
430  descriptor->register_params_ = registers;
431  descriptor->param_representations_ = representations;
432  descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
433  }
434  {
435  CallInterfaceDescriptor* descriptor =
436  isolate->call_descriptor(Isolate::NamedCall);
437  static Register registers[] = { cp, // context
438  r2, // name
439  };
440  static Representation representations[] = {
441  Representation::Tagged(), // context
442  Representation::Tagged(), // name
443  };
444  descriptor->register_param_count_ = 2;
445  descriptor->register_params_ = registers;
446  descriptor->param_representations_ = representations;
447  descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
448  }
449  {
450  CallInterfaceDescriptor* descriptor =
451  isolate->call_descriptor(Isolate::CallHandler);
452  static Register registers[] = { cp, // context
453  r0, // receiver
454  };
455  static Representation representations[] = {
456  Representation::Tagged(), // context
457  Representation::Tagged(), // receiver
458  };
459  descriptor->register_param_count_ = 2;
460  descriptor->register_params_ = registers;
461  descriptor->param_representations_ = representations;
462  descriptor->platform_specific_descriptor_ = &default_descriptor;
463  }
464  {
465  CallInterfaceDescriptor* descriptor =
466  isolate->call_descriptor(Isolate::ApiFunctionCall);
467  static Register registers[] = { r0, // callee
468  r4, // call_data
469  r2, // holder
470  r1, // api_function_address
471  cp, // context
472  };
473  static Representation representations[] = {
474  Representation::Tagged(), // callee
475  Representation::Tagged(), // call_data
476  Representation::Tagged(), // holder
477  Representation::External(), // api_function_address
478  Representation::Tagged(), // context
479  };
480  descriptor->register_param_count_ = 5;
481  descriptor->register_params_ = registers;
482  descriptor->param_representations_ = representations;
483  descriptor->platform_specific_descriptor_ = &default_descriptor;
484  }
485 }
486 
487 
488 #define __ ACCESS_MASM(masm)
489 
490 
491 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
492  Label* slow,
493  Condition cond);
494 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
495  Register lhs,
496  Register rhs,
497  Label* lhs_not_nan,
498  Label* slow,
499  bool strict);
500 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
501  Register lhs,
502  Register rhs);
503 
504 
505 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
506  // Update the static counter each time a new code stub is generated.
507  Isolate* isolate = masm->isolate();
508  isolate->counters()->code_stubs()->Increment();
509 
510  CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
511  int param_count = descriptor->register_param_count_;
512  {
513  // Call the runtime system in a fresh internal frame.
514  FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
515  ASSERT(descriptor->register_param_count_ == 0 ||
516  r0.is(descriptor->register_params_[param_count - 1]));
517  // Push arguments
518  for (int i = 0; i < param_count; ++i) {
519  __ push(descriptor->register_params_[i]);
520  }
521  ExternalReference miss = descriptor->miss_handler();
522  __ CallExternalReference(miss, descriptor->register_param_count_);
523  }
524 
525  __ Ret();
526 }
527 
528 
529 // Takes a Smi and converts to an IEEE 64 bit floating point value in two
530 // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
531 // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
532 // scratch register. Destroys the source register. No GC occurs during this
533 // stub so you don't have to set up the frame.
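// For example, the Smi 3 untags to 3 = 1.5 * 2^1, so the stub produces the
// exponent word 0x40080000 (sign 0, biased exponent 0x400, top mantissa bits
// 0x80000) and the mantissa word 0x00000000, i.e. the IEEE double 3.0.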
534 class ConvertToDoubleStub : public PlatformCodeStub {
535  public:
536  ConvertToDoubleStub(Register result_reg_1,
537  Register result_reg_2,
538  Register source_reg,
539  Register scratch_reg)
540  : result1_(result_reg_1),
541  result2_(result_reg_2),
542  source_(source_reg),
543  zeros_(scratch_reg) { }
544 
545  private:
546  Register result1_;
547  Register result2_;
548  Register source_;
549  Register zeros_;
550 
551  // Minor key encoding in 16 bits.
552  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
553  class OpBits: public BitField<Token::Value, 2, 14> {};
554 
555  Major MajorKey() { return ConvertToDouble; }
556  int MinorKey() {
557  // Encode the parameters in a unique 16 bit value.
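 // ARM core register codes are in the range 0..15, so each of the four
 // codes fits in 4 bits.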
558  return result1_.code() +
559  (result2_.code() << 4) +
560  (source_.code() << 8) +
561  (zeros_.code() << 12);
562  }
563 
564  void Generate(MacroAssembler* masm);
565 };
566 
567 
568 void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
569  Register exponent = result1_;
570  Register mantissa = result2_;
571 
572  Label not_special;
573  __ SmiUntag(source_);
574  // Move sign bit from source to destination. This works because the sign bit
575  // in the exponent word of the double has the same position and polarity as
576  // the 2's complement sign bit in a Smi.
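 // (Both sign bits live in bit 31 of their respective 32-bit words.)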
577  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
578  __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
579  // Subtract from 0 if source was negative.
580  __ rsb(source_, source_, Operand::Zero(), LeaveCC, ne);
581 
582  // We have -1, 0 or 1, which we treat specially. Register source_ contains
583  // absolute value: it is either equal to 1 (special case of -1 and 1),
584  // greater than 1 (not a special case) or less than 1 (special case of 0).
585  __ cmp(source_, Operand(1));
586  __ b(gt, &not_special);
587 
588  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
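 // The bias (1023) shifted into the exponent field is 0x3ff00000, the upper
 // word of the IEEE double 1.0.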
589  const uint32_t exponent_word_for_1 =
590  HeapNumber::kExponentBias << HeapNumber::kExponentShift;
591  __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
592  // 1, 0 and -1 all have 0 for the second word.
593  __ mov(mantissa, Operand::Zero());
594  __ Ret();
595 
596  __ bind(&not_special);
597  __ clz(zeros_, source_);
598  // Compute exponent and or it into the exponent register.
599  // We use mantissa as a scratch register here. Use a fudge factor to
600  // divide the constant 31 + HeapNumber::kExponentBias, 0x41e, into two parts
601  // that fit in the ARM's constant field.
602  int fudge = 0x400;
603  __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
604  __ add(mantissa, mantissa, Operand(fudge));
605  __ orr(exponent,
606  exponent,
607  Operand(mantissa, LSL, HeapNumber::kExponentShift));
608  // Shift up the source chopping the top bit off.
609  __ add(zeros_, zeros_, Operand(1));
610  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
611  __ mov(source_, Operand(source_, LSL, zeros_));
612  // Compute lower part of fraction (last 12 bits).
613  __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
614  // And the top (top 20 bits).
615  __ orr(exponent,
616  exponent,
617  Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
618  __ Ret();
619 }
620 
621 
622 void DoubleToIStub::Generate(MacroAssembler* masm) {
623  Label out_of_range, only_low, negate, done;
624  Register input_reg = source();
625  Register result_reg = destination();
627 
628  int double_offset = offset();
629  // Account for saved regs if input is sp.
630  if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
631 
632  Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
633  Register scratch_low =
634  GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
635  Register scratch_high =
636  GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
637  LowDwVfpRegister double_scratch = kScratchDoubleReg;
638 
639  __ Push(scratch_high, scratch_low, scratch);
640 
641  if (!skip_fastpath()) {
642  // Load double input.
643  __ vldr(double_scratch, MemOperand(input_reg, double_offset));
644  __ vmov(scratch_low, scratch_high, double_scratch);
645 
646  // Do fast-path convert from double to int.
647  __ vcvt_s32_f64(double_scratch.low(), double_scratch);
648  __ vmov(result_reg, double_scratch.low());
649 
650  // If result is not saturated (0x7fffffff or 0x80000000), we are done.
651  __ sub(scratch, result_reg, Operand(1));
652  __ cmp(scratch, Operand(0x7ffffffe));
653  __ b(lt, &done);
654  } else {
655  // We've already done MacroAssembler::TryFastTruncatedDoubleToILoad, so we
656  // know exponent > 31, so we can skip the vcvt_s32_f64 which will saturate.
657  if (double_offset == 0) {
658  __ ldm(ia, input_reg, scratch_low.bit() | scratch_high.bit());
659  } else {
660  __ ldr(scratch_low, MemOperand(input_reg, double_offset));
661  __ ldr(scratch_high, MemOperand(input_reg, double_offset + kIntSize));
662  }
663  }
664 
665  __ Ubfx(scratch, scratch_high,
666  HeapNumber::kExponentShift, HeapNumber::kExponentBits);
667  // Load scratch with exponent - 1. This is faster than loading
668  // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
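 // (An ARM data-processing immediate is an 8-bit value rotated right by an
 // even amount; 0x400 is encodable that way, 0x3ff is not.)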
669  STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
670  __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
671  // If exponent is greater than or equal to 84, the 32 less significant
672  // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits),
673  // the result is 0.
674  // Compare exponent with 84 (compare exponent - 1 with 83).
675  __ cmp(scratch, Operand(83));
676  __ b(ge, &out_of_range);
677 
678  // If we reach this code, 31 <= exponent <= 83.
679  // So, we don't have to handle cases where 0 <= exponent <= 20 for
680  // which we would need to shift right the high part of the mantissa.
681  // Scratch contains exponent - 1.
682  // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
683  __ rsb(scratch, scratch, Operand(51), SetCC);
684  __ b(ls, &only_low);
685  // 21 <= exponent <= 51, shift scratch_low and scratch_high
686  // to generate the result.
687  __ mov(scratch_low, Operand(scratch_low, LSR, scratch));
688  // Scratch contains: 52 - exponent.
689  // We need: exponent - 20.
690  // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
691  __ rsb(scratch, scratch, Operand(32));
692  __ Ubfx(result_reg, scratch_high,
693  0, HeapNumber::kMantissaBitsInTopWord);
694  // Set the implicit 1 before the mantissa part in scratch_high.
695  __ orr(result_reg, result_reg,
696  Operand(1 << HeapNumber::kMantissaBitsInTopWord));
697  __ orr(result_reg, scratch_low, Operand(result_reg, LSL, scratch));
698  __ b(&negate);
699 
700  __ bind(&out_of_range);
701  __ mov(result_reg, Operand::Zero());
702  __ b(&done);
703 
704  __ bind(&only_low);
705  // 52 <= exponent <= 83, shift only scratch_low.
706  // On entry, scratch contains: 52 - exponent.
707  __ rsb(scratch, scratch, Operand::Zero());
708  __ mov(result_reg, Operand(scratch_low, LSL, scratch));
709 
710  __ bind(&negate);
711  // If input was positive, scratch_high ASR 31 equals 0 and
712  // scratch_high LSR 31 equals zero.
713  // New result = (result eor 0) + 0 = result.
714  // If the input was negative, we have to negate the result.
715  // Input_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
716  // New result = (result eor 0xffffffff) + 1 = 0 - result.
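 // This is branch-free two's-complement negation: eor with the sign bit
 // sign-extended across the word, then add the sign bit shifted down to bit 0.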
717  __ eor(result_reg, result_reg, Operand(scratch_high, ASR, 31));
718  __ add(result_reg, result_reg, Operand(scratch_high, LSR, 31));
719 
720  __ bind(&done);
721 
722  __ Pop(scratch_high, scratch_low, scratch);
723  __ Ret();
724 }
725 
726 
727 void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
728  Isolate* isolate) {
731  stub1.GetCode(isolate);
732  stub2.GetCode(isolate);
733 }
734 
735 
736 // See comment for class.
737 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
738  Label max_negative_int;
739  // the_int_ has the answer which is a signed int32 but not a Smi.
740  // We test for the special value that has a different exponent. This test
741  // has the neat side effect of setting the flags according to the sign.
742  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
743  __ cmp(the_int_, Operand(0x80000000u));
744  __ b(eq, &max_negative_int);
745  // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
746  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
747  uint32_t non_smi_exponent =
748  (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
749  __ mov(scratch_, Operand(non_smi_exponent));
750  // Set the sign bit in scratch_ if the value was negative.
751  __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
752  // Subtract from 0 if the value was negative.
753  __ rsb(the_int_, the_int_, Operand::Zero(), LeaveCC, cs);
754  // We should be masking the implicit first digit of the mantissa away here,
755  // but it just ends up combining harmlessly with the last digit of the
756  // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
757  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
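 // (kNonMantissaBitsInTopWord is 12: the sign bit plus 11 exponent bits, so
 // the shift distance below is 10.)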
758  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
759  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
760  __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
761  __ str(scratch_, FieldMemOperand(the_heap_number_,
762  HeapNumber::kExponentOffset));
763  __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
764  __ str(scratch_, FieldMemOperand(the_heap_number_,
765  HeapNumber::kMantissaOffset));
766  __ Ret();
767 
768  __ bind(&max_negative_int);
769  // The max negative int32 is stored as a positive number in the mantissa of
770  // a double because it uses a sign bit instead of using two's complement.
771  // The actual mantissa bits stored are all 0 because the implicit most
772  // significant 1 bit is not stored.
773  non_smi_exponent += 1 << HeapNumber::kExponentShift;
774  __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
775  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
776  __ mov(ip, Operand::Zero());
777  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
778  __ Ret();
779 }
780 
781 
782 // Handle the case where the lhs and rhs are the same object.
783 // Equality is almost reflexive (everything but NaN), so this is a test
784 // for "identity and not NaN".
785 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
786  Label* slow,
787  Condition cond) {
788  Label not_identical;
789  Label heap_number, return_equal;
790  __ cmp(r0, r1);
791  __ b(ne, &not_identical);
792 
793  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
794  // so we do the second best thing - test it ourselves.
795  // They are both equal and they are not both Smis so both of them are not
796  // Smis. If it's not a heap number, then return equal.
797  if (cond == lt || cond == gt) {
798  __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
799  __ b(ge, slow);
800  } else {
801  __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
802  __ b(eq, &heap_number);
803  // Comparing JS objects with <=, >= is complicated.
804  if (cond != eq) {
805  __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
806  __ b(ge, slow);
807  // Normally here we fall through to return_equal, but undefined is
808  // special: (undefined == undefined) == true, but
809  // (undefined <= undefined) == false! See ECMAScript 11.8.5.
810  if (cond == le || cond == ge) {
811  __ cmp(r4, Operand(ODDBALL_TYPE));
812  __ b(ne, &return_equal);
813  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
814  __ cmp(r0, r2);
815  __ b(ne, &return_equal);
816  if (cond == le) {
817  // undefined <= undefined should fail.
818  __ mov(r0, Operand(GREATER));
819  } else {
820  // undefined >= undefined should fail.
821  __ mov(r0, Operand(LESS));
822  }
823  __ Ret();
824  }
825  }
826  }
827 
828  __ bind(&return_equal);
829  if (cond == lt) {
830  __ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
831  } else if (cond == gt) {
832  __ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
833  } else {
834  __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
835  }
836  __ Ret();
837 
838  // For less and greater we don't have to check for NaN since the result of
839  // x < x is false regardless. For the others here is some code to check
840  // for NaN.
841  if (cond != lt && cond != gt) {
842  __ bind(&heap_number);
843  // It is a heap number, so return non-equal if it's NaN and equal if it's
844  // not NaN.
845 
846  // The representation of NaN values has all exponent bits (52..62) set,
847  // and not all mantissa bits (0..51) clear.
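 // For example, the quiet NaN 0x7ff8000000000000 has exponent bits 0x7ff and
 // a non-zero mantissa, while +Infinity (0x7ff0000000000000) has the same
 // exponent but an all-zero mantissa.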
848  // Read top bits of double representation (second word of value).
849  __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
850  // Test that exponent bits are all set.
851  __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
852  // NaNs have all-one exponents so they sign extend to -1.
853  __ cmp(r3, Operand(-1));
854  __ b(ne, &return_equal);
855 
856  // Shift out flag and all exponent bits, retaining only mantissa.
857  __ mov(r3, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
858  // Or with all low-bits of mantissa.
859  __ ldr(r2, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
860  __ orr(r0, r3, Operand(r2), SetCC);
861  // For equal we already have the right value in r0: Return zero (equal)
862  // if all bits in mantissa are zero (it's an Infinity) and non-zero if
863  // not (it's a NaN). For <= and >= we need to load r0 with the failing
864  // value if it's a NaN.
865  if (cond != eq) {
866  // All-zero means Infinity means equal.
867  __ Ret(eq);
868  if (cond == le) {
869  __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
870  } else {
871  __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
872  }
873  }
874  __ Ret();
875  }
876  // No fall through here.
877 
878  __ bind(&not_identical);
879 }
880 
881 
882 // See comment at call site.
883 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
884  Register lhs,
885  Register rhs,
886  Label* lhs_not_nan,
887  Label* slow,
888  bool strict) {
889  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
890  (lhs.is(r1) && rhs.is(r0)));
891 
892  Label rhs_is_smi;
893  __ JumpIfSmi(rhs, &rhs_is_smi);
894 
895  // Lhs is a Smi. Check whether the rhs is a heap number.
896  __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
897  if (strict) {
898  // If rhs is not a number and lhs is a Smi then strict equality cannot
899  // succeed. Return non-equal.
900  // If rhs is r0 then there is already a non zero value in it.
901  if (!rhs.is(r0)) {
902  __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
903  }
904  __ Ret(ne);
905  } else {
906  // Smi compared non-strictly with a non-Smi non-heap-number. Call
907  // the runtime.
908  __ b(ne, slow);
909  }
910 
911  // Lhs is a smi, rhs is a number.
912  // Convert lhs to a double in d7.
913  __ SmiToDouble(d7, lhs);
914  // Load the double from rhs, tagged HeapNumber r0, to d6.
915  __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
916 
917  // We now have both loaded as doubles but we can skip the lhs nan check
918  // since it's a smi.
919  __ jmp(lhs_not_nan);
920 
921  __ bind(&rhs_is_smi);
922  // Rhs is a smi. Check whether the non-smi lhs is a heap number.
923  __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
924  if (strict) {
925  // If lhs is not a number and rhs is a smi then strict equality cannot
926  // succeed. Return non-equal.
927  // If lhs is r0 then there is already a non zero value in it.
928  if (!lhs.is(r0)) {
929  __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
930  }
931  __ Ret(ne);
932  } else {
933  // Smi compared non-strictly with a non-smi non-heap-number. Call
934  // the runtime.
935  __ b(ne, slow);
936  }
937 
938  // Rhs is a smi, lhs is a heap number.
939  // Load the double from lhs, tagged HeapNumber r1, to d7.
940  __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
941  // Convert rhs to a double in d6.
942  __ SmiToDouble(d6, rhs);
943  // Fall through to both_loaded_as_doubles.
944 }
945 
946 
947 // See comment at call site.
948 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
949  Register lhs,
950  Register rhs) {
951  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
952  (lhs.is(r1) && rhs.is(r0)));
953 
954  // If either operand is a JS object or an oddball value, then they are
955  // not equal since their pointers are different.
956  // There is no test for undetectability in strict equality.
958  Label first_non_object;
959  // Get the type of the first operand into r2 and compare it with
960  // FIRST_SPEC_OBJECT_TYPE.
961  __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE);
962  __ b(lt, &first_non_object);
963 
964  // Return non-zero (r0 is not zero)
965  Label return_not_equal;
966  __ bind(&return_not_equal);
967  __ Ret();
968 
969  __ bind(&first_non_object);
970  // Check for oddballs: true, false, null, undefined.
971  __ cmp(r2, Operand(ODDBALL_TYPE));
972  __ b(eq, &return_not_equal);
973 
974  __ CompareObjectType(lhs, r3, r3, FIRST_SPEC_OBJECT_TYPE);
975  __ b(ge, &return_not_equal);
976 
977  // Check for oddballs: true, false, null, undefined.
978  __ cmp(r3, Operand(ODDBALL_TYPE));
979  __ b(eq, &return_not_equal);
980 
981  // Now that we have the types we might as well check for
982  // internalized-internalized.
984  __ orr(r2, r2, Operand(r3));
985  __ tst(r2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
986  __ b(eq, &return_not_equal);
987 }
988 
989 
990 // See comment at call site.
991 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
992  Register lhs,
993  Register rhs,
994  Label* both_loaded_as_doubles,
995  Label* not_heap_numbers,
996  Label* slow) {
997  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
998  (lhs.is(r1) && rhs.is(r0)));
999 
1000  __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
1001  __ b(ne, not_heap_numbers);
1002  __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
1003  __ cmp(r2, r3);
1004  __ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
1005 
1006  // Both are heap numbers. Load them up then jump to the code we have
1007  // for that.
1008  __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
1009  __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
1010  __ jmp(both_loaded_as_doubles);
1011 }
1012 
1013 
1014 // Fast negative check for internalized-to-internalized equality.
1015 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
1016  Register lhs,
1017  Register rhs,
1018  Label* possible_strings,
1019  Label* not_both_strings) {
1020  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
1021  (lhs.is(r1) && rhs.is(r0)));
1022 
1023  // r2 is object type of rhs.
1024  Label object_test;
1026  __ tst(r2, Operand(kIsNotStringMask));
1027  __ b(ne, &object_test);
1028  __ tst(r2, Operand(kIsNotInternalizedMask));
1029  __ b(ne, possible_strings);
1030  __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
1031  __ b(ge, not_both_strings);
1032  __ tst(r3, Operand(kIsNotInternalizedMask));
1033  __ b(ne, possible_strings);
1034 
1035  // Both are internalized. We already checked they weren't the same pointer
1036  // so they are not equal.
1037  __ mov(r0, Operand(NOT_EQUAL));
1038  __ Ret();
1039 
1040  __ bind(&object_test);
1041  __ cmp(r2, Operand(FIRST_SPEC_OBJECT_TYPE));
1042  __ b(lt, not_both_strings);
1043  __ CompareObjectType(lhs, r2, r3, FIRST_SPEC_OBJECT_TYPE);
1044  __ b(lt, not_both_strings);
1045  // If both objects are undetectable, they are equal. Otherwise, they
1046  // are not equal, since they are different objects and an object is not
1047  // equal to undefined.
1048  __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
1049  __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
1050  __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
1051  __ and_(r0, r2, Operand(r3));
1052  __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
1053  __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
1054  __ Ret();
1055 }
1056 
1057 
1058 static void ICCompareStub_CheckInputType(MacroAssembler* masm,
1059  Register input,
1060  Register scratch,
1061  CompareIC::State expected,
1062  Label* fail) {
1063  Label ok;
1064  if (expected == CompareIC::SMI) {
1065  __ JumpIfNotSmi(input, fail);
1066  } else if (expected == CompareIC::NUMBER) {
1067  __ JumpIfSmi(input, &ok);
1068  __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
1069  DONT_DO_SMI_CHECK);
1070  }
1071  // We could be strict about internalized/non-internalized here, but as long as
1072  // hydrogen doesn't care, the stub doesn't have to care either.
1073  __ bind(&ok);
1074 }
1075 
1076 
1077 // On entry r1 and r2 are the values to be compared.
1078 // On exit r0 is 0, positive or negative to indicate the result of
1079 // the comparison.
1080 void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
1081  Register lhs = r1;
1082  Register rhs = r0;
1083  Condition cc = GetCondition();
1084 
1085  Label miss;
1086  ICCompareStub_CheckInputType(masm, lhs, r2, left_, &miss);
1087  ICCompareStub_CheckInputType(masm, rhs, r3, right_, &miss);
1088 
1089  Label slow; // Call builtin.
1090  Label not_smis, both_loaded_as_doubles, lhs_not_nan;
1091 
1092  Label not_two_smis, smi_done;
1093  __ orr(r2, r1, r0);
1094  __ JumpIfNotSmi(r2, &not_two_smis);
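 // Both operands are Smis: untag each (arithmetic shift right by one) and
 // subtract; the sign of the difference left in r0 is the comparison result.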
1095  __ mov(r1, Operand(r1, ASR, 1));
1096  __ sub(r0, r1, Operand(r0, ASR, 1));
1097  __ Ret();
1098  __ bind(&not_two_smis);
1099 
1100  // NOTICE! This code is only reached after a smi-fast-case check, so
1101  // it is certain that at least one operand isn't a smi.
1102 
1103  // Handle the case where the objects are identical. Either returns the answer
1104  // or goes to slow. Only falls through if the objects were not identical.
1105  EmitIdenticalObjectComparison(masm, &slow, cc);
1106 
1107  // If either is a Smi (we know that not both are), then they can only
1108  // be strictly equal if the other is a HeapNumber.
1109  STATIC_ASSERT(kSmiTag == 0);
1110  ASSERT_EQ(0, Smi::FromInt(0));
1111  __ and_(r2, lhs, Operand(rhs));
1112  __ JumpIfNotSmi(r2, &not_smis);
1113  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
1114  // 1) Return the answer.
1115  // 2) Go to slow.
1116  // 3) Fall through to both_loaded_as_doubles.
1117  // 4) Jump to lhs_not_nan.
1118  // In cases 3 and 4 we have found out we were dealing with a number-number
1119  // comparison. If VFP3 is supported the double values of the numbers have
1120  // been loaded into d7 and d6. Otherwise, the double values have been loaded
1121  // into r0, r1, r2, and r3.
1122  EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());
1123 
1124  __ bind(&both_loaded_as_doubles);
1125  // The arguments have been converted to doubles and stored in d6 and d7, if
1126  // VFP3 is supported, or in r0, r1, r2, and r3.
1127  Isolate* isolate = masm->isolate();
1128  __ bind(&lhs_not_nan);
1129  Label no_nan;
1130  // ARMv7 VFP3 instructions to implement double precision comparison.
1131  __ VFPCompareAndSetFlags(d7, d6);
1132  Label nan;
1133  __ b(vs, &nan);
1134  __ mov(r0, Operand(EQUAL), LeaveCC, eq);
1135  __ mov(r0, Operand(LESS), LeaveCC, lt);
1136  __ mov(r0, Operand(GREATER), LeaveCC, gt);
1137  __ Ret();
1138 
1139  __ bind(&nan);
1140  // If one of the sides was a NaN then the v flag is set. Load r0 with
1141  // whatever it takes to make the comparison fail, since comparisons with NaN
1142  // always fail.
1143  if (cc == lt || cc == le) {
1144  __ mov(r0, Operand(GREATER));
1145  } else {
1146  __ mov(r0, Operand(LESS));
1147  }
1148  __ Ret();
1149 
1150  __ bind(&not_smis);
1151  // At this point we know we are dealing with two different objects,
1152  // and neither of them is a Smi. The objects are in rhs_ and lhs_.
1153  if (strict()) {
1154  // This returns non-equal for some object types, or falls through if it
1155  // was not lucky.
1156  EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
1157  }
1158 
1159  Label check_for_internalized_strings;
1160  Label flat_string_check;
1161  // Check for heap-number-heap-number comparison. Can jump to slow case,
1162  // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
1163  // that case. If the inputs are not doubles then jumps to
1164  // check_for_internalized_strings.
1165  // In this case r2 will contain the type of rhs_. Never falls through.
1166  EmitCheckForTwoHeapNumbers(masm,
1167  lhs,
1168  rhs,
1169  &both_loaded_as_doubles,
1170  &check_for_internalized_strings,
1171  &flat_string_check);
1172 
1173  __ bind(&check_for_internalized_strings);
1174  // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
1175  // internalized strings.
1176  if (cc == eq && !strict()) {
1177  // Returns an answer for two internalized strings or two detectable objects.
1178  // Otherwise jumps to string case or not both strings case.
1179  // Assumes that r2 is the type of rhs_ on entry.
1180  EmitCheckForInternalizedStringsOrObjects(
1181  masm, lhs, rhs, &flat_string_check, &slow);
1182  }
1183 
1184  // Check for both being sequential ASCII strings, and inline if that is the
1185  // case.
1186  __ bind(&flat_string_check);
1187 
1188  __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, r2, r3, &slow);
1189 
1190  __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3);
1191  if (cc == eq) {
1192  StringCompareStub::GenerateFlatAsciiStringEquals(masm,
1193  lhs,
1194  rhs,
1195  r2,
1196  r3,
1197  r4);
1198  } else {
1199  StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
1200  lhs,
1201  rhs,
1202  r2,
1203  r3,
1204  r4,
1205  r5);
1206  }
1207  // Never falls through to here.
1208 
1209  __ bind(&slow);
1210 
1211  __ Push(lhs, rhs);
1212  // Figure out which native to call and setup the arguments.
1213  Builtins::JavaScript native;
1214  if (cc == eq) {
1215  native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
1216  } else {
1217  native = Builtins::COMPARE;
1218  int ncr; // NaN compare result
1219  if (cc == lt || cc == le) {
1220  ncr = GREATER;
1221  } else {
1222  ASSERT(cc == gt || cc == ge); // remaining cases
1223  ncr = LESS;
1224  }
1225  __ mov(r0, Operand(Smi::FromInt(ncr)));
1226  __ push(r0);
1227  }
1228 
1229  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1230  // tagged as a small integer.
1231  __ InvokeBuiltin(native, JUMP_FUNCTION);
1232 
1233  __ bind(&miss);
1234  GenerateMiss(masm);
1235 }
1236 
1237 
1238 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
1239  // We don't allow a GC during a store buffer overflow so there is no need to
1240  // store the registers in any particular way, but we do have to store and
1241  // restore them.
1242  __ stm(db_w, sp, kCallerSaved | lr.bit());
1243 
1244  const Register scratch = r1;
1245 
1246  if (save_doubles_ == kSaveFPRegs) {
1247  __ SaveFPRegs(sp, scratch);
1248  }
1249  const int argument_count = 1;
1250  const int fp_argument_count = 0;
1251 
1252  AllowExternalCallThatCantCauseGC scope(masm);
1253  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
1254  __ mov(r0, Operand(ExternalReference::isolate_address(masm->isolate())));
1255  __ CallCFunction(
1256  ExternalReference::store_buffer_overflow_function(masm->isolate()),
1257  argument_count);
1258  if (save_doubles_ == kSaveFPRegs) {
1259  __ RestoreFPRegs(sp, scratch);
1260  }
1261  __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
1262 }
1263 
1264 
1265 void MathPowStub::Generate(MacroAssembler* masm) {
1266  const Register base = r1;
1267  const Register exponent = r2;
1268  const Register heapnumbermap = r5;
1269  const Register heapnumber = r0;
1270  const DwVfpRegister double_base = d0;
1271  const DwVfpRegister double_exponent = d1;
1272  const DwVfpRegister double_result = d2;
1273  const DwVfpRegister double_scratch = d3;
1274  const SwVfpRegister single_scratch = s6;
1275  const Register scratch = r9;
1276  const Register scratch2 = r4;
1277 
1278  Label call_runtime, done, int_exponent;
1279  if (exponent_type_ == ON_STACK) {
1280  Label base_is_smi, unpack_exponent;
1281  // The exponent and base are supplied as arguments on the stack.
1282  // This can only happen if the stub is called from non-optimized code.
1283  // Load input parameters from stack to double registers.
1284  __ ldr(base, MemOperand(sp, 1 * kPointerSize));
1285  __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));
1286 
1287  __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
1288 
1289  __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
1290  __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
1291  __ cmp(scratch, heapnumbermap);
1292  __ b(ne, &call_runtime);
1293 
1294  __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
1295  __ jmp(&unpack_exponent);
1296 
1297  __ bind(&base_is_smi);
1298  __ vmov(single_scratch, scratch);
1299  __ vcvt_f64_s32(double_base, single_scratch);
1300  __ bind(&unpack_exponent);
1301 
1302  __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
1303 
1304  __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
1305  __ cmp(scratch, heapnumbermap);
1306  __ b(ne, &call_runtime);
1307  __ vldr(double_exponent,
1308  FieldMemOperand(exponent, HeapNumber::kValueOffset));
1309  } else if (exponent_type_ == TAGGED) {
1310  // Base is already in double_base.
1311  __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
1312 
1313  __ vldr(double_exponent,
1314  FieldMemOperand(exponent, HeapNumber::kValueOffset));
1315  }
1316 
1317  if (exponent_type_ != INTEGER) {
1318  Label int_exponent_convert;
1319  // Detect integer exponents stored as double.
1320  __ vcvt_u32_f64(single_scratch, double_exponent);
1321  // We do not check for NaN or Infinity here because comparing numbers on
1322  // ARM correctly distinguishes NaNs. We end up calling the built-in.
1323  __ vcvt_f64_u32(double_scratch, single_scratch);
1324  __ VFPCompareAndSetFlags(double_scratch, double_exponent);
1325  __ b(eq, &int_exponent_convert);
1326 
1327  if (exponent_type_ == ON_STACK) {
1328  // Detect square root case. Crankshaft detects constant +/-0.5 at
1329  // compile time and uses DoMathPowHalf instead. We then skip this check
1330  // for non-constant cases of +/-0.5 as these hardly occur.
1331  Label not_plus_half;
1332 
1333  // Test for 0.5.
1334  __ vmov(double_scratch, 0.5, scratch);
1335  __ VFPCompareAndSetFlags(double_exponent, double_scratch);
1336  __ b(ne, &not_plus_half);
1337 
1338  // Calculates square root of base. Check for the special case of
1339  // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
1340  __ vmov(double_scratch, -V8_INFINITY, scratch);
1341  __ VFPCompareAndSetFlags(double_base, double_scratch);
1342  __ vneg(double_result, double_scratch, eq);
1343  __ b(eq, &done);
1344 
1345  // Add +0 to convert -0 to +0.
1346  __ vadd(double_scratch, double_base, kDoubleRegZero);
1347  __ vsqrt(double_result, double_scratch);
1348  __ jmp(&done);
1349 
1350  __ bind(&not_plus_half);
1351  __ vmov(double_scratch, -0.5, scratch);
1352  __ VFPCompareAndSetFlags(double_exponent, double_scratch);
1353  __ b(ne, &call_runtime);
1354 
1355  // Calculates square root of base. Check for the special case of
1356  // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
1357  __ vmov(double_scratch, -V8_INFINITY, scratch);
1358  __ VFPCompareAndSetFlags(double_base, double_scratch);
1359  __ vmov(double_result, kDoubleRegZero, eq);
1360  __ b(eq, &done);
1361 
1362  // Add +0 to convert -0 to +0.
1363  __ vadd(double_scratch, double_base, kDoubleRegZero);
1364  __ vmov(double_result, 1.0, scratch);
1365  __ vsqrt(double_scratch, double_scratch);
1366  __ vdiv(double_result, double_result, double_scratch);
1367  __ jmp(&done);
1368  }
1369 
1370  __ push(lr);
1371  {
1372  AllowExternalCallThatCantCauseGC scope(masm);
1373  __ PrepareCallCFunction(0, 2, scratch);
1374  __ MovToFloatParameters(double_base, double_exponent);
1375  __ CallCFunction(
1376  ExternalReference::power_double_double_function(masm->isolate()),
1377  0, 2);
1378  }
1379  __ pop(lr);
1380  __ MovFromFloatResult(double_result);
1381  __ jmp(&done);
1382 
1383  __ bind(&int_exponent_convert);
1384  __ vcvt_u32_f64(single_scratch, double_exponent);
1385  __ vmov(scratch, single_scratch);
1386  }
1387 
1388  // Calculate power with integer exponent.
1389  __ bind(&int_exponent);
1390 
1391  // Get two copies of exponent in the registers scratch and exponent.
1392  if (exponent_type_ == INTEGER) {
1393  __ mov(scratch, exponent);
1394  } else {
1395  // Exponent has previously been stored into scratch as untagged integer.
1396  __ mov(exponent, scratch);
1397  }
1398  __ vmov(double_scratch, double_base); // Back up base.
1399  __ vmov(double_result, 1.0, scratch2);
1400 
1401  // Get absolute value of exponent.
1402  __ cmp(scratch, Operand::Zero());
1403  __ mov(scratch2, Operand::Zero(), LeaveCC, mi);
1404  __ sub(scratch, scratch2, scratch, LeaveCC, mi);
1405 
1406  Label while_true;
1407  __ bind(&while_true);
1408  __ mov(scratch, Operand(scratch, ASR, 1), SetCC);
1409  __ vmul(double_result, double_result, double_scratch, cs);
1410  __ vmul(double_scratch, double_scratch, double_scratch, ne);
1411  __ b(ne, &while_true);
1412 
1413  __ cmp(exponent, Operand::Zero());
1414  __ b(ge, &done);
1415  __ vmov(double_scratch, 1.0, scratch);
1416  __ vdiv(double_result, double_scratch, double_result);
1417  // Test whether result is zero. Bail out to check for subnormal result.
1418  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
1419  __ VFPCompareAndSetFlags(double_result, 0.0);
1420  __ b(ne, &done);
1421  // double_exponent may not contain the exponent value if the input was a
1422  // smi. We set it with exponent value before bailing out.
1423  __ vmov(single_scratch, exponent);
1424  __ vcvt_f64_s32(double_exponent, single_scratch);
1425 
1426  // Returning or bailing out.
1427  Counters* counters = masm->isolate()->counters();
1428  if (exponent_type_ == ON_STACK) {
1429  // The arguments are still on the stack.
1430  __ bind(&call_runtime);
1431  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
1432 
1433  // The stub is called from non-optimized code, which expects the result
1434  // as heap number in exponent.
1435  __ bind(&done);
1436  __ AllocateHeapNumber(
1437  heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
1438  __ vstr(double_result,
1439  FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
1440  ASSERT(heapnumber.is(r0));
1441  __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
1442  __ Ret(2);
1443  } else {
1444  __ push(lr);
1445  {
1446  AllowExternalCallThatCantCauseGC scope(masm);
1447  __ PrepareCallCFunction(0, 2, scratch);
1448  __ MovToFloatParameters(double_base, double_exponent);
1449  __ CallCFunction(
1450  ExternalReference::power_double_double_function(masm->isolate()),
1451  0, 2);
1452  }
1453  __ pop(lr);
1454  __ MovFromFloatResult(double_result);
1455 
1456  __ bind(&done);
1457  __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
1458  __ Ret();
1459  }
1460 }
1461 
1462 
1463 bool CEntryStub::NeedsImmovableCode() {
1464  return true;
1465 }
1466 
1467 
1468 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
1476  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
1477 }
1478 
1479 
1480 void CodeStub::GenerateFPStubs(Isolate* isolate) {
1481  SaveFPRegsMode mode = kSaveFPRegs;
1482  CEntryStub save_doubles(1, mode);
1483  StoreBufferOverflowStub stub(mode);
1484  // These stubs might already be in the snapshot, detect that and don't
1485  // regenerate, which would lead to code stub initialization state being messed
1486  // up.
1487  Code* save_doubles_code;
1488  if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
1489  save_doubles_code = *save_doubles.GetCode(isolate);
1490  }
1491  Code* store_buffer_overflow_code;
1492  if (!stub.FindCodeInCache(&store_buffer_overflow_code, isolate)) {
1493  store_buffer_overflow_code = *stub.GetCode(isolate);
1494  }
1495  isolate->set_fp_stubs_generated(true);
1496 }
1497 
1498 
1499 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
1500  CEntryStub stub(1, kDontSaveFPRegs);
1501  stub.GetCode(isolate);
1502 }
1503 
1504 
1505 void CEntryStub::GenerateCore(MacroAssembler* masm,
1506  Label* throw_normal_exception,
1507  Label* throw_termination_exception,
1508  bool do_gc,
1509  bool always_allocate) {
1510  // r0: result parameter for PerformGC, if any
1511  // r4: number of arguments including receiver (C callee-saved)
1512  // r5: pointer to builtin function (C callee-saved)
1513  // r6: pointer to the first argument (C callee-saved)
1514  Isolate* isolate = masm->isolate();
1515 
1516  if (do_gc) {
1517  // Passing r0.
1518  __ PrepareCallCFunction(2, 0, r1);
1519  __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
1520  __ CallCFunction(ExternalReference::perform_gc_function(isolate),
1521  2, 0);
1522  }
1523 
1524  ExternalReference scope_depth =
1525  ExternalReference::heap_always_allocate_scope_depth(isolate);
1526  if (always_allocate) {
1527  __ mov(r0, Operand(scope_depth));
1528  __ ldr(r1, MemOperand(r0));
1529  __ add(r1, r1, Operand(1));
1530  __ str(r1, MemOperand(r0));
1531  }
1532 
1533  // Call C built-in.
1534  // r0 = argc, r1 = argv
1535  __ mov(r0, Operand(r4));
1536  __ mov(r1, Operand(r6));
1537 
1538 #if V8_HOST_ARCH_ARM
1539  int frame_alignment = MacroAssembler::ActivationFrameAlignment();
1540  int frame_alignment_mask = frame_alignment - 1;
1541  if (FLAG_debug_code) {
1542  if (frame_alignment > kPointerSize) {
1543  Label alignment_as_expected;
1544  ASSERT(IsPowerOf2(frame_alignment));
1545  __ tst(sp, Operand(frame_alignment_mask));
1546  __ b(eq, &alignment_as_expected);
1547  // Don't use Check here, as it will call Runtime_Abort re-entering here.
1548  __ stop("Unexpected alignment");
1549  __ bind(&alignment_as_expected);
1550  }
1551  }
1552 #endif
1553 
1554  __ mov(r2, Operand(ExternalReference::isolate_address(isolate)));
1555 
1556  // To let the GC traverse the return address of the exit frames, we need to
1557  // know where the return address is. The CEntryStub is unmovable, so
1558  // we can store the address on the stack to be able to find it again and
1559  // we never have to restore it, because it will not change.
1560  // Compute the return address in lr to return to after the jump below. Pc is
1561  // already at '+ 8' from the current instruction but return is after three
1562  // instructions so add another 4 to pc to get the return address.
1563  {
1564  // Prevent literal pool emission before return address.
1565  Assembler::BlockConstPoolScope block_const_pool(masm);
1566  __ add(lr, pc, Operand(4));
1567  __ str(lr, MemOperand(sp, 0));
1568  __ Call(r5);
1569  }
1570 
1571  __ VFPEnsureFPSCRState(r2);
1572 
1573  if (always_allocate) {
1574  // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
1575  // though (contain the result).
1576  __ mov(r2, Operand(scope_depth));
1577  __ ldr(r3, MemOperand(r2));
1578  __ sub(r3, r3, Operand(1));
1579  __ str(r3, MemOperand(r2));
1580  }
1581 
1582  // check for failure result
1583  Label failure_returned;
1584  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
1585  // Lower 2 bits of r2 are 0 iff r0 has failure tag.
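 // (The STATIC_ASSERT above guarantees that adding 1 to a failure-tagged
 // value clears its tag bits, so the tst sets the Z flag exactly for
 // failure results.)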
1586  __ add(r2, r0, Operand(1));
1587  __ tst(r2, Operand(kFailureTagMask));
1588  __ b(eq, &failure_returned);
1589 
1590  // Exit C frame and return.
1591  // r0:r1: result
1592  // sp: stack pointer
1593  // fp: frame pointer
1594  // Callee-saved register r4 still holds argc.
1595  __ LeaveExitFrame(save_doubles_, r4, true);
1596  __ mov(pc, lr);
1597 
1598  // Check whether we should retry or throw an exception.
1599  Label retry;
1600  __ bind(&failure_returned);
1602  __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
1603  __ b(eq, &retry);
1604 
1605  // Retrieve the pending exception.
1606  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1607  isolate)));
1608  __ ldr(r0, MemOperand(ip));
1609 
1610  // Clear the pending exception.
1611  __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
1612  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1613  isolate)));
1614  __ str(r3, MemOperand(ip));
1615 
1616  // Special handling of termination exceptions, which are uncatchable
1617  // by JavaScript code.
1618  __ LoadRoot(r3, Heap::kTerminationExceptionRootIndex);
1619  __ cmp(r0, r3);
1620  __ b(eq, throw_termination_exception);
1621 
1622  // Handle normal exception.
1623  __ jmp(throw_normal_exception);
1624 
1625  __ bind(&retry); // Pass the last failure (r0) as the parameter (r0) when retrying.
1626 }
1627 
1628 
1629 void CEntryStub::Generate(MacroAssembler* masm) {
1630  // Called from JavaScript; parameters are on the stack as if calling a JS function.
1631  // r0: number of arguments including receiver
1632  // r1: pointer to builtin function
1633  // fp: frame pointer (restored after C call)
1634  // sp: stack pointer (restored as callee's sp after C call)
1635  // cp: current context (C callee-saved)
1636 
1638 
1639  // Result returned in r0 or r0+r1 by default.
1640 
1641  // NOTE: Invocations of builtins may return failure objects
1642  // instead of a proper result. The builtin entry handles
1643  // this by performing a garbage collection and retrying the
1644  // builtin once.
1645 
1646  // Compute the argv pointer in a callee-saved register.
1647  __ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
1648  __ sub(r6, r6, Operand(kPointerSize));
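  // In concrete terms, assuming 4-byte pointers: r6 = sp + 4 * argc - 4, e.g.
  // sp + 8 for three arguments. That is the highest of the argument slots,
  // which, per the register summary further down, holds the first argument.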
1649 
1650  // Enter the exit frame that transitions from JavaScript to C++.
1651  FrameAndConstantPoolScope scope(masm, StackFrame::MANUAL);
1652  __ EnterExitFrame(save_doubles_);
1653 
1654  // Set up argc and the builtin function in callee-saved registers.
1655  __ mov(r4, Operand(r0));
1656  __ mov(r5, Operand(r1));
1657 
1658  // r4: number of arguments (C callee-saved)
1659  // r5: pointer to builtin function (C callee-saved)
1660  // r6: pointer to first argument (C callee-saved)
1661 
1662  Label throw_normal_exception;
1663  Label throw_termination_exception;
1664 
1665  // Call into the runtime system.
1666  GenerateCore(masm,
1667  &throw_normal_exception,
1668  &throw_termination_exception,
1669  false,
1670  false);
1671 
1672  // Do space-specific GC and retry runtime call.
1673  GenerateCore(masm,
1674  &throw_normal_exception,
1675  &throw_termination_exception,
1676  true,
1677  false);
1678 
1679  // Do full GC and retry runtime call one final time.
1680  Failure* failure = Failure::InternalError();
1681  __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
1682  GenerateCore(masm,
1683  &throw_normal_exception,
1684  &throw_termination_exception,
1685  true,
1686  true);
1687 
1688  { FrameScope scope(masm, StackFrame::MANUAL);
1689  __ PrepareCallCFunction(0, r0);
1690  __ CallCFunction(
1691  ExternalReference::out_of_memory_function(masm->isolate()), 0, 0);
1692  }
1693 
1694  __ bind(&throw_termination_exception);
1695  __ ThrowUncatchable(r0);
1696 
1697  __ bind(&throw_normal_exception);
1698  __ Throw(r0);
1699 }
1700 
1701 
1702 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
1703  // r0: code entry
1704  // r1: function
1705  // r2: receiver
1706  // r3: argc
1707  // [sp+0]: argv
1708 
1709  Label invoke, handler_entry, exit;
1710 
1712 
1713  // Called from C, so do not pop argc and args on exit (preserve sp)
1714  // No need to save register-passed args
1715  // Save callee-saved registers (incl. cp and fp), sp, and lr
1716  __ stm(db_w, sp, kCalleeSaved | lr.bit());
1717 
1718  // Save callee-saved vfp registers.
1720  // Set up the reserved register for 0.0.
1721  __ vmov(kDoubleRegZero, 0.0);
1722  __ VFPEnsureFPSCRState(r4);
1723 
1724  // Get address of argv, see stm above.
1725  // r0: code entry
1726  // r1: function
1727  // r2: receiver
1728  // r3: argc
1729 
1730  // Set up argv in r4.
1731  int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
1732  offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
1733  __ ldr(r4, MemOperand(sp, offset_to_argv));
1734 
1735  // Push a frame with special values set up to mark it as an entry frame.
1736  // r0: code entry
1737  // r1: function
1738  // r2: receiver
1739  // r3: argc
1740  // r4: argv
1741  Isolate* isolate = masm->isolate();
1742  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
1743  if (FLAG_enable_ool_constant_pool) {
1744  __ mov(r8, Operand(isolate->factory()->empty_constant_pool_array()));
1745  }
1746  __ mov(r7, Operand(Smi::FromInt(marker)));
1747  __ mov(r6, Operand(Smi::FromInt(marker)));
1748  __ mov(r5,
1749  Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
1750  __ ldr(r5, MemOperand(r5));
1751  __ mov(ip, Operand(-1)); // Push a bad frame pointer to fail if it is used.
1752  __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() |
1753  (FLAG_enable_ool_constant_pool ? r8.bit() : 0) |
1754  ip.bit());
1755 
1756  // Set up frame pointer for the frame to be pushed.
1757  __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
1758 
1759  // If this is the outermost JS call, set js_entry_sp value.
1760  Label non_outermost_js;
1761  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
1762  __ mov(r5, Operand(ExternalReference(js_entry_sp)));
1763  __ ldr(r6, MemOperand(r5));
1764  __ cmp(r6, Operand::Zero());
1765  __ b(ne, &non_outermost_js);
1766  __ str(fp, MemOperand(r5));
1767  __ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1768  Label cont;
1769  __ b(&cont);
1770  __ bind(&non_outermost_js);
1771  __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
1772  __ bind(&cont);
1773  __ push(ip);
1774 
1775  // Jump to a faked try block that does the invoke, with a faked catch
1776  // block that sets the pending exception.
1777  __ jmp(&invoke);
1778 
1779  // Block literal pool emission whilst taking the position of the handler
1780  // entry. This avoids making the assumption that literal pools are always
1781  // emitted after an instruction is emitted, rather than before.
1782  {
1783  Assembler::BlockConstPoolScope block_const_pool(masm);
1784  __ bind(&handler_entry);
1785  handler_offset_ = handler_entry.pos();
1786  // Caught exception: Store result (exception) in the pending exception
1787  // field in the JSEnv and return a failure sentinel. Coming in here the
1788  // fp will be invalid because the PushTryHandler below sets it to 0 to
1789  // signal the existence of the JSEntry frame.
1790  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1791  isolate)));
1792  }
1793  __ str(r0, MemOperand(ip));
1794  __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
1795  __ b(&exit);
1796 
1797  // Invoke: Link this frame into the handler chain. There's only one
1798  // handler block in this code object, so its index is 0.
1799  __ bind(&invoke);
1800  // Must preserve r0-r4, r5-r6 are available.
1801  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
1802  // If an exception not caught by another handler occurs, this handler
1803  // returns control to the code after the bl(&invoke) above, which
1804  // restores all kCalleeSaved registers (including cp and fp) to their
1805  // saved values before returning a failure to C.
1806 
1807  // Clear any pending exceptions.
1808  __ mov(r5, Operand(isolate->factory()->the_hole_value()));
1809  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1810  isolate)));
1811  __ str(r5, MemOperand(ip));
1812 
1813  // Invoke the function by calling through JS entry trampoline builtin.
1814  // Notice that we cannot store a reference to the trampoline code directly in
1815  // this stub, because runtime stubs are not traversed when doing GC.
1816 
1817  // Expected registers by Builtins::JSEntryTrampoline
1818  // r0: code entry
1819  // r1: function
1820  // r2: receiver
1821  // r3: argc
1822  // r4: argv
1823  if (is_construct) {
1824  ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
1825  isolate);
1826  __ mov(ip, Operand(construct_entry));
1827  } else {
1828  ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
1829  __ mov(ip, Operand(entry));
1830  }
1831  __ ldr(ip, MemOperand(ip)); // deref address
1832  __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
1833 
1834  // Branch and link to JSEntryTrampoline.
1835  __ Call(ip);
1836 
1837  // Unlink this frame from the handler chain.
1838  __ PopTryHandler();
1839 
1840  __ bind(&exit); // r0 holds result
1841  // Check if the current stack frame is marked as the outermost JS frame.
1842  Label non_outermost_js_2;
1843  __ pop(r5);
1844  __ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1845  __ b(ne, &non_outermost_js_2);
1846  __ mov(r6, Operand::Zero());
1847  __ mov(r5, Operand(ExternalReference(js_entry_sp)));
1848  __ str(r6, MemOperand(r5));
1849  __ bind(&non_outermost_js_2);
1850 
1851  // Restore the top frame descriptors from the stack.
1852  __ pop(r3);
1853  __ mov(ip,
1854  Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
1855  __ str(r3, MemOperand(ip));
1856 
1857  // Reset the stack to the callee saved registers.
1858  __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
1859 
1860  // Restore callee-saved registers and return.
1861 #ifdef DEBUG
1862  if (FLAG_debug_code) {
1863  __ mov(lr, Operand(pc));
1864  }
1865 #endif
1866 
1867  // Restore callee-saved vfp registers.
1869 
1870  __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
1871 }
1872 
1873 
1874 // Uses registers r0 to r4.
1875 // Expected input (depending on whether args are in registers or on the stack):
1876 // * object: r0 or at sp + 1 * kPointerSize.
1877 // * function: r1 or at sp.
1878 //
1879 // An inlined call site may have been generated before calling this stub.
1880 // In this case the offset to the inline site to patch is passed in r5.
1881 // (See LCodeGen::DoInstanceOfKnownGlobal)
1882 void InstanceofStub::Generate(MacroAssembler* masm) {
1883  // Call site inlining and patching implies arguments in registers.
1884  ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
1885  // ReturnTrueFalse is only implemented for inlined call sites.
1886  ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
1887 
1888  // Fixed register usage throughout the stub:
1889  const Register object = r0; // Object (lhs).
1890  Register map = r3; // Map of the object.
1891  const Register function = r1; // Function (rhs).
1892  const Register prototype = r4; // Prototype of the function.
1893  const Register inline_site = r9;
1894  const Register scratch = r2;
1895 
1896  const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize;
1897 
1898  Label slow, loop, is_instance, is_not_instance, not_js_object;
1899 
1900  if (!HasArgsInRegisters()) {
1901  __ ldr(object, MemOperand(sp, 1 * kPointerSize));
1902  __ ldr(function, MemOperand(sp, 0));
1903  }
1904 
1905  // Check that the left-hand side is a JS object and load its map.
1906  __ JumpIfSmi(object, &not_js_object);
1907  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
1908 
1909  // If there is a call site cache, don't look in the global cache, but do the
1910  // real lookup and update the call site cache.
1911  if (!HasCallSiteInlineCheck()) {
1912  Label miss;
1913  __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1914  __ b(ne, &miss);
1915  __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
1916  __ b(ne, &miss);
1917  __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
1918  __ Ret(HasArgsInRegisters() ? 0 : 2);
1919 
1920  __ bind(&miss);
1921  }
1922 
1923  // Get the prototype of the function.
1924  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
1925 
1926  // Check that the function prototype is a JS object.
1927  __ JumpIfSmi(prototype, &slow);
1928  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
1929 
1930  // Update the global instanceof or call site inlined cache with the current
1931  // map and function. The cached answer will be set when it is known below.
1932  if (!HasCallSiteInlineCheck()) {
1933  __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1934  __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
1935  } else {
1936  ASSERT(HasArgsInRegisters());
1937  // Patch the (relocated) inlined map check.
1938 
1939  // The offset was stored in r5
1940  // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
1941  const Register offset = r5;
1942  __ sub(inline_site, lr, offset);
1943  // Get the map location in r5 and patch it.
1944  __ GetRelocatedValueLocation(inline_site, offset);
1945  __ ldr(offset, MemOperand(offset));
1946  __ str(map, FieldMemOperand(offset, Cell::kValueOffset));
1947  }
1948 
1949  // Register mapping: r3 is object map and r4 is function prototype.
1950  // Get prototype of object into r2.
1951  __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
1952 
1953  // We don't need map any more. Use it as a scratch register.
1954  Register scratch2 = map;
1955  map = no_reg;
1956 
1957  // Loop through the prototype chain looking for the function prototype.
1958  __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
1959  __ bind(&loop);
1960  __ cmp(scratch, Operand(prototype));
1961  __ b(eq, &is_instance);
1962  __ cmp(scratch, scratch2);
1963  __ b(eq, &is_not_instance);
1964  __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
1965  __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
1966  __ jmp(&loop);
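  // In JavaScript terms the loop implements: 'object instanceof function' is
  // true iff function.prototype occurs somewhere on object's prototype chain.
  // For a hypothetical '[] instanceof Object', for example, the walk visits
  // Array.prototype and then Object.prototype; the latter matches prototype
  // and the code branches to is_instance, whereas hitting null branches to
  // is_not_instance.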
1967 
1968  __ bind(&is_instance);
1969  if (!HasCallSiteInlineCheck()) {
1970  __ mov(r0, Operand(Smi::FromInt(0)));
1971  __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
1972  } else {
1973  // Patch the call site to return true.
1974  __ LoadRoot(r0, Heap::kTrueValueRootIndex);
1975  __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
1976  // Get the boolean result location in scratch and patch it.
1977  __ GetRelocatedValueLocation(inline_site, scratch);
1978  __ str(r0, MemOperand(scratch));
1979 
1980  if (!ReturnTrueFalseObject()) {
1981  __ mov(r0, Operand(Smi::FromInt(0)));
1982  }
1983  }
1984  __ Ret(HasArgsInRegisters() ? 0 : 2);
1985 
1986  __ bind(&is_not_instance);
1987  if (!HasCallSiteInlineCheck()) {
1988  __ mov(r0, Operand(Smi::FromInt(1)));
1989  __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
1990  } else {
1991  // Patch the call site to return false.
1992  __ LoadRoot(r0, Heap::kFalseValueRootIndex);
1993  __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
1994  // Get the boolean result location in scratch and patch it.
1995  __ GetRelocatedValueLocation(inline_site, scratch);
1996  __ str(r0, MemOperand(scratch));
1997 
1998  if (!ReturnTrueFalseObject()) {
1999  __ mov(r0, Operand(Smi::FromInt(1)));
2000  }
2001  }
2002  __ Ret(HasArgsInRegisters() ? 0 : 2);
2003 
2004  Label object_not_null, object_not_null_or_smi;
2005  __ bind(&not_js_object);
2006  // Before the null, smi and string value checks, check that the rhs is a function,
2007  // as for a non-function rhs an exception needs to be thrown.
2008  __ JumpIfSmi(function, &slow);
2009  __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE);
2010  __ b(ne, &slow);
2011 
2012  // Null is not instance of anything.
2013  __ cmp(scratch, Operand(masm->isolate()->factory()->null_value()));
2014  __ b(ne, &object_not_null);
2015  __ mov(r0, Operand(Smi::FromInt(1)));
2016  __ Ret(HasArgsInRegisters() ? 0 : 2);
2017 
2018  __ bind(&object_not_null);
2019  // Smi values are not instances of anything.
2020  __ JumpIfNotSmi(object, &object_not_null_or_smi);
2021  __ mov(r0, Operand(Smi::FromInt(1)));
2022  __ Ret(HasArgsInRegisters() ? 0 : 2);
2023 
2024  __ bind(&object_not_null_or_smi);
2025  // String values are not instances of anything.
2026  __ IsObjectJSStringType(object, scratch, &slow);
2027  __ mov(r0, Operand(Smi::FromInt(1)));
2028  __ Ret(HasArgsInRegisters() ? 0 : 2);
2029 
2030  // Slow-case. Tail call builtin.
2031  __ bind(&slow);
2032  if (!ReturnTrueFalseObject()) {
2033  if (HasArgsInRegisters()) {
2034  __ Push(r0, r1);
2035  }
2036  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
2037  } else {
2038  {
2039  FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2040  __ Push(r0, r1);
2041  __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
2042  }
2043  __ cmp(r0, Operand::Zero());
2044  __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
2045  __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
2046  __ Ret(HasArgsInRegisters() ? 0 : 2);
2047  }
2048 }
2049 
2050 
2051 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
2052  Label miss;
2053  Register receiver;
2054  if (kind() == Code::KEYED_LOAD_IC) {
2055  // ----------- S t a t e -------------
2056  // -- lr : return address
2057  // -- r0 : key
2058  // -- r1 : receiver
2059  // -----------------------------------
2060  __ cmp(r0, Operand(masm->isolate()->factory()->prototype_string()));
2061  __ b(ne, &miss);
2062  receiver = r1;
2063  } else {
2064  ASSERT(kind() == Code::LOAD_IC);
2065  // ----------- S t a t e -------------
2066  // -- r2 : name
2067  // -- lr : return address
2068  // -- r0 : receiver
2069  // -- sp[0] : receiver
2070  // -----------------------------------
2071  receiver = r0;
2072  }
2073 
2074  StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, r3, r4, &miss);
2075  __ bind(&miss);
2076  StubCompiler::TailCallBuiltin(
2078 }
2079 
2080 
2081 Register InstanceofStub::left() { return r0; }
2082 
2083 
2084 Register InstanceofStub::right() { return r1; }
2085 
2086 
2087 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
2088  // The displacement is the offset of the last parameter (if any)
2089  // relative to the frame pointer.
2090  const int kDisplacement =
2092 
2093  // Check that the key is a smi.
2094  Label slow;
2095  __ JumpIfNotSmi(r1, &slow);
2096 
2097  // Check if the calling frame is an arguments adaptor frame.
2098  Label adaptor;
2102  __ b(eq, &adaptor);
2103 
2104  // Check the index against the formal parameter count limit passed in
2105  // through register r0. Use an unsigned comparison to get the negative
2106  // check for free.
2107  __ cmp(r1, r0);
2108  __ b(hs, &slow);
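  // The single unsigned branch covers both checks: a negative smi index has
  // its sign bit set and therefore compares as a very large unsigned value,
  // so 'hs' (unsigned >=) rejects both index >= parameter count and
  // index < 0 at once.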
2109 
2110  // Read the argument from the stack and return it.
2111  __ sub(r3, r0, r1);
2112  __ add(r3, fp, Operand::PointerOffsetFromSmiKey(r3));
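  // A sketch of the addressing, assuming the 32-bit layout where a smi is the
  // integer shifted left by one: r3 = r0 - r1 is the smi-tagged difference
  // (parameter count - index), and PointerOffsetFromSmiKey scales it to
  // (parameter count - index) * kPointerSize, so the load below picks the
  // argument slot at that offset (plus kDisplacement) from fp.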
2113  __ ldr(r0, MemOperand(r3, kDisplacement));
2114  __ Jump(lr);
2115 
2116  // Arguments adaptor case: check the index against the actual argument
2117  // limit found in the arguments adaptor frame. Use an unsigned
2118  // comparison to get the negative check for free.
2119  __ bind(&adaptor);
2121  __ cmp(r1, r0);
2122  __ b(cs, &slow);
2123 
2124  // Read the argument from the adaptor frame and return it.
2125  __ sub(r3, r0, r1);
2126  __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r3));
2127  __ ldr(r0, MemOperand(r3, kDisplacement));
2128  __ Jump(lr);
2129 
2130  // Slow-case: Handle non-smi or out-of-bounds access to arguments
2131  // by calling the runtime system.
2132  __ bind(&slow);
2133  __ push(r1);
2134  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
2135 }
2136 
2137 
2138 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
2139  // sp[0] : number of parameters
2140  // sp[4] : receiver displacement
2141  // sp[8] : function
2142 
2143  // Check if the calling frame is an arguments adaptor frame.
2144  Label runtime;
2148  __ b(ne, &runtime);
2149 
2150  // Patch the arguments.length and the parameters pointer in the current frame.
2152  __ str(r2, MemOperand(sp, 0 * kPointerSize));
2153  __ add(r3, r3, Operand(r2, LSL, 1));
2155  __ str(r3, MemOperand(sp, 1 * kPointerSize));
2156 
2157  __ bind(&runtime);
2158  __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
2159 }
2160 
2161 
2162 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
2163  // Stack layout:
2164  // sp[0] : number of parameters (tagged)
2165  // sp[4] : address of receiver argument
2166  // sp[8] : function
2167  // Registers used over whole function:
2168  // r6 : allocated object (tagged)
2169  // r9 : mapped parameter count (tagged)
2170 
2171  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
2172  // r1 = parameter count (tagged)
2173 
2174  // Check if the calling frame is an arguments adaptor frame.
2175  Label runtime;
2176  Label adaptor_frame, try_allocate;
2180  __ b(eq, &adaptor_frame);
2181 
2182  // No adaptor, parameter count = argument count.
2183  __ mov(r2, r1);
2184  __ b(&try_allocate);
2185 
2186  // We have an adaptor frame. Patch the parameters pointer.
2187  __ bind(&adaptor_frame);
2189  __ add(r3, r3, Operand(r2, LSL, 1));
2191  __ str(r3, MemOperand(sp, 1 * kPointerSize));
2192 
2193  // r1 = parameter count (tagged)
2194  // r2 = argument count (tagged)
2195  // Compute the mapped parameter count = min(r1, r2) in r1.
2196  __ cmp(r1, Operand(r2));
2197  __ mov(r1, Operand(r2), LeaveCC, gt);
2198 
2199  __ bind(&try_allocate);
2200 
2201  // Compute the sizes of backing store, parameter map, and arguments object.
2202  // 1. Parameter map, has 2 extra words containing context and backing store.
2203  const int kParameterMapHeaderSize =
2205  // If there are no mapped parameters, we do not need the parameter_map.
2206  __ cmp(r1, Operand(Smi::FromInt(0)));
2207  __ mov(r9, Operand::Zero(), LeaveCC, eq);
2208  __ mov(r9, Operand(r1, LSL, 1), LeaveCC, ne);
2209  __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);
2210 
2211  // 2. Backing store.
2212  __ add(r9, r9, Operand(r2, LSL, 1));
2213  __ add(r9, r9, Operand(FixedArray::kHeaderSize));
2214 
2215  // 3. Arguments object.
2216  __ add(r9, r9, Operand(Heap::kSloppyArgumentsObjectSize));
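  // Byte accounting at this point, assuming the 32-bit layout where the
  // smi-tagged counts in r1 and r2 are the integers shifted left by one (so
  // 'count, LSL #1' is count * kPointerSize): r9 now holds
  //   (kParameterMapHeaderSize + mapped_count * kPointerSize, or 0 if there
  //    are no mapped parameters)
  //   + FixedArray::kHeaderSize + argument_count * kPointerSize
  //   + Heap::kSloppyArgumentsObjectSize.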
2217 
2218  // Do the allocation of all three objects in one go.
2219  __ Allocate(r9, r0, r3, r4, &runtime, TAG_OBJECT);
2220 
2221  // r0 = address of new object(s) (tagged)
2222  // r2 = argument count (tagged)
2223  // Get the arguments boilerplate from the current native context into r4.
2224  const int kNormalOffset =
2226  const int kAliasedOffset =
2228 
2231  __ cmp(r1, Operand::Zero());
2232  __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
2233  __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
2234 
2235  // r0 = address of new object (tagged)
2236  // r1 = mapped parameter count (tagged)
2237  // r2 = argument count (tagged)
2238  // r4 = address of boilerplate object (tagged)
2239  // Copy the JS object part.
2240  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
2241  __ ldr(r3, FieldMemOperand(r4, i));
2242  __ str(r3, FieldMemOperand(r0, i));
2243  }
2244 
2245  // Set up the callee in-object property.
2247  __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
2248  const int kCalleeOffset = JSObject::kHeaderSize +
2250  __ str(r3, FieldMemOperand(r0, kCalleeOffset));
2251 
2252  // Use the length (smi tagged) and set that as an in-object property too.
2254  const int kLengthOffset = JSObject::kHeaderSize +
2256  __ str(r2, FieldMemOperand(r0, kLengthOffset));
2257 
2258  // Set up the elements pointer in the allocated arguments object.
2259  // If we allocated a parameter map, r4 will point there, otherwise
2260  // it will point to the backing store.
2261  __ add(r4, r0, Operand(Heap::kSloppyArgumentsObjectSize));
2263 
2264  // r0 = address of new object (tagged)
2265  // r1 = mapped parameter count (tagged)
2266  // r2 = argument count (tagged)
2267  // r4 = address of parameter map or backing store (tagged)
2268  // Initialize parameter map. If there are no mapped arguments, we're done.
2269  Label skip_parameter_map;
2270  __ cmp(r1, Operand(Smi::FromInt(0)));
2271  // Move backing store address to r3, because it is
2272  // expected there when filling in the unmapped arguments.
2273  __ mov(r3, r4, LeaveCC, eq);
2274  __ b(eq, &skip_parameter_map);
2275 
2276  __ LoadRoot(r6, Heap::kSloppyArgumentsElementsMapRootIndex);
2278  __ add(r6, r1, Operand(Smi::FromInt(2)));
2280  __ str(cp, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
2281  __ add(r6, r4, Operand(r1, LSL, 1));
2282  __ add(r6, r6, Operand(kParameterMapHeaderSize));
2283  __ str(r6, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));
2284 
2285  // Copy the parameter slots and the holes in the arguments.
2286  // We need to fill in mapped_parameter_count slots. They index the context,
2287  // where parameters are stored in reverse order, at
2288  // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
2289  // The mapped parameters thus need to get the indices
2290  // MIN_CONTEXT_SLOTS+parameter_count-1 ..
2291  // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
2292  // We loop from right to left.
2293  Label parameters_loop, parameters_test;
2294  __ mov(r6, r1);
2295  __ ldr(r9, MemOperand(sp, 0 * kPointerSize));
2296  __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
2297  __ sub(r9, r9, Operand(r1));
2298  __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
2299  __ add(r3, r4, Operand(r6, LSL, 1));
2300  __ add(r3, r3, Operand(kParameterMapHeaderSize));
2301 
2302  // r6 = loop variable (tagged)
2303  // r1 = mapping index (tagged)
2304  // r3 = address of backing store (tagged)
2305  // r4 = address of parameter map (tagged), which is also the address of new
2306  // object + Heap::kSloppyArgumentsObjectSize (tagged)
2307  // r0 = temporary scratch (e.g., for address calculation)
2308  // r5 = the hole value
2309  __ jmp(&parameters_test);
2310 
2311  __ bind(&parameters_loop);
2312  __ sub(r6, r6, Operand(Smi::FromInt(1)));
2313  __ mov(r0, Operand(r6, LSL, 1));
2314  __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
2315  __ str(r9, MemOperand(r4, r0));
2316  __ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
2317  __ str(r5, MemOperand(r3, r0));
2318  __ add(r9, r9, Operand(Smi::FromInt(1)));
2319  __ bind(&parameters_test);
2320  __ cmp(r6, Operand(Smi::FromInt(0)));
2321  __ b(ne, &parameters_loop);
2322 
2323  // Restore r0 = new object (tagged)
2324  __ sub(r0, r4, Operand(Heap::kSloppyArgumentsObjectSize));
2325 
2326  __ bind(&skip_parameter_map);
2327  // r0 = address of new object (tagged)
2328  // r2 = argument count (tagged)
2329  // r3 = address of backing store (tagged)
2330  // r5 = scratch
2331  // Copy arguments header and remaining slots (if there are any).
2332  __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex);
2335 
2336  Label arguments_loop, arguments_test;
2337  __ mov(r9, r1);
2338  __ ldr(r4, MemOperand(sp, 1 * kPointerSize));
2339  __ sub(r4, r4, Operand(r9, LSL, 1));
2340  __ jmp(&arguments_test);
2341 
2342  __ bind(&arguments_loop);
2343  __ sub(r4, r4, Operand(kPointerSize));
2344  __ ldr(r6, MemOperand(r4, 0));
2345  __ add(r5, r3, Operand(r9, LSL, 1));
2347  __ add(r9, r9, Operand(Smi::FromInt(1)));
2348 
2349  __ bind(&arguments_test);
2350  __ cmp(r9, Operand(r2));
2351  __ b(lt, &arguments_loop);
2352 
2353  // Return and remove the on-stack parameters.
2354  __ add(sp, sp, Operand(3 * kPointerSize));
2355  __ Ret();
2356 
2357  // Do the runtime call to allocate the arguments object.
2358  // r0 = address of new object (tagged)
2359  // r2 = argument count (tagged)
2360  __ bind(&runtime);
2361  __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
2362  __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
2363 }
2364 
2365 
2366 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
2367  // sp[0] : number of parameters
2368  // sp[4] : receiver displacement
2369  // sp[8] : function
2370  // Check if the calling frame is an arguments adaptor frame.
2371  Label adaptor_frame, try_allocate, runtime;
2375  __ b(eq, &adaptor_frame);
2376 
2377  // Get the length from the frame.
2378  __ ldr(r1, MemOperand(sp, 0));
2379  __ b(&try_allocate);
2380 
2381  // Patch the arguments.length and the parameters pointer.
2382  __ bind(&adaptor_frame);
2384  __ str(r1, MemOperand(sp, 0));
2385  __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1));
2387  __ str(r3, MemOperand(sp, 1 * kPointerSize));
2388 
2389  // Try the new space allocation. Start out with computing the size
2390  // of the arguments object and the elements array in words.
2391  Label add_arguments_object;
2392  __ bind(&try_allocate);
2393  __ SmiUntag(r1, SetCC);
2394  __ b(eq, &add_arguments_object);
2395  __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
2396  __ bind(&add_arguments_object);
2397  __ add(r1, r1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
2398 
2399  // Do the allocation of both objects in one go.
2400  __ Allocate(r1, r0, r2, r3, &runtime,
2401  static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
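  // Note the unit change compared to the sloppy-fast path above: there the
  // allocation size was accumulated in bytes, whereas here r1 counts words
  // (the two header constants are divided by kPointerSize) and the Allocate
  // call is told so via SIZE_IN_WORDS.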
2402 
2403  // Get the arguments boilerplate from the current native context.
2408 
2409  // Copy the JS object part.
2410  __ CopyFields(r0, r4, d0, JSObject::kHeaderSize / kPointerSize);
2411 
2412  // Get the length (smi tagged) and set that as an in-object property too.
2414  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
2415  __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize +
2416  Heap::kArgumentsLengthIndex * kPointerSize));
2417 
2418  // If there are no actual arguments, we're done.
2419  Label done;
2420  __ cmp(r1, Operand::Zero());
2421  __ b(eq, &done);
2422 
2423  // Get the parameters pointer from the stack.
2424  __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
2425 
2426  // Set up the elements pointer in the allocated arguments object and
2427  // initialize the header in the elements fixed array.
2428  __ add(r4, r0, Operand(Heap::kStrictArgumentsObjectSize));
2430  __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
2433  __ SmiUntag(r1);
2434 
2435  // Copy the fixed array slots.
2436  Label loop;
2437  // Set up r4 to point to the first array slot.
2438  __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2439  __ bind(&loop);
2440  // Pre-decrement r2 with kPointerSize on each iteration.
2441  // Pre-decrement in order to skip receiver.
2442  __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
2443  // Post-increment r4 with kPointerSize on each iteration.
2444  __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
2445  __ sub(r1, r1, Operand(1));
2446  __ cmp(r1, Operand::Zero());
2447  __ b(ne, &loop);
2448 
2449  // Return and remove the on-stack parameters.
2450  __ bind(&done);
2451  __ add(sp, sp, Operand(3 * kPointerSize));
2452  __ Ret();
2453 
2454  // Do the runtime call to allocate the arguments object.
2455  __ bind(&runtime);
2456  __ TailCallRuntime(Runtime::kHiddenNewStrictArgumentsFast, 3, 1);
2457 }
2458 
2459 
2460 void RegExpExecStub::Generate(MacroAssembler* masm) {
2461  // Just jump directly to the runtime if native RegExp is not selected at
2462  // compile time, or if the regexp entry in generated code is turned off by a
2463  // runtime switch or at compilation.
2464 #ifdef V8_INTERPRETED_REGEXP
2465  __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
2466 #else // V8_INTERPRETED_REGEXP
2467 
2468  // Stack frame on entry.
2469  // sp[0]: last_match_info (expected JSArray)
2470  // sp[4]: previous index
2471  // sp[8]: subject string
2472  // sp[12]: JSRegExp object
2473 
2474  const int kLastMatchInfoOffset = 0 * kPointerSize;
2475  const int kPreviousIndexOffset = 1 * kPointerSize;
2476  const int kSubjectOffset = 2 * kPointerSize;
2477  const int kJSRegExpOffset = 3 * kPointerSize;
2478 
2479  Label runtime;
2480  // Allocation of registers for this function. These are in callee-saved
2481  // registers and will be preserved by the call to the native RegExp code, as
2482  // this code is called using the normal C calling convention. When calling
2483  // directly from generated code, the native RegExp code will not do a GC, and
2484  // therefore the contents of these registers are safe to use after the call.
2485  Register subject = r4;
2486  Register regexp_data = r5;
2487  Register last_match_info_elements = no_reg; // will be r6;
2488 
2489  // Ensure that a RegExp stack is allocated.
2490  Isolate* isolate = masm->isolate();
2491  ExternalReference address_of_regexp_stack_memory_address =
2492  ExternalReference::address_of_regexp_stack_memory_address(isolate);
2493  ExternalReference address_of_regexp_stack_memory_size =
2494  ExternalReference::address_of_regexp_stack_memory_size(isolate);
2495  __ mov(r0, Operand(address_of_regexp_stack_memory_size));
2496  __ ldr(r0, MemOperand(r0, 0));
2497  __ cmp(r0, Operand::Zero());
2498  __ b(eq, &runtime);
2499 
2500  // Check that the first argument is a JSRegExp object.
2501  __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
2502  __ JumpIfSmi(r0, &runtime);
2503  __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
2504  __ b(ne, &runtime);
2505 
2506  // Check that the RegExp has been compiled (data contains a fixed array).
2507  __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
2508  if (FLAG_debug_code) {
2509  __ SmiTst(regexp_data);
2510  __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
2511  __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
2512  __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
2513  }
2514 
2515  // regexp_data: RegExp data (FixedArray)
2516  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2517  __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
2518  __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
2519  __ b(ne, &runtime);
2520 
2521  // regexp_data: RegExp data (FixedArray)
2522  // Check that the number of captures fits in the static offsets vector buffer.
2523  __ ldr(r2,
2525  // Check (number_of_captures + 1) * 2 <= offsets vector size
2526  // Or number_of_captures * 2 <= offsets vector size - 2
2527  // Multiplying by 2 comes for free since r2 is smi-tagged.
2528  STATIC_ASSERT(kSmiTag == 0);
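  // Assuming the usual one-bit smi tag of 0 (the tag value is asserted just
  // above), a smi-tagged count n is the integer 2 * n, so comparing the
  // tagged value in r2 directly against (vector size - 2) implements
  // number_of_captures * 2 <= offsets vector size - 2 without untagging.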
2531  __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
2532  __ b(hi, &runtime);
2533 
2534  // Reset offset for possibly sliced string.
2535  __ mov(r9, Operand::Zero());
2536  __ ldr(subject, MemOperand(sp, kSubjectOffset));
2537  __ JumpIfSmi(subject, &runtime);
2538  __ mov(r3, subject); // Make a copy of the original subject string.
2539  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
2541  // subject: subject string
2542  // r3: subject string
2543  // r0: subject string instance type
2544  // regexp_data: RegExp data (FixedArray)
2545  // Handle subject string according to its encoding and representation:
2546  // (1) Sequential string? If yes, go to (5).
2547  // (2) Anything but sequential or cons? If yes, go to (6).
2548  // (3) Cons string. If the string is flat, replace subject with first string.
2549  // Otherwise bailout.
2550  // (4) Is subject external? If yes, go to (7).
2551  // (5) Sequential string. Load regexp code according to encoding.
2552  // (E) Carry on.
2554 
2555  // Deferred code at the end of the stub:
2556  // (6) Not a long external string? If yes, go to (8).
2557  // (7) External string. Make it, offset-wise, look like a sequential string.
2558  // Go to (5).
2559  // (8) Short external string or not a string? If yes, bail out to runtime.
2560  // (9) Sliced string. Replace subject with parent. Go to (4).
2561 
2562  Label seq_string /* 5 */, external_string /* 7 */,
2563  check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
2564  not_long_external /* 8 */;
2565 
2566  // (1) Sequential string? If yes, go to (5).
2567  __ and_(r1,
2568  r0,
2569  Operand(kIsNotStringMask |
2572  SetCC);
2574  __ b(eq, &seq_string); // Go to (5).
2575 
2576  // (2) Anything but sequential or cons? If yes, go to (6).
2581  __ cmp(r1, Operand(kExternalStringTag));
2582  __ b(ge, &not_seq_nor_cons); // Go to (6).
2583 
2584  // (3) Cons string. Check that it's flat.
2585  // Replace subject with first string and reload instance type.
2587  __ CompareRoot(r0, Heap::kempty_stringRootIndex);
2588  __ b(ne, &runtime);
2589  __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
2590 
2591  // (4) Is subject external? If yes, go to (7).
2592  __ bind(&check_underlying);
2593  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
2596  __ tst(r0, Operand(kStringRepresentationMask));
2597  // The underlying external string is never a short external string.
2600  __ b(ne, &external_string); // Go to (7).
2601 
2602  // (5) Sequential string. Load regexp code according to encoding.
2603  __ bind(&seq_string);
2604  // subject: sequential subject string (or look-alike, external string)
2605  // r3: original subject string
2606  // Load previous index and check range before r3 is overwritten. We have to
2607  // use r3 instead of subject here because subject might have been only made
2608  // to look like a sequential string when it actually is an external string.
2609  __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
2610  __ JumpIfNotSmi(r1, &runtime);
2612  __ cmp(r3, Operand(r1));
2613  __ b(ls, &runtime);
2614  __ SmiUntag(r1);
2615 
2618  __ and_(r0, r0, Operand(kStringEncodingMask));
2619  __ mov(r3, Operand(r0, ASR, 2), SetCC);
2620  __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
2621  __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
2622 
2623  // (E) Carry on. String handling is done.
2624  // r6: irregexp code
2625  // Check that the irregexp code has been generated for the actual string
2626  // encoding. If it has, the field contains a code object; otherwise it contains
2627  // a smi (code flushing support).
2628  __ JumpIfSmi(r6, &runtime);
2629 
2630  // r1: previous index
2631  // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
2632  // r6: code
2633  // subject: Subject string
2634  // regexp_data: RegExp data (FixedArray)
2635  // All checks done. Now push arguments for native regexp code.
2636  __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
2637 
2638  // Isolates: note we add an additional parameter here (isolate pointer).
2639  const int kRegExpExecuteArguments = 9;
2640  const int kParameterRegisters = 4;
2641  __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
2642 
2643  // Stack pointer now points to cell where return address is to be written.
2644  // Arguments are before that on the stack or in registers.
2645 
2646  // Argument 9 (sp[20]): Pass current isolate address.
2647  __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
2648  __ str(r0, MemOperand(sp, 5 * kPointerSize));
2649 
2650  // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
2651  __ mov(r0, Operand(1));
2652  __ str(r0, MemOperand(sp, 4 * kPointerSize));
2653 
2654  // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
2655  __ mov(r0, Operand(address_of_regexp_stack_memory_address));
2656  __ ldr(r0, MemOperand(r0, 0));
2657  __ mov(r2, Operand(address_of_regexp_stack_memory_size));
2658  __ ldr(r2, MemOperand(r2, 0));
2659  __ add(r0, r0, Operand(r2));
2660  __ str(r0, MemOperand(sp, 3 * kPointerSize));
2661 
2662  // Argument 6: Set the number of capture registers to zero to force global
2663  // regexps to behave as non-global. This does not affect non-global regexps.
2664  __ mov(r0, Operand::Zero());
2665  __ str(r0, MemOperand(sp, 2 * kPointerSize));
2666 
2667  // Argument 5 (sp[4]): static offsets vector buffer.
2668  __ mov(r0,
2669  Operand(ExternalReference::address_of_static_offsets_vector(isolate)));
2670  __ str(r0, MemOperand(sp, 1 * kPointerSize));
2671 
2672  // For arguments 4 and 3 get string length, calculate start of string data and
2673  // calculate the shift of the index (0 for ASCII and 1 for two byte).
2674  __ add(r7, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
2675  __ eor(r3, r3, Operand(1));
2676  // Load the length from the original subject string from the previous stack
2677  // frame. Therefore we have to use fp, which points exactly to two pointer
2678  // sizes below the previous sp. (Because creating a new stack frame pushes
2679  // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
2680  __ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
2681  // If slice offset is not 0, load the length from the original sliced string.
2682  // Argument 4, r3: End of string data
2683  // Argument 3, r2: Start of string data
2684  // Prepare start and end index of the input.
2685  __ add(r9, r7, Operand(r9, LSL, r3));
2686  __ add(r2, r9, Operand(r1, LSL, r3));
2687 
2688  __ ldr(r7, FieldMemOperand(subject, String::kLengthOffset));
2689  __ SmiUntag(r7);
2690  __ add(r3, r9, Operand(r7, LSL, r3));
2691 
2692  // Argument 2 (r1): Previous index.
2693  // Already there
2694 
2695  // Argument 1 (r0): Subject string.
2696  __ mov(r0, subject);
2697 
2698  // Locate the code entry and call it.
2699  __ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
2700  DirectCEntryStub stub;
2701  stub.GenerateCall(masm, r6);
2702 
2703  __ LeaveExitFrame(false, no_reg, true);
2704 
2705  last_match_info_elements = r6;
2706 
2707  // r0: result
2708  // subject: subject string (callee saved)
2709  // regexp_data: RegExp data (callee saved)
2710  // last_match_info_elements: Last match info elements (callee saved)
2711  // Check the result.
2712  Label success;
2713  __ cmp(r0, Operand(1));
2714  // We expect exactly one result since we force the called regexp to behave
2715  // as non-global.
2716  __ b(eq, &success);
2717  Label failure;
2718  __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
2719  __ b(eq, &failure);
2721  // If it is not an exception, it can only be a retry. Handle that in the runtime system.
2722  __ b(ne, &runtime);
2723  // The result must now be an exception. If there is no pending exception
2724  // already, a stack overflow (on the backtrack stack) was detected in RegExp
2725  // code, but the exception has not been created yet. Handle that in the runtime system.
2726  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
2727  __ mov(r1, Operand(isolate->factory()->the_hole_value()));
2728  __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
2729  isolate)));
2730  __ ldr(r0, MemOperand(r2, 0));
2731  __ cmp(r0, r1);
2732  __ b(eq, &runtime);
2733 
2734  __ str(r1, MemOperand(r2, 0)); // Clear pending exception.
2735 
2736  // Check if the exception is a termination. If so, throw as uncatchable.
2737  __ CompareRoot(r0, Heap::kTerminationExceptionRootIndex);
2738 
2739  Label termination_exception;
2740  __ b(eq, &termination_exception);
2741 
2742  __ Throw(r0);
2743 
2744  __ bind(&termination_exception);
2745  __ ThrowUncatchable(r0);
2746 
2747  __ bind(&failure);
2748  // For failure and exception return null.
2749  __ mov(r0, Operand(masm->isolate()->factory()->null_value()));
2750  __ add(sp, sp, Operand(4 * kPointerSize));
2751  __ Ret();
2752 
2753  // Process the result from the native regexp code.
2754  __ bind(&success);
2755  __ ldr(r1,
2757  // Calculate number of capture registers (number_of_captures + 1) * 2.
2758  // Multiplying by 2 comes for free since r1 is smi-tagged.
2759  STATIC_ASSERT(kSmiTag == 0);
2761  __ add(r1, r1, Operand(2)); // r1 was a smi.
2762 
2763  __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
2764  __ JumpIfSmi(r0, &runtime);
2765  __ CompareObjectType(r0, r2, r2, JS_ARRAY_TYPE);
2766  __ b(ne, &runtime);
2767  // Check that the JSArray is in fast case.
2768  __ ldr(last_match_info_elements,
2770  __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
2771  __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
2772  __ b(ne, &runtime);
2773  // Check that the last match info has space for the capture registers and the
2774  // additional information.
2775  __ ldr(r0,
2776  FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
2777  __ add(r2, r1, Operand(RegExpImpl::kLastMatchOverhead));
2778  __ cmp(r2, Operand::SmiUntag(r0));
2779  __ b(gt, &runtime);
2780 
2781  // r1: number of capture registers
2782  // r4: subject string
2783  // Store the capture count.
2784  __ SmiTag(r2, r1);
2785  __ str(r2, FieldMemOperand(last_match_info_elements,
2787  // Store last subject and last input.
2788  __ str(subject,
2789  FieldMemOperand(last_match_info_elements,
2791  __ mov(r2, subject);
2792  __ RecordWriteField(last_match_info_elements,
2794  subject,
2795  r3,
2797  kDontSaveFPRegs);
2798  __ mov(subject, r2);
2799  __ str(subject,
2800  FieldMemOperand(last_match_info_elements,
2802  __ RecordWriteField(last_match_info_elements,
2804  subject,
2805  r3,
2807  kDontSaveFPRegs);
2808 
2809  // Get the static offsets vector filled by the native regexp code.
2810  ExternalReference address_of_static_offsets_vector =
2811  ExternalReference::address_of_static_offsets_vector(isolate);
2812  __ mov(r2, Operand(address_of_static_offsets_vector));
2813 
2814  // r1: number of capture registers
2815  // r2: offsets vector
2816  Label next_capture, done;
2817  // Capture register counter starts from number of capture registers and
2818  // counts down until wrapping after zero.
2819  __ add(r0,
2820  last_match_info_elements,
2822  __ bind(&next_capture);
2823  __ sub(r1, r1, Operand(1), SetCC);
2824  __ b(mi, &done);
2825  // Read the value from the static offsets vector buffer.
2826  __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
2827  // Store the smi value in the last match info.
2828  __ SmiTag(r3);
2829  __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
2830  __ jmp(&next_capture);
2831  __ bind(&done);
2832 
2833  // Return last match info.
2834  __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
2835  __ add(sp, sp, Operand(4 * kPointerSize));
2836  __ Ret();
2837 
2838  // Do the runtime call to execute the regexp.
2839  __ bind(&runtime);
2840  __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
2841 
2842  // Deferred code for string handling.
2843  // (6) Not a long external string? If yes, go to (8).
2844  __ bind(&not_seq_nor_cons);
2845  // Compare flags are still set.
2846  __ b(gt, &not_long_external); // Go to (8).
2847 
2848  // (7) External string. Make it, offset-wise, look like a sequential string.
2849  __ bind(&external_string);
2850  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
2852  if (FLAG_debug_code) {
2853  // Assert that we do not have a cons or slice (indirect strings) here.
2854  // Sequential strings have already been ruled out.
2855  __ tst(r0, Operand(kIsIndirectStringMask));
2856  __ Assert(eq, kExternalStringExpectedButNotFound);
2857  }
2858  __ ldr(subject,
2860  // Move the pointer so that offset-wise, it looks like a sequential string.
2862  __ sub(subject,
2863  subject,
2865  __ jmp(&seq_string); // Go to (5).
2866 
2867  // (8) Short external string or not a string? If yes, bail out to runtime.
2868  __ bind(&not_long_external);
2871  __ b(ne, &runtime);
2872 
2873  // (9) Sliced string. Replace subject with parent. Go to (4).
2874  // Load offset into r9 and replace subject string with parent.
2876  __ SmiUntag(r9);
2877  __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
2878  __ jmp(&check_underlying); // Go to (4).
2879 #endif // V8_INTERPRETED_REGEXP
2880 }
2881 
2882 
2883 static void GenerateRecordCallTarget(MacroAssembler* masm) {
2884  // Cache the called function in a feedback vector slot. Cache states
2885  // are uninitialized, monomorphic (indicated by a JSFunction), and
2886  // megamorphic.
2887  // r0 : number of arguments to the construct function
2888  // r1 : the function to call
2889  // r2 : Feedback vector
2890  // r3 : slot in feedback vector (Smi)
2891  Label initialize, done, miss, megamorphic, not_array_function;
2892 
2894  masm->isolate()->heap()->megamorphic_symbol());
2896  masm->isolate()->heap()->uninitialized_symbol());
2897 
2898  // Load the cache state into r4.
2899  __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
2901 
2902  // A monomorphic cache hit or an already megamorphic state: invoke the
2903  // function without changing the state.
2904  __ cmp(r4, r1);
2905  __ b(eq, &done);
2906 
2907  if (!FLAG_pretenuring_call_new) {
2908  // If we came here, we need to see if we are the array function.
2909  // If we didn't have a matching function, and we didn't find the megamorphic
2910  // sentinel, then the slot contains either some other function or an
2911  // AllocationSite. Do a map check on the object in r4.
2912  __ ldr(r5, FieldMemOperand(r4, 0));
2913  __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
2914  __ b(ne, &miss);
2915 
2916  // Make sure the function is the Array() function
2917  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
2918  __ cmp(r1, r4);
2919  __ b(ne, &megamorphic);
2920  __ jmp(&done);
2921  }
2922 
2923  __ bind(&miss);
2924 
2925  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
2926  // megamorphic.
2927  __ CompareRoot(r4, Heap::kUninitializedSymbolRootIndex);
2928  __ b(eq, &initialize);
2929  // MegamorphicSentinel is an immortal immovable object (the megamorphic
2930  // symbol) so no write barrier is needed.
2931  __ bind(&megamorphic);
2932  __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
2933  __ LoadRoot(ip, Heap::kMegamorphicSymbolRootIndex);
2935  __ jmp(&done);
2936 
2937  // An uninitialized cache is patched with the function
2938  __ bind(&initialize);
2939 
2940  if (!FLAG_pretenuring_call_new) {
2941  // Make sure the function is the Array() function
2942  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
2943  __ cmp(r1, r4);
2944  __ b(ne, &not_array_function);
2945 
2946  // The target function is the Array constructor.
2947  // Create an AllocationSite if we don't already have one, and store it in the
2948  // slot.
2949  {
2950  FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2951 
2952  // Arguments register must be smi-tagged to call out.
2953  __ SmiTag(r0);
2954  __ Push(r3, r2, r1, r0);
2955 
2956  CreateAllocationSiteStub create_stub;
2957  __ CallStub(&create_stub);
2958 
2959  __ Pop(r3, r2, r1, r0);
2960  __ SmiUntag(r0);
2961  }
2962  __ b(&done);
2963 
2964  __ bind(&not_array_function);
2965  }
2966 
2967  __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
2968  __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2969  __ str(r1, MemOperand(r4, 0));
2970 
2971  __ Push(r4, r2, r1);
2972  __ RecordWrite(r2, r4, r1, kLRHasNotBeenSaved, kDontSaveFPRegs,
2974  __ Pop(r4, r2, r1);
2975 
2976  __ bind(&done);
2977 }
2978 
2979 
2980 void CallFunctionStub::Generate(MacroAssembler* masm) {
2981  // r1 : the function to call
2982  // r2 : feedback vector
2983  // r3 : (only if r2 is not the megamorphic symbol) slot in feedback
2984  // vector (Smi)
2985  Label slow, non_function, wrap, cont;
2986 
2987  if (NeedsChecks()) {
2988  // Check that the function is really a JavaScript function.
2989  // r1: pushed function (to be verified)
2990  __ JumpIfSmi(r1, &non_function);
2991 
2992  // Goto slow case if we do not have a function.
2993  __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
2994  __ b(ne, &slow);
2995 
2996  if (RecordCallTarget()) {
2997  GenerateRecordCallTarget(masm);
2998  // Type information was updated. Because we may call Array, which
2999  // expects either undefined or an AllocationSite in r2, we need
3000  // to set r2 to undefined.
3001  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
3002  }
3003  }
3004 
3005  // Fast-case: Invoke the function now.
3006  // r1: pushed function
3007  ParameterCount actual(argc_);
3008 
3009  if (CallAsMethod()) {
3010  if (NeedsChecks()) {
3011  // Do not transform the receiver for strict mode functions.
3014  __ tst(r4, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
3015  kSmiTagSize)));
3016  __ b(ne, &cont);
3017 
3018  // Do not transform the receiver for native (the compiler hints are already in r4).
3019  __ tst(r4, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
3020  __ b(ne, &cont);
3021  }
3022 
3023  // Compute the receiver in sloppy mode.
3024  __ ldr(r3, MemOperand(sp, argc_ * kPointerSize));
3025 
3026  if (NeedsChecks()) {
3027  __ JumpIfSmi(r3, &wrap);
3028  __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
3029  __ b(lt, &wrap);
3030  } else {
3031  __ jmp(&wrap);
3032  }
3033 
3034  __ bind(&cont);
3035  }
3036  __ InvokeFunction(r1, actual, JUMP_FUNCTION, NullCallWrapper());
3037 
3038  if (NeedsChecks()) {
3039  // Slow-case: Non-function called.
3040  __ bind(&slow);
3041  if (RecordCallTarget()) {
3042  // If there is a call target cache, mark it megamorphic in the
3043  // non-function case. MegamorphicSentinel is an immortal immovable
3044  // object (megamorphic symbol) so no write barrier is needed.
3046  masm->isolate()->heap()->megamorphic_symbol());
3047  __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
3048  __ LoadRoot(ip, Heap::kMegamorphicSymbolRootIndex);
3050  }
3051  // Check for function proxy.
3052  __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
3053  __ b(ne, &non_function);
3054  __ push(r1); // Put the proxy as an additional argument.
3055  __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE32));
3056  __ mov(r2, Operand::Zero());
3057  __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY);
3058  {
3059  Handle<Code> adaptor =
3060  masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
3061  __ Jump(adaptor, RelocInfo::CODE_TARGET);
3062  }
3063 
3064  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
3065  // of the original receiver from the call site).
3066  __ bind(&non_function);
3067  __ str(r1, MemOperand(sp, argc_ * kPointerSize));
3068  __ mov(r0, Operand(argc_)); // Set up the number of arguments.
3069  __ mov(r2, Operand::Zero());
3070  __ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION);
3071  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
3072  RelocInfo::CODE_TARGET);
3073  }
3074 
3075  if (CallAsMethod()) {
3076  __ bind(&wrap);
3077  // Wrap the receiver and patch it back onto the stack.
3078  { FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
3079  __ Push(r1, r3);
3080  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
3081  __ pop(r1);
3082  }
3083  __ str(r0, MemOperand(sp, argc_ * kPointerSize));
3084  __ jmp(&cont);
3085  }
3086 }
3087 
3088 
3089 void CallConstructStub::Generate(MacroAssembler* masm) {
3090  // r0 : number of arguments
3091  // r1 : the function to call
3092  // r2 : feedback vector
3093  // r3 : (only if r2 is not the megamorphic symbol) slot in feedback
3094  // vector (Smi)
3095  Label slow, non_function_call;
3096 
3097  // Check that the function is not a smi.
3098  __ JumpIfSmi(r1, &non_function_call);
3099  // Check that the function is a JSFunction.
3100  __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
3101  __ b(ne, &slow);
3102 
3103  if (RecordCallTarget()) {
3104  GenerateRecordCallTarget(masm);
3105 
3106  __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
3107  if (FLAG_pretenuring_call_new) {
3108  // Put the AllocationSite from the feedback vector into r2.
3109  // By adding kPointerSize we encode that we know the AllocationSite
3110  // entry is at the feedback vector slot given by r3 + 1.
3111  __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize + kPointerSize));
3112  } else {
3113  Label feedback_register_initialized;
3114  // Put the AllocationSite from the feedback vector into r2, or undefined.
3117  __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
3118  __ b(eq, &feedback_register_initialized);
3119  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
3120  __ bind(&feedback_register_initialized);
3121  }
3122 
3123  __ AssertUndefinedOrAllocationSite(r2, r5);
3124  }
3125 
3126  // Jump to the function-specific construct stub.
3127  Register jmp_reg = r4;
3129  __ ldr(jmp_reg, FieldMemOperand(jmp_reg,
3131  __ add(pc, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
3132 
3133  // r0: number of arguments
3134  // r1: called object
3135  // r4: object type
3136  Label do_call;
3137  __ bind(&slow);
3138  __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
3139  __ b(ne, &non_function_call);
3140  __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
3141  __ jmp(&do_call);
3142 
3143  __ bind(&non_function_call);
3144  __ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
3145  __ bind(&do_call);
3146  // Set expected number of arguments to zero (not changing r0).
3147  __ mov(r2, Operand::Zero());
3148  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
3149  RelocInfo::CODE_TARGET);
3150 }
3151 
3152 
3153 // StringCharCodeAtGenerator
3154 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
3155  Label flat_string;
3156  Label ascii_string;
3157  Label got_char_code;
3158  Label sliced_string;
3159 
3160  // If the receiver is a smi trigger the non-string case.
3161  __ JumpIfSmi(object_, receiver_not_string_);
3162 
3163  // Fetch the instance type of the receiver into result register.
3164  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3165  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3166  // If the receiver is not a string trigger the non-string case.
3167  __ tst(result_, Operand(kIsNotStringMask));
3168  __ b(ne, receiver_not_string_);
3169 
3170  // If the index is non-smi trigger the non-smi case.
3171  __ JumpIfNotSmi(index_, &index_not_smi_);
3172  __ bind(&got_smi_index_);
3173 
3174  // Check for index out of range.
3175  __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
3176  __ cmp(ip, Operand(index_));
3177  __ b(ls, index_out_of_range_);
3178 
3179  __ SmiUntag(index_);
3180 
3181  StringCharLoadGenerator::Generate(masm,
3182  object_,
3183  index_,
3184  result_,
3185  &call_runtime_);
3186 
3187  __ SmiTag(result_);
3188  __ bind(&exit_);
3189 }
3190 
3191 
3192 void StringCharCodeAtGenerator::GenerateSlow(
3193  MacroAssembler* masm,
3194  const RuntimeCallHelper& call_helper) {
3195  __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
3196 
3197  // Index is not a smi.
3198  __ bind(&index_not_smi_);
3199  // If index is a heap number, try converting it to an integer.
3200  __ CheckMap(index_,
3201  result_,
3202  Heap::kHeapNumberMapRootIndex,
3203  index_not_number_,
3204  DONT_DO_SMI_CHECK);
3205  call_helper.BeforeCall(masm);
3206  __ push(object_);
3207  __ push(index_); // Consumed by runtime conversion function.
3208  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
3209  __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
3210  } else {
3211  ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
3212  // NumberToSmi discards numbers that are not exact integers.
3213  __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
3214  }
3215  // Save the conversion result before the pop instructions below
3216  // have a chance to overwrite it.
3217  __ Move(index_, r0);
3218  __ pop(object_);
3219  // Reload the instance type.
3220  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3221  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3222  call_helper.AfterCall(masm);
3223  // If index is still not a smi, it must be out of range.
3224  __ JumpIfNotSmi(index_, index_out_of_range_);
3225  // Otherwise, return to the fast path.
3226  __ jmp(&got_smi_index_);
3227 
3228  // Call runtime. We get here when the receiver is a string and the
3229  // index is a number, but the code for getting the actual character
3230  // is too complex (e.g., when the string needs to be flattened).
3231  __ bind(&call_runtime_);
3232  call_helper.BeforeCall(masm);
3233  __ SmiTag(index_);
3234  __ Push(object_, index_);
3235  __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
3236  __ Move(result_, r0);
3237  call_helper.AfterCall(masm);
3238  __ jmp(&exit_);
3239 
3240  __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
3241 }
3242 
3243 
3244 // -------------------------------------------------------------------------
3245 // StringCharFromCodeGenerator
3246 
3247 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
3248  // Fast case of Heap::LookupSingleCharacterStringFromCode.
3249  STATIC_ASSERT(kSmiTag == 0);
3252  __ tst(code_,
3253  Operand(kSmiTagMask |
3254  ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
3255  __ b(ne, &slow_case_);
3256 
3257  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
3258  // At this point code register contains smi tagged ASCII char code.
3259  __ add(result_, result_, Operand::PointerOffsetFromSmiKey(code_));
3260  __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
3261  __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
3262  __ b(eq, &slow_case_);
3263  __ bind(&exit_);
3264 }
3265 
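// Illustrative sketch (not part of code-stubs-arm.cc): the fast path above,
// expressed as plain C++. The cache array, the undefined sentinel and the
// max_one_byte_char_code bound are stand-ins for the real heap roots and
// constants; returning NULL corresponds to falling through to &slow_case_.
#include <stddef.h>

static const void* LookupSingleCharacterString(const void* const* cache,
                                               int char_code,
                                               const void* undefined_sentinel,
                                               int max_one_byte_char_code) {
  // Reject codes outside the one-byte range, as the combined tst above does.
  if (char_code < 0 || char_code > max_one_byte_char_code) return NULL;
  const void* result = cache[char_code];  // single character string cache slot
  // An undefined entry means the string has not been created yet: slow case.
  return result == undefined_sentinel ? NULL : result;
}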
3266 
3267 void StringCharFromCodeGenerator::GenerateSlow(
3268  MacroAssembler* masm,
3269  const RuntimeCallHelper& call_helper) {
3270  __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
3271 
3272  __ bind(&slow_case_);
3273  call_helper.BeforeCall(masm);
3274  __ push(code_);
3275  __ CallRuntime(Runtime::kCharFromCode, 1);
3276  __ Move(result_, r0);
3277  call_helper.AfterCall(masm);
3278  __ jmp(&exit_);
3279 
3280  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
3281 }
3282 
3283 
3284 enum CopyCharactersFlags {
3285  COPY_ASCII = 1,
3286  DEST_ALWAYS_ALIGNED = 2
3287 };
3288 
3289 
3290 void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
3291  Register dest,
3292  Register src,
3293  Register count,
3294  Register scratch1,
3295  Register scratch2,
3296  Register scratch3,
3297  Register scratch4,
3298  int flags) {
3299  bool ascii = (flags & COPY_ASCII) != 0;
3300  bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
3301 
3302  if (dest_always_aligned && FLAG_debug_code) {
3303  // Check that destination is actually word aligned if the flag says
3304  // that it is.
3305  __ tst(dest, Operand(kPointerAlignmentMask));
3306  __ Check(eq, kDestinationOfCopyNotAligned);
3307  }
3308 
3309  const int kReadAlignment = 4;
3310  const int kReadAlignmentMask = kReadAlignment - 1;
3311  // Ensure that reading an entire aligned word containing the last character
3312  // of a string will not read outside the allocated area (because we pad up
3313  // to kObjectAlignment).
3314  STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
3315  // Assumes word reads and writes are little endian.
3316  // Nothing to do for zero characters.
3317  Label done;
3318  if (!ascii) {
3319  __ add(count, count, Operand(count), SetCC);
3320  } else {
3321  __ cmp(count, Operand::Zero());
3322  }
3323  __ b(eq, &done);
3324 
3325  // Assume that you cannot read (or write) unaligned.
3326  Label byte_loop;
3327  // Must copy at least eight bytes, otherwise just do it one byte at a time.
3328  __ cmp(count, Operand(8));
3329  __ add(count, dest, Operand(count));
3330  Register limit = count; // Read until src equals this.
3331  __ b(lt, &byte_loop);
3332 
3333  if (!dest_always_aligned) {
3334  // Align dest by byte copying. Copies between zero and three bytes.
3335  __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
3336  Label dest_aligned;
3337  __ b(eq, &dest_aligned);
3338  __ cmp(scratch4, Operand(2));
3339  __ ldrb(scratch1, MemOperand(src, 1, PostIndex));
3340  __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
3341  __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
3342  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
3343  __ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
3344  __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
3345  __ bind(&dest_aligned);
3346  }
3347 
3348  Label simple_loop;
3349 
3350  __ sub(scratch4, dest, Operand(src));
3351  __ and_(scratch4, scratch4, Operand(0x03), SetCC);
3352  __ b(eq, &simple_loop);
3353  // Shift register is number of bits in a source word that
3354  // must be combined with bits in the next source word in order
3355  // to create a destination word.
3356 
3357  // Complex loop for src/dst that are not aligned the same way.
3358  {
3359  Label loop;
3360  __ mov(scratch4, Operand(scratch4, LSL, 3));
3361  Register left_shift = scratch4;
3362  __ and_(src, src, Operand(~3)); // Round down to load previous word.
3363  __ ldr(scratch1, MemOperand(src, 4, PostIndex));
3364  // Store the "shift" most significant bits of scratch in the least
3365  // significant bits (i.e., shift down by (32 - shift)).
3366  __ rsb(scratch2, left_shift, Operand(32));
3367  Register right_shift = scratch2;
3368  __ mov(scratch1, Operand(scratch1, LSR, right_shift));
3369 
3370  __ bind(&loop);
3371  __ ldr(scratch3, MemOperand(src, 4, PostIndex));
3372  __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
3373  __ str(scratch1, MemOperand(dest, 4, PostIndex));
3374  __ mov(scratch1, Operand(scratch3, LSR, right_shift));
3375  // Loop if four or more bytes left to copy.
3376  __ sub(scratch3, limit, Operand(dest));
3377  __ sub(scratch3, scratch3, Operand(4), SetCC);
3378  __ b(ge, &loop);
3379  }
3380  // There is now between zero and three bytes left to copy (negative that
3381  // number is in scratch3), and between one and three bytes already read into
3382  // scratch1 (eight times that number in scratch4). We may have read past
3383  // the end of the string, but because objects are aligned, we have not read
3384  // past the end of the object.
3385  // Find the minimum of remaining characters to move and preloaded characters
3386  // and write those as bytes.
3387  __ add(scratch3, scratch3, Operand(4), SetCC);
3388  __ b(eq, &done);
3389  __ cmp(scratch4, Operand(scratch3, LSL, 3), ne);
3390  // Move the minimum of bytes read and bytes left to copy into scratch3.
3391  __ mov(scratch3, Operand(scratch4, LSR, 3), LeaveCC, lt);
3392  // Between one and three (value in scratch3) characters already read into
3393  // scratch1, ready to write.
3394  __ cmp(scratch3, Operand(2));
3395  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
3396  __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
3397  __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
3398  __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
3399  __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
3400  // Copy any remaining bytes.
3401  __ b(&byte_loop);
3402 
3403  // Simple loop.
3404  // Copy words from src to dst, until less than four bytes left.
3405  // Both src and dest are word aligned.
3406  __ bind(&simple_loop);
3407  {
3408  Label loop;
3409  __ bind(&loop);
3410  __ ldr(scratch1, MemOperand(src, 4, PostIndex));
3411  __ sub(scratch3, limit, Operand(dest));
3412  __ str(scratch1, MemOperand(dest, 4, PostIndex));
3413  // Compare to 8, not 4, because we do the subtraction before increasing
3414  // dest.
3415  __ cmp(scratch3, Operand(8));
3416  __ b(ge, &loop);
3417  }
3418 
3419  // Copy bytes from src to dst until dst hits limit.
3420  __ bind(&byte_loop);
3421  __ cmp(dest, Operand(limit));
3422  __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
3423  __ b(ge, &done);
3424  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
3425  __ b(&byte_loop);
3426 
3427  __ bind(&done);
3428 }
3429 
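// Illustrative sketch (not part of code-stubs-arm.cc): the word-splicing idea
// used by the misaligned path above, in portable C++. `dest` is assumed to be
// word aligned (the stub's byte-copy prologue guarantees this) and the layout
// is assumed little endian, as the stub's own comments state. Unlike the stub,
// which relies on heap-object padding and a trailing byte loop, this sketch
// stops the word loop early enough that every word load stays inside `src`.
#include <stdint.h>
#include <string.h>

static uint32_t LoadAlignedWord(const uint8_t* p) {
  uint32_t word;
  memcpy(&word, p, sizeof(word));  // p is word aligned by construction
  return word;
}

static void CopyFromUnalignedSource(uint8_t* dest, const uint8_t* src,
                                    size_t count) {
  size_t misalign = reinterpret_cast<uintptr_t>(src) & 3;
  if (misalign == 0 || count < 8) {
    memcpy(dest, src, count);  // aligned or tiny copy: no splicing needed
    return;
  }
  const uint8_t* aligned = src - misalign;           // round src down to a word
  const unsigned drop = 8 * static_cast<unsigned>(misalign);
  const unsigned keep = 32 - drop;
  uint32_t prev = LoadAlignedWord(aligned) >> drop;  // usable bytes of word 0
  aligned += 4;
  size_t copied = 0;
  while (count - copied >= 8 - misalign) {           // next load stays in bounds
    uint32_t next = LoadAlignedWord(aligned);
    aligned += 4;
    uint32_t out = prev | (next << keep);            // splice neighbouring words
    memcpy(dest + copied, &out, 4);                  // dest is word aligned
    prev = next >> drop;
    copied += 4;
  }
  memcpy(dest + copied, src + copied, count - copied);  // short byte-wise tail
}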
3430 
3431 void StringHelper::GenerateHashInit(MacroAssembler* masm,
3432  Register hash,
3433  Register character) {
3434  // hash = character + (character << 10);
3435  __ LoadRoot(hash, Heap::kHashSeedRootIndex);
3436  // Untag smi seed and add the character.
3437  __ add(hash, character, Operand(hash, LSR, kSmiTagSize));
3438  // hash += hash << 10;
3439  __ add(hash, hash, Operand(hash, LSL, 10));
3440  // hash ^= hash >> 6;
3441  __ eor(hash, hash, Operand(hash, LSR, 6));
3442 }
3443 
3444 
3445 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
3446  Register hash,
3447  Register character) {
3448  // hash += character;
3449  __ add(hash, hash, Operand(character));
3450  // hash += hash << 10;
3451  __ add(hash, hash, Operand(hash, LSL, 10));
3452  // hash ^= hash >> 6;
3453  __ eor(hash, hash, Operand(hash, LSR, 6));
3454 }
3455 
3456 
3457 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
3458  Register hash) {
3459  // hash += hash << 3;
3460  __ add(hash, hash, Operand(hash, LSL, 3));
3461  // hash ^= hash >> 11;
3462  __ eor(hash, hash, Operand(hash, LSR, 11));
3463  // hash += hash << 15;
3464  __ add(hash, hash, Operand(hash, LSL, 15));
3465 
3466  __ and_(hash, hash, Operand(String::kHashBitMask), SetCC);
3467 
3468  // if (hash == 0) hash = 27;
3469  __ mov(hash, Operand(StringHasher::kZeroHash), LeaveCC, eq);
3470 }
3471 
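// Illustrative sketch (not part of code-stubs-arm.cc): the running hash that
// the three helpers above emit piece by piece, written as plain C++. The final
// mask width and the zero-hash replacement value mirror String::kHashBitMask
// and StringHasher::kZeroHash, but the exact constants here are assumptions.
#include <stdint.h>
#include <stddef.h>

static uint32_t ComputeStringHash(const uint8_t* chars, size_t length,
                                  uint32_t untagged_seed) {
  uint32_t hash = untagged_seed;
  for (size_t i = 0; i < length; i++) {
    hash += chars[i];             // GenerateHashInit / GenerateHashAddCharacter
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  hash += hash << 3;              // GenerateHashGetHash
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= (1u << 30) - 1;         // assumed width of String::kHashBitMask
  return hash != 0 ? hash : 27;   // 27 stands in for StringHasher::kZeroHash
}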
3472 
3473 void SubStringStub::Generate(MacroAssembler* masm) {
3474  Label runtime;
3475 
3476  // Stack frame on entry.
3477  // lr: return address
3478  // sp[0]: to
3479  // sp[4]: from
3480  // sp[8]: string
3481 
3482  // This stub is called from the native-call %_SubString(...), so
3483  // nothing can be assumed about the arguments. It is tested that:
3484  // "string" is a sequential string,
3485  // both "from" and "to" are smis, and
3486  // 0 <= from <= to <= string.length.
3487  // If any of these assumptions fail, we call the runtime system.
3488 
3489  const int kToOffset = 0 * kPointerSize;
3490  const int kFromOffset = 1 * kPointerSize;
3491  const int kStringOffset = 2 * kPointerSize;
3492 
3493  __ Ldrd(r2, r3, MemOperand(sp, kToOffset));
3494  STATIC_ASSERT(kFromOffset == kToOffset + 4);
3495  STATIC_ASSERT(kSmiTag == 0);
3497 
3498  // Arithmetic shift right by one un-smi-tags. In this case we rotate right
3499  // instead because we bail out on non-smi values: ROR and ASR are equivalent
3500  // for smis but they set the flags in a way that's easier to optimize.
3501  __ mov(r2, Operand(r2, ROR, 1), SetCC);
3502  __ mov(r3, Operand(r3, ROR, 1), SetCC, cc);
3503  // If either to or from had the smi tag bit set, then C is set now, and N
3504  // has the same value: we rotated by 1, so the bottom bit is now the top bit.
3505  // We want to bail out to the runtime here if "from" is negative. In that case, the
3506  // next instruction is not executed and we fall through to bailing out to
3507  // runtime.
3508  // Executed if both r2 and r3 are untagged integers.
3509  __ sub(r2, r2, Operand(r3), SetCC, cc);
3510  // One of the above un-smis or the above SUB could have set N==1.
3511  __ b(mi, &runtime); // Either "from" or "to" is not a smi, or from > to.
3512 
3513  // Make sure first argument is a string.
3514  __ ldr(r0, MemOperand(sp, kStringOffset));
3515  // Do a JumpIfSmi, but fold its jump into the subsequent string test.
3516  __ SmiTst(r0);
3517  Condition is_string = masm->IsObjectStringType(r0, r1, ne);
3518  ASSERT(is_string == eq);
3519  __ b(NegateCondition(is_string), &runtime);
3520 
3521  Label single_char;
3522  __ cmp(r2, Operand(1));
3523  __ b(eq, &single_char);
3524 
3525  // Short-cut for the case of trivial substring.
3526  Label return_r0;
3527  // r0: original string
3528  // r2: result string length
3530  __ cmp(r2, Operand(r4, ASR, 1));
3531  // Return original string.
3532  __ b(eq, &return_r0);
3533  // Longer than original string's length or negative: unsafe arguments.
3534  __ b(hi, &runtime);
3535  // Shorter than original string's length: an actual substring.
3536 
3537  // Deal with different string types: update the index if necessary
3538  // and put the underlying string into r5.
3539  // r0: original string
3540  // r1: instance type
3541  // r2: length
3542  // r3: from index (untagged)
3543  Label underlying_unpacked, sliced_string, seq_or_external_string;
3544  // If the string is not indirect, it can only be sequential or external.
3547  __ tst(r1, Operand(kIsIndirectStringMask));
3548  __ b(eq, &seq_or_external_string);
3549 
3550  __ tst(r1, Operand(kSlicedNotConsMask));
3551  __ b(ne, &sliced_string);
3552  // Cons string. Check whether it is flat, then fetch first part.
3554  __ CompareRoot(r5, Heap::kempty_stringRootIndex);
3555  __ b(ne, &runtime);
3557  // Update instance type.
3560  __ jmp(&underlying_unpacked);
3561 
3562  __ bind(&sliced_string);
3563  // Sliced string. Fetch parent and correct start index by offset.
3566  __ add(r3, r3, Operand(r4, ASR, 1)); // Add offset to index.
3567  // Update instance type.
3570  __ jmp(&underlying_unpacked);
3571 
3572  __ bind(&seq_or_external_string);
3573  // Sequential or external string. Just move string to the expected register.
3574  __ mov(r5, r0);
3575 
3576  __ bind(&underlying_unpacked);
3577 
3578  if (FLAG_string_slices) {
3579  Label copy_routine;
3580  // r5: underlying subject string
3581  // r1: instance type of underlying subject string
3582  // r2: length
3583  // r3: adjusted start index (untagged)
3584  __ cmp(r2, Operand(SlicedString::kMinLength));
3585  // Short slice. Copy instead of slicing.
3586  __ b(lt, &copy_routine);
3587  // Allocate new sliced string. At this point we do not reload the instance
3588  // type including the string encoding because we simply rely on the info
3589  // provided by the original string. It does not matter if the original
3590  // string's encoding is wrong because we always have to recheck encoding of
3591  // the newly created string's parent anyways due to externalized strings.
3592  Label two_byte_slice, set_slice_header;
3595  __ tst(r1, Operand(kStringEncodingMask));
3596  __ b(eq, &two_byte_slice);
3597  __ AllocateAsciiSlicedString(r0, r2, r6, r4, &runtime);
3598  __ jmp(&set_slice_header);
3599  __ bind(&two_byte_slice);
3600  __ AllocateTwoByteSlicedString(r0, r2, r6, r4, &runtime);
3601  __ bind(&set_slice_header);
3602  __ mov(r3, Operand(r3, LSL, 1));
3605  __ jmp(&return_r0);
3606 
3607  __ bind(&copy_routine);
3608  }
3609 
3610  // r5: underlying subject string
3611  // r1: instance type of underlying subject string
3612  // r2: length
3613  // r3: adjusted start index (untagged)
3614  Label two_byte_sequential, sequential_string, allocate_result;
3617  __ tst(r1, Operand(kExternalStringTag));
3618  __ b(eq, &sequential_string);
3619 
3620  // Handle external string.
3621  // Rule out short external strings.
3623  __ tst(r1, Operand(kShortExternalStringTag));
3624  __ b(ne, &runtime);
3626  // r5 already points to the first character of underlying string.
3627  __ jmp(&allocate_result);
3628 
3629  __ bind(&sequential_string);
3630  // Locate first character of underlying subject string.
3633 
3634  __ bind(&allocate_result);
3635  // Sequential ASCII string. Allocate the result.
3637  __ tst(r1, Operand(kStringEncodingMask));
3638  __ b(eq, &two_byte_sequential);
3639 
3640  // Allocate and copy the resulting ASCII string.
3641  __ AllocateAsciiString(r0, r2, r4, r6, r1, &runtime);
3642 
3643  // Locate first character of substring to copy.
3644  __ add(r5, r5, r3);
3645  // Locate first character of result.
3647 
3648  // r0: result string
3649  // r1: first character of result string
3650  // r2: result string length
3651  // r5: first character of substring to copy
3653  StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r9,
3654  COPY_ASCII | DEST_ALWAYS_ALIGNED);
3655  __ jmp(&return_r0);
3656 
3657  // Allocate and copy the resulting two-byte string.
3658  __ bind(&two_byte_sequential);
3659  __ AllocateTwoByteString(r0, r2, r4, r6, r1, &runtime);
3660 
3661  // Locate first character of substring to copy.
3662  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
3663  __ add(r5, r5, Operand(r3, LSL, 1));
3664  // Locate first character of result.
3666 
3667  // r0: result string.
3668  // r1: first character of result.
3669  // r2: result length.
3670  // r5: first character of substring to copy.
3672  StringHelper::GenerateCopyCharactersLong(
3673  masm, r1, r5, r2, r3, r4, r6, r9, DEST_ALWAYS_ALIGNED);
3674 
3675  __ bind(&return_r0);
3676  Counters* counters = masm->isolate()->counters();
3677  __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
3678  __ Drop(3);
3679  __ Ret();
3680 
3681  // Just jump to runtime to create the sub string.
3682  __ bind(&runtime);
3683  __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
3684 
3685  __ bind(&single_char);
3686  // r0: original string
3687  // r1: instance type
3688  // r2: length
3689  // r3: from index (untagged)
3690  __ SmiTag(r3, r3);
3691  StringCharAtGenerator generator(
3692  r0, r3, r2, r0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
3693  generator.GenerateFast(masm);
3694  __ Drop(3);
3695  __ Ret();
3696  generator.SkipSlow(masm, &runtime);
3697 }
3698 
3699 
3700 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
3701  Register left,
3702  Register right,
3703  Register scratch1,
3704  Register scratch2,
3705  Register scratch3) {
3706  Register length = scratch1;
3707 
3708  // Compare lengths.
3709  Label strings_not_equal, check_zero_length;
3710  __ ldr(length, FieldMemOperand(left, String::kLengthOffset));
3711  __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
3712  __ cmp(length, scratch2);
3713  __ b(eq, &check_zero_length);
3714  __ bind(&strings_not_equal);
3715  __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL)));
3716  __ Ret();
3717 
3718  // Check if the length is zero.
3719  Label compare_chars;
3720  __ bind(&check_zero_length);
3721  STATIC_ASSERT(kSmiTag == 0);
3722  __ cmp(length, Operand::Zero());
3723  __ b(ne, &compare_chars);
3724  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
3725  __ Ret();
3726 
3727  // Compare characters.
3728  __ bind(&compare_chars);
3729  GenerateAsciiCharsCompareLoop(masm,
3730  left, right, length, scratch2, scratch3,
3731  &strings_not_equal);
3732 
3733  // Characters are equal.
3734  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
3735  __ Ret();
3736 }
3737 
3738 
3739 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
3740  Register left,
3741  Register right,
3742  Register scratch1,
3743  Register scratch2,
3744  Register scratch3,
3745  Register scratch4) {
3746  Label result_not_equal, compare_lengths;
3747  // Find minimum length and length difference.
3748  __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
3749  __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
3750  __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
3751  Register length_delta = scratch3;
3752  __ mov(scratch1, scratch2, LeaveCC, gt);
3753  Register min_length = scratch1;
3754  STATIC_ASSERT(kSmiTag == 0);
3755  __ cmp(min_length, Operand::Zero());
3756  __ b(eq, &compare_lengths);
3757 
3758  // Compare loop.
3759  GenerateAsciiCharsCompareLoop(masm,
3760  left, right, min_length, scratch2, scratch4,
3761  &result_not_equal);
3762 
3763  // Compare lengths - strings up to min-length are equal.
3764  __ bind(&compare_lengths);
3765  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
3766  // Use length_delta as result if it's zero.
3767  __ mov(r0, Operand(length_delta), SetCC);
3768  __ bind(&result_not_equal);
3769  // Conditionally update the result based either on length_delta or
3770  // the last comparison performed in the loop above.
3771  __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
3772  __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
3773  __ Ret();
3774 }
3775 
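// Illustrative sketch (not part of code-stubs-arm.cc): the result protocol of
// GenerateCompareFlatAsciiStrings above in C++. Characters are compared up to
// the shorter length; if that prefix is equal, the length difference decides,
// matching the LESS / EQUAL / GREATER smis the stub materialises.
static int CompareFlatAsciiStrings(const char* left, int left_length,
                                   const char* right, int right_length) {
  int min_length = left_length < right_length ? left_length : right_length;
  for (int i = 0; i < min_length; i++) {
    if (left[i] != right[i]) return left[i] < right[i] ? -1 : 1;
  }
  int length_delta = left_length - right_length;  // strings equal so far
  return length_delta < 0 ? -1 : (length_delta > 0 ? 1 : 0);
}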
3776 
3777 void StringCompareStub::GenerateAsciiCharsCompareLoop(
3778  MacroAssembler* masm,
3779  Register left,
3780  Register right,
3781  Register length,
3782  Register scratch1,
3783  Register scratch2,
3784  Label* chars_not_equal) {
3785  // Change index to run from -length to -1 by adding length to string
3786  // start. This means that the loop ends when the index reaches zero, which
3787  // doesn't need an additional compare.
3788  __ SmiUntag(length);
3789  __ add(scratch1, length,
3790  Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3791  __ add(left, left, Operand(scratch1));
3792  __ add(right, right, Operand(scratch1));
3793  __ rsb(length, length, Operand::Zero());
3794  Register index = length; // index = -length;
3795 
3796  // Compare loop.
3797  Label loop;
3798  __ bind(&loop);
3799  __ ldrb(scratch1, MemOperand(left, index));
3800  __ ldrb(scratch2, MemOperand(right, index));
3801  __ cmp(scratch1, scratch2);
3802  __ b(ne, chars_not_equal);
3803  __ add(index, index, Operand(1), SetCC);
3804  __ b(ne, &loop);
3805 }
3806 
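// Illustrative sketch (not part of code-stubs-arm.cc): the negative-index loop
// above in C++. Both pointers are biased one past the end so a single counter
// running from -length up to 0 serves as index, loop bound and termination
// test at once; the names below are illustrative.
static int AsciiCharsCompareLoop(const char* left, const char* right,
                                 int length) {
  left += length;    // bias both strings past their last character
  right += length;
  for (int index = -length; index != 0; index++) {
    if (left[index] != right[index]) {
      return left[index] < right[index] ? -1 : 1;  // the chars_not_equal case
    }
  }
  return 0;          // all `length` characters are equal
}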
3807 
3808 void StringCompareStub::Generate(MacroAssembler* masm) {
3809  Label runtime;
3810 
3811  Counters* counters = masm->isolate()->counters();
3812 
3813  // Stack frame on entry.
3814  // sp[0]: right string
3815  // sp[4]: left string
3816  __ Ldrd(r0, r1, MemOperand(sp)); // Load right in r0, left in r1.
3817 
3818  Label not_same;
3819  __ cmp(r0, r1);
3820  __ b(ne, &not_same);
3821  STATIC_ASSERT(EQUAL == 0);
3822  STATIC_ASSERT(kSmiTag == 0);
3823  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
3824  __ IncrementCounter(counters->string_compare_native(), 1, r1, r2);
3825  __ add(sp, sp, Operand(2 * kPointerSize));
3826  __ Ret();
3827 
3828  __ bind(&not_same);
3829 
3830  // Check that both objects are sequential ASCII strings.
3831  __ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime);
3832 
3833  // Compare flat ASCII strings natively. Remove arguments from stack first.
3834  __ IncrementCounter(counters->string_compare_native(), 1, r2, r3);
3835  __ add(sp, sp, Operand(2 * kPointerSize));
3837 
3838  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
3839  // tagged as a small integer.
3840  __ bind(&runtime);
3841  __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
3842 }
3843 
3844 
3845 void ArrayPushStub::Generate(MacroAssembler* masm) {
3846  Register receiver = r0;
3847  Register scratch = r1;
3848 
3849  int argc = arguments_count();
3850 
3851  if (argc == 0) {
3852  // Nothing to do, just return the length.
3853  __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
3854  __ Drop(argc + 1);
3855  __ Ret();
3856  return;
3857  }
3858 
3859  Isolate* isolate = masm->isolate();
3860 
3861  if (argc != 1) {
3862  __ TailCallExternalReference(
3863  ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
3864  return;
3865  }
3866 
3867  Label call_builtin, attempt_to_grow_elements, with_write_barrier;
3868 
3869  Register elements = r6;
3870  Register end_elements = r5;
3871  // Get the elements array of the object.
3872  __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
3873 
3874  if (IsFastSmiOrObjectElementsKind(elements_kind())) {
3875  // Check that the elements are in fast mode and writable.
3876  __ CheckMap(elements,
3877  scratch,
3878  Heap::kFixedArrayMapRootIndex,
3879  &call_builtin,
3880  DONT_DO_SMI_CHECK);
3881  }
3882 
3883  // Get the array's length into scratch and calculate new length.
3884  __ ldr(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
3885  __ add(scratch, scratch, Operand(Smi::FromInt(argc)));
3886 
3887  // Get the elements' length.
3889 
3890  // Check if we could survive without allocation.
3891  __ cmp(scratch, r4);
3892 
3893  const int kEndElementsOffset =
3894  FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
3895 
3896  if (IsFastSmiOrObjectElementsKind(elements_kind())) {
3897  __ b(gt, &attempt_to_grow_elements);
3898 
3899  // Check if value is a smi.
3900  __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
3901  __ JumpIfNotSmi(r4, &with_write_barrier);
3902 
3903  // Store the value.
3904  // We may need a register containing the address end_elements below, so
3905  // write back the value in end_elements.
3906  __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(scratch));
3907  __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
3908  } else {
3909  // Check if we could survive without allocation.
3910  __ cmp(scratch, r4);
3911  __ b(gt, &call_builtin);
3912 
3913  __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
3914  __ StoreNumberToDoubleElements(r4, scratch, elements, r5, d0,
3915  &call_builtin, argc * kDoubleSize);
3916  }
3917 
3918  // Save new length.
3919  __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
3920  __ Drop(argc + 1);
3921  __ mov(r0, scratch);
3922  __ Ret();
3923 
3924  if (IsFastDoubleElementsKind(elements_kind())) {
3925  __ bind(&call_builtin);
3926  __ TailCallExternalReference(
3927  ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
3928  return;
3929  }
3930 
3931  __ bind(&with_write_barrier);
3932 
3933  if (IsFastSmiElementsKind(elements_kind())) {
3934  if (FLAG_trace_elements_transitions) __ jmp(&call_builtin);
3935 
3937  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3938  __ cmp(r9, ip);
3939  __ b(eq, &call_builtin);
3940 
3941  ElementsKind target_kind = IsHoleyElementsKind(elements_kind())
3942  ? FAST_HOLEY_DOUBLE_ELEMENTS : FAST_DOUBLE_ELEMENTS;
3946  const int header_size = FixedArrayBase::kHeaderSize;
3947  // Verify that the object can be transitioned in place.
3948  const int origin_offset = header_size + elements_kind() * kPointerSize;
3949  __ ldr(r2, FieldMemOperand(receiver, origin_offset));
3951  __ cmp(r2, ip);
3952  __ b(ne, &call_builtin);
3953 
3954  const int target_offset = header_size + target_kind * kPointerSize;
3955  __ ldr(r3, FieldMemOperand(r3, target_offset));
3956  __ mov(r2, receiver);
3959  }
3960 
3961  // Save new length.
3962  __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
3963 
3964  // Store the value.
3965  // We may need a register containing the address end_elements below, so write
3966  // back the value in end_elements.
3967  __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(scratch));
3968  __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
3969 
3970  __ RecordWrite(elements,
3971  end_elements,
3972  r4,
3973  kLRHasNotBeenSaved,
3974  kDontSaveFPRegs,
3975  EMIT_REMEMBERED_SET,
3976  OMIT_SMI_CHECK);
3977  __ Drop(argc + 1);
3978  __ mov(r0, scratch);
3979  __ Ret();
3980 
3981  __ bind(&attempt_to_grow_elements);
3982  // scratch: array's length + 1.
3983 
3984  if (!FLAG_inline_new) {
3985  __ bind(&call_builtin);
3986  __ TailCallExternalReference(
3987  ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
3988  return;
3989  }
3990 
3991  __ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize));
3992  // Growing elements that are SMI-only requires special handling in case the
3993  // new element is non-Smi. For now, delegate to the builtin.
3994  if (IsFastSmiElementsKind(elements_kind())) {
3995  __ JumpIfNotSmi(r2, &call_builtin);
3996  }
3997 
3998  // We could be lucky and the elements array could be at the top of new-space.
3999  // In this case we can just grow it in place by moving the allocation pointer
4000  // up.
4001  ExternalReference new_space_allocation_top =
4002  ExternalReference::new_space_allocation_top_address(isolate);
4003  ExternalReference new_space_allocation_limit =
4004  ExternalReference::new_space_allocation_limit_address(isolate);
4005 
4006  const int kAllocationDelta = 4;
4007  ASSERT(kAllocationDelta >= argc);
4008  // Load top and check if it is the end of elements.
4009  __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(scratch));
4010  __ add(end_elements, end_elements, Operand(kEndElementsOffset));
4011  __ mov(r4, Operand(new_space_allocation_top));
4012  __ ldr(r3, MemOperand(r4));
4013  __ cmp(end_elements, r3);
4014  __ b(ne, &call_builtin);
4015 
4016  __ mov(r9, Operand(new_space_allocation_limit));
4017  __ ldr(r9, MemOperand(r9));
4018  __ add(r3, r3, Operand(kAllocationDelta * kPointerSize));
4019  __ cmp(r3, r9);
4020  __ b(hi, &call_builtin);
4021 
4022  // We fit and could grow elements.
4023  // Update new_space_allocation_top.
4024  __ str(r3, MemOperand(r4));
4025  // Push the argument.
4026  __ str(r2, MemOperand(end_elements));
4027  // Fill the rest with holes.
4028  __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
4029  for (int i = 1; i < kAllocationDelta; i++) {
4030  __ str(r3, MemOperand(end_elements, i * kPointerSize));
4031  }
4032 
4033  // Update elements' and array's sizes.
4034  __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
4036  __ add(r4, r4, Operand(Smi::FromInt(kAllocationDelta)));
4038 
4039  // Elements are in new space, so write barrier is not required.
4040  __ Drop(argc + 1);
4041  __ mov(r0, scratch);
4042  __ Ret();
4043 
4044  __ bind(&call_builtin);
4045  __ TailCallExternalReference(
4046  ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
4047 }
4048 
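// Illustrative sketch (not part of code-stubs-arm.cc): the "grow in place"
// trick used by the attempt_to_grow_elements path above, in C++. If the
// backing store ends exactly at the new-space allocation top and the bump
// would stay under the limit, the store can be extended without copying.
// The pointer parameters are stand-ins for the allocation-top and
// allocation-limit external references used by the stub.
#include <stdint.h>
#include <stddef.h>

static bool TryGrowElementsInPlace(uintptr_t elements_end,
                                   uintptr_t* new_space_top,
                                   uintptr_t new_space_limit,
                                   size_t grow_by_bytes) {
  // Something else was allocated after the elements array: must reallocate.
  if (*new_space_top != elements_end) return false;
  // Not enough head room left in new space: fall back to the builtin.
  if (elements_end + grow_by_bytes > new_space_limit) return false;
  *new_space_top = elements_end + grow_by_bytes;  // bump the allocation top
  return true;  // caller fills the fresh slots (the stub writes holes)
}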
4049 
4050 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
4051  // ----------- S t a t e -------------
4052  // -- r1 : left
4053  // -- r0 : right
4054  // -- lr : return address
4055  // -----------------------------------
4056  Isolate* isolate = masm->isolate();
4057 
4058  // Load r2 with the allocation site. We stick an undefined dummy value here
4059  // and replace it with the real allocation site later when we instantiate this
4060  // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
4061  __ Move(r2, handle(isolate->heap()->undefined_value()));
4062 
4063  // Make sure that we actually patched the allocation site.
4064  if (FLAG_debug_code) {
4065  __ tst(r2, Operand(kSmiTagMask));
4066  __ Assert(ne, kExpectedAllocationSite);
4067  __ push(r2);
4069  __ LoadRoot(ip, Heap::kAllocationSiteMapRootIndex);
4070  __ cmp(r2, ip);
4071  __ pop(r2);
4072  __ Assert(eq, kExpectedAllocationSite);
4073  }
4074 
4075  // Tail call into the stub that handles binary operations with allocation
4076  // sites.
4077  BinaryOpWithAllocationSiteStub stub(state_);
4078  __ TailCallStub(&stub);
4079 }
4080 
4081 
4082 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
4083  ASSERT(state_ == CompareIC::SMI);
4084  Label miss;
4085  __ orr(r2, r1, r0);
4086  __ JumpIfNotSmi(r2, &miss);
4087 
4088  if (GetCondition() == eq) {
4089  // For equality we do not care about the sign of the result.
4090  __ sub(r0, r0, r1, SetCC);
4091  } else {
4092  // Untag before subtracting to avoid handling overflow.
4093  __ SmiUntag(r1);
4094  __ sub(r0, r1, Operand::SmiUntag(r0));
4095  }
4096  __ Ret();
4097 
4098  __ bind(&miss);
4099  GenerateMiss(masm);
4100 }
4101 
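// Illustrative sketch (not part of code-stubs-arm.cc): why the ordered path
// above untags before subtracting. Two tagged smis occupy the full 32-bit
// register, so their difference can overflow and flip the sign; the untagged
// 31-bit payloads cannot. A 32-bit smi layout (value << 1, tag bit 0) is
// assumed here.
#include <stdint.h>

static int32_t SmiCompareForOrdering(int32_t lhs_tagged, int32_t rhs_tagged) {
  int32_t lhs = lhs_tagged >> 1;  // SmiUntag
  int32_t rhs = rhs_tagged >> 1;
  return lhs - rhs;  // negative, zero or positive; overflow is impossible
}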
4102 
4103 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
4104  ASSERT(state_ == CompareIC::NUMBER);
4105 
4106  Label generic_stub;
4107  Label unordered, maybe_undefined1, maybe_undefined2;
4108  Label miss;
4109 
4110  if (left_ == CompareIC::SMI) {
4111  __ JumpIfNotSmi(r1, &miss);
4112  }
4113  if (right_ == CompareIC::SMI) {
4114  __ JumpIfNotSmi(r0, &miss);
4115  }
4116 
4117  // Inlining the double comparison and falling back to the general compare
4118  // stub if NaN is involved.
4119  // Load left and right operand.
4120  Label done, left, left_smi, right_smi;
4121  __ JumpIfSmi(r0, &right_smi);
4122  __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
4123  DONT_DO_SMI_CHECK);
4124  __ sub(r2, r0, Operand(kHeapObjectTag));
4125  __ vldr(d1, r2, HeapNumber::kValueOffset);
4126  __ b(&left);
4127  __ bind(&right_smi);
4128  __ SmiToDouble(d1, r0);
4129 
4130  __ bind(&left);
4131  __ JumpIfSmi(r1, &left_smi);
4132  __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
4133  DONT_DO_SMI_CHECK);
4134  __ sub(r2, r1, Operand(kHeapObjectTag));
4135  __ vldr(d0, r2, HeapNumber::kValueOffset);
4136  __ b(&done);
4137  __ bind(&left_smi);
4138  __ SmiToDouble(d0, r1);
4139 
4140  __ bind(&done);
4141  // Compare operands.
4142  __ VFPCompareAndSetFlags(d0, d1);
4143 
4144  // Don't base result on status bits when a NaN is involved.
4145  __ b(vs, &unordered);
4146 
4147  // Return a result of -1, 0, or 1, based on status bits.
4148  __ mov(r0, Operand(EQUAL), LeaveCC, eq);
4149  __ mov(r0, Operand(LESS), LeaveCC, lt);
4150  __ mov(r0, Operand(GREATER), LeaveCC, gt);
4151  __ Ret();
4152 
4153  __ bind(&unordered);
4154  __ bind(&generic_stub);
4157  __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
4158 
4159  __ bind(&maybe_undefined1);
4160  if (Token::IsOrderedRelationalCompareOp(op_)) {
4161  __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
4162  __ b(ne, &miss);
4163  __ JumpIfSmi(r1, &unordered);
4164  __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
4165  __ b(ne, &maybe_undefined2);
4166  __ jmp(&unordered);
4167  }
4168 
4169  __ bind(&maybe_undefined2);
4170  if (Token::IsOrderedRelationalCompareOp(op_)) {
4171  __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
4172  __ b(eq, &unordered);
4173  }
4174 
4175  __ bind(&miss);
4176  GenerateMiss(masm);
4177 }
4178 
4179 
4180 void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
4182  Label miss;
4183 
4184  // Registers containing left and right operands respectively.
4185  Register left = r1;
4186  Register right = r0;
4187  Register tmp1 = r2;
4188  Register tmp2 = r3;
4189 
4190  // Check that both operands are heap objects.
4191  __ JumpIfEitherSmi(left, right, &miss);
4192 
4193  // Check that both operands are internalized strings.
4194  __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
4195  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
4196  __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
4197  __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
4199  __ orr(tmp1, tmp1, Operand(tmp2));
4200  __ tst(tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
4201  __ b(ne, &miss);
4202 
4203  // Internalized strings are compared by identity.
4204  __ cmp(left, right);
4205  // Make sure r0 is non-zero. At this point input operands are
4206  // guaranteed to be non-zero.
4207  ASSERT(right.is(r0));
4208  STATIC_ASSERT(EQUAL == 0);
4209  STATIC_ASSERT(kSmiTag == 0);
4210  __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
4211  __ Ret();
4212 
4213  __ bind(&miss);
4214  GenerateMiss(masm);
4215 }
4216 
4217 
4218 void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
4219  ASSERT(state_ == CompareIC::UNIQUE_NAME);
4220  ASSERT(GetCondition() == eq);
4221  Label miss;
4222 
4223  // Registers containing left and right operands respectively.
4224  Register left = r1;
4225  Register right = r0;
4226  Register tmp1 = r2;
4227  Register tmp2 = r3;
4228 
4229  // Check that both operands are heap objects.
4230  __ JumpIfEitherSmi(left, right, &miss);
4231 
4232  // Check that both operands are unique names. This leaves the instance
4233  // types loaded in tmp1 and tmp2.
4234  __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
4235  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
4236  __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
4237  __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
4238 
4239  __ JumpIfNotUniqueName(tmp1, &miss);
4240  __ JumpIfNotUniqueName(tmp2, &miss);
4241 
4242  // Unique names are compared by identity.
4243  __ cmp(left, right);
4244  // Make sure r0 is non-zero. At this point input operands are
4245  // guaranteed to be non-zero.
4246  ASSERT(right.is(r0));
4247  STATIC_ASSERT(EQUAL == 0);
4248  STATIC_ASSERT(kSmiTag == 0);
4249  __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
4250  __ Ret();
4251 
4252  __ bind(&miss);
4253  GenerateMiss(masm);
4254 }
4255 
4256 
4257 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
4258  ASSERT(state_ == CompareIC::STRING);
4259  Label miss;
4260 
4261  bool equality = Token::IsEqualityOp(op_);
4262 
4263  // Registers containing left and right operands respectively.
4264  Register left = r1;
4265  Register right = r0;
4266  Register tmp1 = r2;
4267  Register tmp2 = r3;
4268  Register tmp3 = r4;
4269  Register tmp4 = r5;
4270 
4271  // Check that both operands are heap objects.
4272  __ JumpIfEitherSmi(left, right, &miss);
4273 
4274  // Check that both operands are strings. This leaves the instance
4275  // types loaded in tmp1 and tmp2.
4276  __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
4277  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
4278  __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
4279  __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
4281  __ orr(tmp3, tmp1, tmp2);
4282  __ tst(tmp3, Operand(kIsNotStringMask));
4283  __ b(ne, &miss);
4284 
4285  // Fast check for identical strings.
4286  __ cmp(left, right);
4287  STATIC_ASSERT(EQUAL == 0);
4288  STATIC_ASSERT(kSmiTag == 0);
4289  __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
4290  __ Ret(eq);
4291 
4292  // Handle not identical strings.
4293 
4294  // Check that both strings are internalized strings. If they are, we're done
4295  // because we already know they are not identical. We know they are both
4296  // strings.
4297  if (equality) {
4298  ASSERT(GetCondition() == eq);
4300  __ orr(tmp3, tmp1, Operand(tmp2));
4301  __ tst(tmp3, Operand(kIsNotInternalizedMask));
4302  // Make sure r0 is non-zero. At this point input operands are
4303  // guaranteed to be non-zero.
4304  ASSERT(right.is(r0));
4305  __ Ret(eq);
4306  }
4307 
4308  // Check that both strings are sequential ASCII.
4309  Label runtime;
4310  __ JumpIfBothInstanceTypesAreNotSequentialAscii(
4311  tmp1, tmp2, tmp3, tmp4, &runtime);
4312 
4313  // Compare flat ASCII strings. Returns when done.
4314  if (equality) {
4315  StringCompareStub::GenerateFlatAsciiStringEquals(
4316  masm, left, right, tmp1, tmp2, tmp3);
4317  } else {
4318  StringCompareStub::GenerateCompareFlatAsciiStrings(
4319  masm, left, right, tmp1, tmp2, tmp3, tmp4);
4320  }
4321 
4322  // Handle more complex cases in runtime.
4323  __ bind(&runtime);
4324  __ Push(left, right);
4325  if (equality) {
4326  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
4327  } else {
4328  __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
4329  }
4330 
4331  __ bind(&miss);
4332  GenerateMiss(masm);
4333 }
4334 
4335 
4336 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
4337  ASSERT(state_ == CompareIC::OBJECT);
4338  Label miss;
4339  __ and_(r2, r1, Operand(r0));
4340  __ JumpIfSmi(r2, &miss);
4341 
4342  __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
4343  __ b(ne, &miss);
4344  __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE);
4345  __ b(ne, &miss);
4346 
4347  ASSERT(GetCondition() == eq);
4348  __ sub(r0, r0, Operand(r1));
4349  __ Ret();
4350 
4351  __ bind(&miss);
4352  GenerateMiss(masm);
4353 }
4354 
4355 
4356 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
4357  Label miss;
4358  __ and_(r2, r1, Operand(r0));
4359  __ JumpIfSmi(r2, &miss);
4362  __ cmp(r2, Operand(known_map_));
4363  __ b(ne, &miss);
4364  __ cmp(r3, Operand(known_map_));
4365  __ b(ne, &miss);
4366 
4367  __ sub(r0, r0, Operand(r1));
4368  __ Ret();
4369 
4370  __ bind(&miss);
4371  GenerateMiss(masm);
4372 }
4373 
4374 
4375 
4376 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
4377  {
4378  // Call the runtime system in a fresh internal frame.
4379  ExternalReference miss =
4380  ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
4381 
4382  FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
4383  __ Push(r1, r0);
4384  __ Push(lr, r1, r0);
4385  __ mov(ip, Operand(Smi::FromInt(op_)));
4386  __ push(ip);
4387  __ CallExternalReference(miss, 3);
4388  // Compute the entry point of the rewritten stub.
4389  __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
4390  // Restore registers.
4391  __ pop(lr);
4392  __ Pop(r1, r0);
4393  }
4394 
4395  __ Jump(r2);
4396 }
4397 
4398 
4399 void DirectCEntryStub::Generate(MacroAssembler* masm) {
4400  // Place the return address on the stack, making the call
4401  // GC safe. The RegExp backend also relies on this.
4402  __ str(lr, MemOperand(sp, 0));
4403  __ blx(ip); // Call the C++ function.
4404  __ VFPEnsureFPSCRState(r2);
4405  __ ldr(pc, MemOperand(sp, 0));
4406 }
4407 
4408 
4409 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
4410  Register target) {
4411  intptr_t code =
4412  reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
4413  __ Move(ip, target);
4414  __ mov(lr, Operand(code, RelocInfo::CODE_TARGET));
4415  __ blx(lr); // Call the stub.
4416 }
4417 
4418 
4419 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
4420  Label* miss,
4421  Label* done,
4422  Register receiver,
4423  Register properties,
4424  Handle<Name> name,
4425  Register scratch0) {
4426  ASSERT(name->IsUniqueName());
4427  // If the names of the slots in the range from 1 to kProbes - 1 for the hash
4428  // value are not equal to the name and the kProbes-th slot is not used (its
4429  // name is the undefined value), the hash table is guaranteed not to contain
4430  // the property. This holds even if some slots represent deleted properties
4431  // (their names are the hole value).
4432  for (int i = 0; i < kInlinedProbes; i++) {
4433  // scratch0 points to properties hash.
4434  // Compute the masked index: (hash + i + i * i) & mask.
4435  Register index = scratch0;
4436  // Capacity is smi 2^n.
4437  __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
4438  __ sub(index, index, Operand(1));
4439  __ and_(index, index, Operand(
4440  Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
4441 
4442  // Scale the index by multiplying by the entry size.
4444  __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
4445 
4446  Register entity_name = scratch0;
4447  // Having undefined at this place means the name is not contained.
4448  ASSERT_EQ(kSmiTagSize, 1);
4449  Register tmp = properties;
4450  __ add(tmp, properties, Operand(index, LSL, 1));
4451  __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
4452 
4453  ASSERT(!tmp.is(entity_name));
4454  __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
4455  __ cmp(entity_name, tmp);
4456  __ b(eq, done);
4457 
4458  // Load the hole ready for use below:
4459  __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
4460 
4461  // Stop if found the property.
4462  __ cmp(entity_name, Operand(Handle<Name>(name)));
4463  __ b(eq, miss);
4464 
4465  Label good;
4466  __ cmp(entity_name, tmp);
4467  __ b(eq, &good);
4468 
4469  // Check if the entry name is not a unique name.
4470  __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
4471  __ ldrb(entity_name,
4472  FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
4473  __ JumpIfNotUniqueName(entity_name, miss);
4474  __ bind(&good);
4475 
4476  // Restore the properties.
4477  __ ldr(properties,
4478  FieldMemOperand(receiver, JSObject::kPropertiesOffset));
4479  }
4480 
4481  const int spill_mask =
4482  (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() |
4483  r2.bit() | r1.bit() | r0.bit());
4484 
4485  __ stm(db_w, sp, spill_mask);
4487  __ mov(r1, Operand(Handle<Name>(name)));
4489  __ CallStub(&stub);
4490  __ cmp(r0, Operand::Zero());
4491  __ ldm(ia_w, sp, spill_mask);
4492 
4493  __ b(eq, done);
4494  __ b(ne, miss);
4495 }
4496 
4497 
4498 // Probe the name dictionary in the |elements| register. Jump to the
4499 // |done| label if a property with the given name is found. Jump to
4500 // the |miss| label otherwise.
4501 // If lookup was successful |scratch2| will be equal to elements + 4 * index.
4502 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
4503  Label* miss,
4504  Label* done,
4505  Register elements,
4506  Register name,
4507  Register scratch1,
4508  Register scratch2) {
4509  ASSERT(!elements.is(scratch1));
4510  ASSERT(!elements.is(scratch2));
4511  ASSERT(!name.is(scratch1));
4512  ASSERT(!name.is(scratch2));
4513 
4514  __ AssertName(name);
4515 
4516  // Compute the capacity mask.
4517  __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
4518  __ SmiUntag(scratch1);
4519  __ sub(scratch1, scratch1, Operand(1));
4520 
4521  // Generate an unrolled loop that performs a few probes before
4522  // giving up. Measurements done on Gmail indicate that 2 probes
4523  // cover ~93% of loads from dictionaries.
4524  for (int i = 0; i < kInlinedProbes; i++) {
4525  // Compute the masked index: (hash + i + i * i) & mask.
4526  __ ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
4527  if (i > 0) {
4528  // Add the probe offset (i + i * i) left shifted to avoid right shifting
4529  // the hash in a separate instruction. The value hash + i + i * i is right
4530  // shifted in the following "and" instruction.
4531  ASSERT(NameDictionary::GetProbeOffset(i) <
4532  1 << (32 - Name::kHashFieldOffset));
4533  __ add(scratch2, scratch2, Operand(
4534  NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4535  }
4536  __ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
4537 
4538  // Scale the index by multiplying by the element size.
4540  // scratch2 = scratch2 * 3.
4541  __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
4542 
4543  // Check if the key is identical to the name.
4544  __ add(scratch2, elements, Operand(scratch2, LSL, 2));
4545  __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
4546  __ cmp(name, Operand(ip));
4547  __ b(eq, done);
4548  }
4549 
4550  const int spill_mask =
4551  (lr.bit() | r6.bit() | r5.bit() | r4.bit() |
4552  r3.bit() | r2.bit() | r1.bit() | r0.bit()) &
4553  ~(scratch1.bit() | scratch2.bit());
4554 
4555  __ stm(db_w, sp, spill_mask);
4556  if (name.is(r0)) {
4557  ASSERT(!elements.is(r1));
4558  __ Move(r1, name);
4559  __ Move(r0, elements);
4560  } else {
4561  __ Move(r0, elements);
4562  __ Move(r1, name);
4563  }
4565  __ CallStub(&stub);
4566  __ cmp(r0, Operand::Zero());
4567  __ mov(scratch2, Operand(r2));
4568  __ ldm(ia_w, sp, spill_mask);
4569 
4570  __ b(ne, done);
4571  __ b(eq, miss);
4572 }
4573 
4574 
4575 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
4576  // This stub overrides SometimesSetsUpAFrame() to return false. That means
4577  // we cannot call anything that could cause a GC from this stub.
4578  // Registers:
4579  // result: NameDictionary to probe
4580  // r1: key
4581  // dictionary: NameDictionary to probe.
4582  // index: will hold the index of the entry if the lookup is successful;
4583  // it might alias with result_.
4584  // Returns:
4585  // result_ is zero if the lookup failed, non-zero otherwise.
4586 
4587  Register result = r0;
4588  Register dictionary = r0;
4589  Register key = r1;
4590  Register index = r2;
4591  Register mask = r3;
4592  Register hash = r4;
4593  Register undefined = r5;
4594  Register entry_key = r6;
4595 
4596  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
4597 
4598  __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset));
4599  __ SmiUntag(mask);
4600  __ sub(mask, mask, Operand(1));
4601 
4602  __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
4603 
4604  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
4605 
4606  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
4607  // Compute the masked index: (hash + i + i * i) & mask.
4608  // Capacity is smi 2^n.
4609  if (i > 0) {
4610  // Add the probe offset (i + i * i) left shifted to avoid right shifting
4611  // the hash in a separate instruction. The value hash + i + i * i is right
4612  // shifted in the following "and" instruction.
4613  ASSERT(NameDictionary::GetProbeOffset(i) <
4614  1 << (32 - Name::kHashFieldOffset));
4615  __ add(index, hash, Operand(
4616  NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4617  } else {
4618  __ mov(index, Operand(hash));
4619  }
4620  __ and_(index, mask, Operand(index, LSR, Name::kHashShift));
4621 
4622  // Scale the index by multiplying by the entry size.
4624  __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
4625 
4626  ASSERT_EQ(kSmiTagSize, 1);
4627  __ add(index, dictionary, Operand(index, LSL, 2));
4628  __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
4629 
4630  // Having undefined at this place means the name is not contained.
4631  __ cmp(entry_key, Operand(undefined));
4632  __ b(eq, &not_in_dictionary);
4633 
4634  // Stop if found the property.
4635  __ cmp(entry_key, Operand(key));
4636  __ b(eq, &in_dictionary);
4637 
4638  if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
4639  // Check if the entry name is not a unique name.
4640  __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
4641  __ ldrb(entry_key,
4642  FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
4643  __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
4644  }
4645  }
4646 
4647  __ bind(&maybe_in_dictionary);
4648  // If we are doing negative lookup then probing failure should be
4649  // treated as a lookup success. For positive lookup probing failure
4650  // should be treated as lookup failure.
4651  if (mode_ == POSITIVE_LOOKUP) {
4652  __ mov(result, Operand::Zero());
4653  __ Ret();
4654  }
4655 
4656  __ bind(&in_dictionary);
4657  __ mov(result, Operand(1));
4658  __ Ret();
4659 
4660  __ bind(&not_in_dictionary);
4661  __ mov(result, Operand::Zero());
4662  __ Ret();
4663 }
4664 
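// Illustrative sketch (not part of code-stubs-arm.cc): the shape of the
// probing loop above in C++. ProbeOffsetSketch stands in for
// NameDictionary::GetProbeOffset and kSketchEntrySize for the three
// pointer-sized slots per entry; these, and the return conventions, are
// assumptions for illustration rather than the real runtime API.
#include <stdint.h>

static const int kSketchEntrySize = 3;  // key, value and details slots

static uint32_t ProbeOffsetSketch(uint32_t probe) {
  return probe + probe * probe;  // per the "(hash + i + i * i) & mask" comments
}

// Returns the entry index, -1 for a definite miss, or -2 when the probe
// budget is exhausted (the stub's &maybe_in_dictionary case).
static int FindNameEntry(const void* const* keys, uint32_t capacity,
                         uint32_t hash, const void* name,
                         const void* undefined_sentinel, int total_probes) {
  const uint32_t mask = capacity - 1;  // capacity is a power of two
  for (int i = 0; i < total_probes; i++) {
    uint32_t index = (hash + ProbeOffsetSketch(static_cast<uint32_t>(i))) & mask;
    const void* candidate = keys[index * kSketchEntrySize];  // entry's key slot
    if (candidate == undefined_sentinel) return -1;  // &not_in_dictionary
    if (candidate == name) return static_cast<int>(index);  // &in_dictionary
  }
  return -2;
}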
4665 
4666 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
4667  Isolate* isolate) {
4669  stub1.GetCode(isolate);
4670  // Hydrogen code stubs need stub2 at snapshot time.
4672  stub2.GetCode(isolate);
4673 }
4674 
4675 
4676 bool CodeStub::CanUseFPRegisters() {
4677  return true; // VFP2 is a base requirement for V8
4678 }
4679 
4680 
4681 // Takes the input in 3 registers: address_ value_ and object_. A pointer to
4682 // the value has just been written into the object, now this stub makes sure
4683 // we keep the GC informed. The word in the object where the value has been
4684 // written is in the address register.
4685 void RecordWriteStub::Generate(MacroAssembler* masm) {
4686  Label skip_to_incremental_noncompacting;
4687  Label skip_to_incremental_compacting;
4688 
4689  // The first two instructions are generated with labels so as to get the
4690  // offset fixed up correctly by the bind(Label*) call. We patch it back and
4691  // forth between a compare instructions (a nop in this position) and the
4692  // forth between a compare instruction (a nop in this position) and the
4693  // See RecordWriteStub::Patch for details.
4694  {
4695  // Block literal pool emission, as the position of these two instructions
4696  // is assumed by the patching code.
4697  Assembler::BlockConstPoolScope block_const_pool(masm);
4698  __ b(&skip_to_incremental_noncompacting);
4699  __ b(&skip_to_incremental_compacting);
4700  }
4701 
4702  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
4703  __ RememberedSetHelper(object_,
4704  address_,
4705  value_,
4706  save_fp_regs_mode_,
4707  MacroAssembler::kReturnAtEnd);
4708  }
4709  __ Ret();
4710 
4711  __ bind(&skip_to_incremental_noncompacting);
4712  GenerateIncremental(masm, INCREMENTAL);
4713 
4714  __ bind(&skip_to_incremental_compacting);
4715  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
4716 
4717  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
4718  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
4719  ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
4720  ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
4721  PatchBranchIntoNop(masm, 0);
4723 }
4724 
4725 
4726 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
4727  regs_.Save(masm);
4728 
4729  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
4730  Label dont_need_remembered_set;
4731 
4732  __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
4733  __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
4734  regs_.scratch0(),
4735  &dont_need_remembered_set);
4736 
4737  __ CheckPageFlag(regs_.object(),
4738  regs_.scratch0(),
4739  1 << MemoryChunk::SCAN_ON_SCAVENGE,
4740  ne,
4741  &dont_need_remembered_set);
4742 
4743  // First notify the incremental marker if necessary, then update the
4744  // remembered set.
4745  CheckNeedsToInformIncrementalMarker(
4746  masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
4747  InformIncrementalMarker(masm);
4748  regs_.Restore(masm);
4749  __ RememberedSetHelper(object_,
4750  address_,
4751  value_,
4752  save_fp_regs_mode_,
4753  MacroAssembler::kReturnAtEnd);
4754 
4755  __ bind(&dont_need_remembered_set);
4756  }
4757 
4758  CheckNeedsToInformIncrementalMarker(
4759  masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
4760  InformIncrementalMarker(masm);
4761  regs_.Restore(masm);
4762  __ Ret();
4763 }
4764 
4765 
4766 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
4767  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
4768  int argument_count = 3;
4769  __ PrepareCallCFunction(argument_count, regs_.scratch0());
4770  Register address =
4771  r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
4772  ASSERT(!address.is(regs_.object()));
4773  ASSERT(!address.is(r0));
4774  __ Move(address, regs_.address());
4775  __ Move(r0, regs_.object());
4776  __ Move(r1, address);
4777  __ mov(r2, Operand(ExternalReference::isolate_address(masm->isolate())));
4778 
4779  AllowExternalCallThatCantCauseGC scope(masm);
4780  __ CallCFunction(
4781  ExternalReference::incremental_marking_record_write_function(
4782  masm->isolate()),
4783  argument_count);
4784  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
4785 }
4786 
4787 
4788 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
4789  MacroAssembler* masm,
4790  OnNoNeedToInformIncrementalMarker on_no_need,
4791  Mode mode) {
4792  Label on_black;
4793  Label need_incremental;
4794  Label need_incremental_pop_scratch;
4795 
4796  __ and_(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
4797  __ ldr(regs_.scratch1(),
4798  MemOperand(regs_.scratch0(),
4799  MemoryChunk::kWriteBarrierCounterOffset));
4800  __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1), SetCC);
4801  __ str(regs_.scratch1(),
4802  MemOperand(regs_.scratch0(),
4803  MemoryChunk::kWriteBarrierCounterOffset));
4804  __ b(mi, &need_incremental);
4805 
4806  // Let's look at the color of the object: If it is not black we don't have
4807  // to inform the incremental marker.
4808  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
4809 
4810  regs_.Restore(masm);
4811  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4812  __ RememberedSetHelper(object_,
4813  address_,
4814  value_,
4815  save_fp_regs_mode_,
4816  MacroAssembler::kReturnAtEnd);
4817  } else {
4818  __ Ret();
4819  }
4820 
4821  __ bind(&on_black);
4822 
4823  // Get the value from the slot.
4824  __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
4825 
4826  if (mode == INCREMENTAL_COMPACTION) {
4827  Label ensure_not_white;
4828 
4829  __ CheckPageFlag(regs_.scratch0(), // Contains value.
4830  regs_.scratch1(), // Scratch.
4831  MemoryChunk::kEvacuationCandidateMask,
4832  eq,
4833  &ensure_not_white);
4834 
4835  __ CheckPageFlag(regs_.object(),
4836  regs_.scratch1(), // Scratch.
4837  MemoryChunk::kSkipEvacuationSlotsRecordingMask,
4838  eq,
4839  &need_incremental);
4840 
4841  __ bind(&ensure_not_white);
4842  }
4843 
4844  // We need extra registers for this, so we push the object and the address
4845  // register temporarily.
4846  __ Push(regs_.object(), regs_.address());
4847  __ EnsureNotWhite(regs_.scratch0(), // The value.
4848  regs_.scratch1(), // Scratch.
4849  regs_.object(), // Scratch.
4850  regs_.address(), // Scratch.
4851  &need_incremental_pop_scratch);
4852  __ Pop(regs_.object(), regs_.address());
4853 
4854  regs_.Restore(masm);
4855  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4856  __ RememberedSetHelper(object_,
4857  address_,
4858  value_,
4859  save_fp_regs_mode_,
4860  MacroAssembler::kReturnAtEnd);
4861  } else {
4862  __ Ret();
4863  }
4864 
4865  __ bind(&need_incremental_pop_scratch);
4866  __ Pop(regs_.object(), regs_.address());
4867 
4868  __ bind(&need_incremental);
4869 
4870  // Fall through when we need to inform the incremental marker.
4871 }
4872 
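The three routines above make up the incremental-marking half of the record-write barrier: decide whether a remembered-set entry is needed, decide whether the marker must be told, and finally call out to C++. Below is a minimal, self-contained sketch of that decision flow using hypothetical toy types; the real stub additionally consults the stored value's colour, evacuation-candidate pages and a per-page write-barrier counter before falling back to the C++ callback.

    #include <cstdio>

    // Toy model of the record-write decision flow (all names hypothetical).
    struct ToyWriteState {
      bool incremental_marking;       // stub patched to INCREMENTAL(_COMPACTION)?
      bool value_in_new_space;        // JumpIfNotInNewSpace on the stored value
      bool page_scanned_on_scavenge;  // CheckPageFlag(SCAN_ON_SCAVENGE) on object
      bool object_is_black;           // JumpIfBlack on the object
    };

    void ToyRecordWrite(const ToyWriteState& s) {
      if (!s.incremental_marking) {
        std::puts("STORE_BUFFER_ONLY: record slot in remembered set and return");
        return;
      }
      // A remembered-set entry is only needed for a new-space value stored into
      // an object whose page is not already scanned on scavenge.
      bool need_remembered_set =
          s.value_in_new_space && !s.page_scanned_on_scavenge;
      // Only stores into black (already marked) objects can require re-marking.
      if (s.object_is_black) std::puts("inform incremental marker");
      if (need_remembered_set) std::puts("record slot in remembered set");
    }

    int main() {
      ToyRecordWrite({true, true, false, true});
      return 0;
    }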
4873 
4874 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
4875  // ----------- S t a t e -------------
4876  // -- r0 : element value to store
4877  // -- r3 : element index as smi
4878  // -- sp[0] : array literal index in function as smi
4879  // -- sp[4] : array literal
4880  // clobbers r1, r2, r4
4881  // -----------------------------------
4882 
4883  Label element_done;
4884  Label double_elements;
4885  Label smi_element;
4886  Label slow_elements;
4887  Label fast_elements;
4888 
4889  // Get array literal index, array literal and its map.
4890  __ ldr(r4, MemOperand(sp, 0 * kPointerSize));
4891  __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
4892  __ ldr(r2, FieldMemOperand(r1, JSObject::kMapOffset));
4893 
4894  __ CheckFastElements(r2, r5, &double_elements);
4895  // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
4896  __ JumpIfSmi(r0, &smi_element);
4897  __ CheckFastSmiElements(r2, r5, &fast_elements);
4898 
4899  // Storing into the array literal requires an elements transition. Call into
4900  // the runtime.
4901  __ bind(&slow_elements);
4902  // call.
4903  __ Push(r1, r3, r0);
4904  __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4905  __ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset));
4906  __ Push(r5, r4);
4907  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
4908 
4909  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
4910  __ bind(&fast_elements);
4911  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
4912  __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
4913  __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4914  __ str(r0, MemOperand(r6, 0));
4915  // Update the write barrier for the array store.
4916  __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs,
4917  EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
4918  __ Ret();
4919 
4920  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
4921  // and value is Smi.
4922  __ bind(&smi_element);
4923  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
4924  __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
4925  __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize));
4926  __ Ret();
4927 
4928  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
4929  __ bind(&double_elements);
4930  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
4931  __ StoreNumberToDoubleElements(r0, r3, r5, r6, d0, &slow_elements);
4932  __ Ret();
4933 }
4934 
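The stub above picks one of four store paths from the array's elements kind and the type of the incoming value. A compact sketch of that dispatch, with hypothetical names standing in for V8's ElementsKind machinery; the double path is simplified (the real code also bails out to the runtime for non-number values):

    #include <cstdio>

    // Hypothetical mirror of the fast elements-kind families handled above.
    enum class ToyKind { kSmiOnly, kObject, kDouble };

    const char* ToyStorePath(ToyKind kind, bool value_is_smi) {
      if (kind == ToyKind::kDouble) return "unbox and store as double";
      if (value_is_smi) return "plain store, no write barrier needed";
      if (kind == ToyKind::kObject) return "store, then RecordWrite barrier";
      // Smi-only backing store receiving a heap object: needs a kind
      // transition, so the stub tail-calls Runtime::kStoreArrayLiteralElement.
      return "call the runtime to transition the elements kind";
    }

    int main() {
      std::puts(ToyStorePath(ToyKind::kSmiOnly, false));
      return 0;
    }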
4935 
4936 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
4937  CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
4938  __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
4939  int parameter_count_offset =
4940  StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
4941  __ ldr(r1, MemOperand(fp, parameter_count_offset));
4942  if (function_mode_ == JS_FUNCTION_STUB_MODE) {
4943  __ add(r1, r1, Operand(1));
4944  }
4945  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
4946  __ mov(r1, Operand(r1, LSL, kPointerSizeLog2));
4947  __ add(sp, sp, r1);
4948  __ Ret();
4949 }
4950 
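The epilogue above drops the caller's parameters by scaling the count loaded from the trampoline frame. A small sketch of that arithmetic, assuming 4-byte pointers as on 32-bit ARM (names are illustrative):

    // Bytes popped off the stack after leaving the STUB_FAILURE_TRAMPOLINE frame.
    int ToyBytesToDrop(int caller_parameter_count, bool js_function_stub_mode) {
      const int kToyPointerSizeLog2 = 2;         // 32-bit ARM: 4-byte pointers
      if (js_function_stub_mode) {
        caller_parameter_count += 1;             // also drop the receiver
      }
      return caller_parameter_count << kToyPointerSizeLog2;
    }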
4951 
4952 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
4953  if (masm->isolate()->function_entry_hook() != NULL) {
4954  PredictableCodeSizeScope predictable(masm, 4 * Assembler::kInstrSize);
4955  ProfileEntryHookStub stub;
4956  __ push(lr);
4957  __ CallStub(&stub);
4958  __ pop(lr);
4959  }
4960 }
4961 
4962 
4963 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
4964  // The entry hook is a "push lr" instruction, followed by a call.
4965  const int32_t kReturnAddressDistanceFromFunctionStart =
4966  3 * Assembler::kInstrSize;
4967 
4968  // This should contain all kCallerSaved registers.
4969  const RegList kSavedRegs =
4970  1 << 0 | // r0
4971  1 << 1 | // r1
4972  1 << 2 | // r2
4973  1 << 3 | // r3
4974  1 << 5 | // r5
4975  1 << 9; // r9
4976  // We also save lr, so the count here is one higher than the mask indicates.
4977  const int32_t kNumSavedRegs = 7;
4978 
4979  ASSERT((kCallerSaved & kSavedRegs) == kCallerSaved);
4980 
4981  // Save all caller-save registers as this may be called from anywhere.
4982  __ stm(db_w, sp, kSavedRegs | lr.bit());
4983 
4984  // Compute the function's address for the first argument.
4985  __ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart));
4986 
4987  // The caller's return address is above the saved temporaries.
4988  // Grab that for the second argument to the hook.
4989  __ add(r1, sp, Operand(kNumSavedRegs * kPointerSize));
4990 
4991  // Align the stack if necessary.
4992  int frame_alignment = masm->ActivationFrameAlignment();
4993  if (frame_alignment > kPointerSize) {
4994  __ mov(r5, sp);
4995  ASSERT(IsPowerOf2(frame_alignment));
4996  __ and_(sp, sp, Operand(-frame_alignment));
4997  }
4998 
4999 #if V8_HOST_ARCH_ARM
5000  int32_t entry_hook =
5001  reinterpret_cast<int32_t>(masm->isolate()->function_entry_hook());
5002  __ mov(ip, Operand(entry_hook));
5003 #else
5004  // Under the simulator we need to indirect the entry hook through a
5005  // trampoline function at a known address.
5006  // It additionally takes an isolate as a third parameter
5007  __ mov(r2, Operand(ExternalReference::isolate_address(masm->isolate())));
5008 
5009  ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
5010  __ mov(ip, Operand(ExternalReference(&dispatcher,
5011  ExternalReference::BUILTIN_CALL,
5012  masm->isolate())));
5013 #endif
5014  __ Call(ip);
5015 
5016  // Restore the stack pointer if needed.
5017  if (frame_alignment > kPointerSize) {
5018  __ mov(sp, r5);
5019  }
5020 
5021  // Also pop pc to get Ret(0).
5022  __ ldm(ia_w, sp, kSavedRegs | pc.bit());
5023 }
5024 
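The two ProfileEntryHookStub routines above let an embedder-supplied hook observe every instrumented function entry: the stub hands it the code's start address and the location of the caller's return-address slot. A sketch of what such a hook can look like on the C++ side; the FunctionEntryHook shape matches the v8.h typedef of this era, while the registration call is only shown in a comment because its exact overload is an assumption here:

    #include <cstdint>
    #include <cstdio>

    // Matches v8::FunctionEntryHook: (code start address, address of the
    // return-address slot saved by the stub). Runs on every hooked entry.
    void MyEntryHook(uintptr_t function, uintptr_t return_addr_location) {
      std::printf("enter code at %p (return slot %p)\n",
                  reinterpret_cast<void*>(function),
                  reinterpret_cast<void*>(return_addr_location));
    }

    // Registration (hedged): in this V8 version the hook is installed via
    // V8::SetFunctionEntryHook(...) before any code is generated, e.g.
    //   v8::V8::SetFunctionEntryHook(isolate, MyEntryHook);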
5025 
5026 template<class T>
5027 static void CreateArrayDispatch(MacroAssembler* masm,
5028  AllocationSiteOverrideMode mode) {
5029  if (mode == DISABLE_ALLOCATION_SITES) {
5030  T stub(GetInitialFastElementsKind(), mode);
5031  __ TailCallStub(&stub);
5032  } else if (mode == DONT_OVERRIDE) {
5033  int last_index = GetSequenceIndexFromFastElementsKind(
5034  TERMINAL_FAST_ELEMENTS_KIND);
5035  for (int i = 0; i <= last_index; ++i) {
5036  ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
5037  __ cmp(r3, Operand(kind));
5038  T stub(kind);
5039  __ TailCallStub(&stub, eq);
5040  }
5041 
5042  // If we reached this point there is a problem.
5043  __ Abort(kUnexpectedElementsKindInArrayConstructor);
5044  } else {
5045  UNREACHABLE();
5046  }
5047 }
5048 
5049 
5050 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
5051  AllocationSiteOverrideMode mode) {
5052  // r2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
5053  // r3 - kind (if mode != DISABLE_ALLOCATION_SITES)
5054  // r0 - number of arguments
5055  // r1 - constructor?
5056  // sp[0] - last argument
5057  Label normal_sequence;
5058  if (mode == DONT_OVERRIDE) {
5059  ASSERT(FAST_SMI_ELEMENTS == 0);
5060  ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
5061  ASSERT(FAST_ELEMENTS == 2);
5062  ASSERT(FAST_HOLEY_ELEMENTS == 3);
5063  ASSERT(FAST_DOUBLE_ELEMENTS == 4);
5064  ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
5065 
5066  // is the low bit set? If so, we are holey and that is good.
5067  __ tst(r3, Operand(1));
5068  __ b(ne, &normal_sequence);
5069  }
5070 
5071  // look at the first argument
5072  __ ldr(r5, MemOperand(sp, 0));
5073  __ cmp(r5, Operand::Zero());
5074  __ b(eq, &normal_sequence);
5075 
5076  if (mode == DISABLE_ALLOCATION_SITES) {
5077  ElementsKind initial = GetInitialFastElementsKind();
5078  ElementsKind holey_initial = GetHoleyElementsKind(initial);
5079 
5080  ArraySingleArgumentConstructorStub stub_holey(holey_initial,
5081  DISABLE_ALLOCATION_SITES);
5082  __ TailCallStub(&stub_holey);
5083 
5084  __ bind(&normal_sequence);
5085  ArraySingleArgumentConstructorStub stub(initial,
5086  DISABLE_ALLOCATION_SITES);
5087  __ TailCallStub(&stub);
5088  } else if (mode == DONT_OVERRIDE) {
5089  // We are going to create a holey array, but our kind is non-holey.
5090  // Fix kind and retry (only if we have an allocation site in the slot).
5091  __ add(r3, r3, Operand(1));
5092 
5093  if (FLAG_debug_code) {
5094  __ ldr(r5, FieldMemOperand(r2, 0));
5095  __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
5096  __ Assert(eq, kExpectedAllocationSite);
5097  }
5098 
5099  // Save the resulting elements kind in type info. We can't just store r3
5100  // in the AllocationSite::transition_info field because elements kind is
5101  // restricted to a portion of the field...upper bits need to be left alone.
5102  STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
5103  __ ldr(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
5104  __ add(r4, r4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
5105  __ str(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
5106 
5107  __ bind(&normal_sequence);
5108  int last_index = GetSequenceIndexFromFastElementsKind(
5109  TERMINAL_FAST_ELEMENTS_KIND);
5110  for (int i = 0; i <= last_index; ++i) {
5111  ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
5112  __ cmp(r3, Operand(kind));
5113  ArraySingleArgumentConstructorStub stub(kind);
5114  __ TailCallStub(&stub, eq);
5115  }
5116 
5117  // If we reached this point there is a problem.
5118  __ Abort(kUnexpectedElementsKindInArrayConstructor);
5119  } else {
5120  UNREACHABLE();
5121  }
5122 }
5123 
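CreateArrayDispatchOneArgument leans on the kind numbering asserted above: every holey kind is its packed kind plus one, so testing the low bit answers "already holey?" and adding one performs the packed-to-holey transition. A tiny illustration with a hypothetical enum mirroring those asserts:

    // Illustrative copy of the asserted numbering; not V8's real ElementsKind enum.
    enum ToyElementsKind {
      TOY_FAST_SMI_ELEMENTS = 0,
      TOY_FAST_HOLEY_SMI_ELEMENTS = 1,
      TOY_FAST_ELEMENTS = 2,
      TOY_FAST_HOLEY_ELEMENTS = 3,
      TOY_FAST_DOUBLE_ELEMENTS = 4,
      TOY_FAST_HOLEY_DOUBLE_ELEMENTS = 5
    };

    inline bool ToyIsHoley(ToyElementsKind k) { return (k & 1) != 0; }

    inline ToyElementsKind ToyToHoley(ToyElementsKind packed) {
      // Packed kinds are even, so +1 (equivalently, setting the low bit) yields
      // the matching holey kind -- the same adjustment the add(r3, r3, 1) above
      // applies to the kind held in r3.
      return static_cast<ToyElementsKind>(packed + 1);
    }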
5124 
5125 template<class T>
5126 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
5127  int to_index = GetSequenceIndexFromFastElementsKind(
5128  TERMINAL_FAST_ELEMENTS_KIND);
5129  for (int i = 0; i <= to_index; ++i) {
5130  ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
5131  T stub(kind);
5132  stub.GetCode(isolate);
5133  if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
5134  T stub1(kind, DISABLE_ALLOCATION_SITES);
5135  stub1.GetCode(isolate);
5136  }
5137  }
5138 }
5139 
5140 
5142  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
5143  isolate);
5144  ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
5145  isolate);
5146  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
5147  isolate);
5148 }
5149 
5150 
5151 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
5152  Isolate* isolate) {
5153  ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
5154  for (int i = 0; i < 2; i++) {
5155  // For internal arrays we only need a few things
5156  InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
5157  stubh1.GetCode(isolate);
5158  InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
5159  stubh2.GetCode(isolate);
5160  InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
5161  stubh3.GetCode(isolate);
5162  }
5163 }
5164 
5165 
5166 void ArrayConstructorStub::GenerateDispatchToArrayStub(
5167  MacroAssembler* masm,
5168  AllocationSiteOverrideMode mode) {
5169  if (argument_count_ == ANY) {
5170  Label not_zero_case, not_one_case;
5171  __ tst(r0, r0);
5172  __ b(ne, &not_zero_case);
5173  CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
5174 
5175  __ bind(&not_zero_case);
5176  __ cmp(r0, Operand(1));
5177  __ b(gt, &not_one_case);
5178  CreateArrayDispatchOneArgument(masm, mode);
5179 
5180  __ bind(&not_one_case);
5181  CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
5182  } else if (argument_count_ == NONE) {
5183  CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
5184  } else if (argument_count_ == ONE) {
5185  CreateArrayDispatchOneArgument(masm, mode);
5186  } else if (argument_count_ == MORE_THAN_ONE) {
5187  CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
5188  } else {
5189  UNREACHABLE();
5190  }
5191 }
5192 
5193 
5194 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
5195  // ----------- S t a t e -------------
5196  // -- r0 : argc (only if argument_count_ == ANY)
5197  // -- r1 : constructor
5198  // -- r2 : AllocationSite or undefined
5199  // -- sp[0] : return address
5200  // -- sp[4] : last argument
5201  // -----------------------------------
5202 
5203  if (FLAG_debug_code) {
5204  // The array construct code is only set for the global and natives
5205  // builtin Array functions which always have maps.
5206 
5207  // Initial map for the builtin Array function should be a map.
5208  __ ldr(r4, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
5209  // Will both indicate a NULL and a Smi.
5210  __ tst(r4, Operand(kSmiTagMask));
5211  __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
5212  __ CompareObjectType(r4, r4, r5, MAP_TYPE);
5213  __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
5214 
5215  // We should either have undefined in r2 or a valid AllocationSite
5216  __ AssertUndefinedOrAllocationSite(r2, r4);
5217  }
5218 
5219  Label no_info;
5220  // Get the elements kind and case on that.
5221  __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
5222  __ b(eq, &no_info);
5223 
5224  __ ldr(r3, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
5225  __ SmiUntag(r3);
5226  STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
5227  __ and_(r3, r3, Operand(AllocationSite::ElementsKindBits::kMask));
5228  GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
5229 
5230  __ bind(&no_info);
5231  GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
5232 }
5233 
5234 
5235 void InternalArrayConstructorStub::GenerateCase(
5236  MacroAssembler* masm, ElementsKind kind) {
5237  __ cmp(r0, Operand(1));
5238 
5239  InternalArrayNoArgumentConstructorStub stub0(kind);
5240  __ TailCallStub(&stub0, lo);
5241 
5242  InternalArrayNArgumentsConstructorStub stubN(kind);
5243  __ TailCallStub(&stubN, hi);
5244 
5245  if (IsFastPackedElementsKind(kind)) {
5246  // We might need to create a holey array
5247  // look at the first argument
5248  __ ldr(r3, MemOperand(sp, 0));
5249  __ cmp(r3, Operand::Zero());
5250 
5251  InternalArraySingleArgumentConstructorStub
5252  stub1_holey(GetHoleyElementsKind(kind));
5253  __ TailCallStub(&stub1_holey, ne);
5254  }
5255 
5256  InternalArraySingleArgumentConstructorStub stub1(kind);
5257  __ TailCallStub(&stub1);
5258 }
5259 
5260 
5261 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
5262  // ----------- S t a t e -------------
5263  // -- r0 : argc
5264  // -- r1 : constructor
5265  // -- sp[0] : return address
5266  // -- sp[4] : last argument
5267  // -----------------------------------
5268 
5269  if (FLAG_debug_code) {
5270  // The array construct code is only set for the global and natives
5271  // builtin Array functions which always have maps.
5272 
5273  // Initial map for the builtin Array function should be a map.
5274  __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
5275  // Will both indicate a NULL and a Smi.
5276  __ tst(r3, Operand(kSmiTagMask));
5277  __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
5278  __ CompareObjectType(r3, r3, r4, MAP_TYPE);
5279  __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
5280  }
5281 
5282  // Figure out the right elements kind
5283  __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
5284  // Load the map's "bit field 2" into |result|. We only need the first byte,
5285  // but the following bit field extraction takes care of that anyway.
5286  __ ldrb(r3, FieldMemOperand(r3, Map::kBitField2Offset));
5287  // Retrieve elements_kind from bit field 2.
5288  __ Ubfx(r3, r3, Map::kElementsKindShift, Map::kElementsKindBitCount);
5289 
5290  if (FLAG_debug_code) {
5291  Label done;
5292  __ cmp(r3, Operand(FAST_ELEMENTS));
5293  __ b(eq, &done);
5294  __ cmp(r3, Operand(FAST_HOLEY_ELEMENTS));
5295  __ Assert(eq,
5296  kInvalidElementsKindForInternalArrayOrInternalPackedArray);
5297  __ bind(&done);
5298  }
5299 
5300  Label fast_elements_case;
5301  __ cmp(r3, Operand(FAST_ELEMENTS));
5302  __ b(eq, &fast_elements_case);
5303  GenerateCase(masm, FAST_HOLEY_ELEMENTS);
5304 
5305  __ bind(&fast_elements_case);
5306  GenerateCase(masm, FAST_ELEMENTS);
5307 }
5308 
5309 
5310 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
5311  // ----------- S t a t e -------------
5312  // -- r0 : callee
5313  // -- r4 : call_data
5314  // -- r2 : holder
5315  // -- r1 : api_function_address
5316  // -- cp : context
5317  // --
5318  // -- sp[0] : last argument
5319  // -- ...
5320  // -- sp[(argc - 1)* 4] : first argument
5321  // -- sp[argc * 4] : receiver
5322  // -----------------------------------
5323 
5324  Register callee = r0;
5325  Register call_data = r4;
5326  Register holder = r2;
5327  Register api_function_address = r1;
5328  Register context = cp;
5329 
5330  int argc = ArgumentBits::decode(bit_field_);
5331  bool is_store = IsStoreBits::decode(bit_field_);
5332  bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
5333 
5334  typedef FunctionCallbackArguments FCA;
5335 
5336  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
5337  STATIC_ASSERT(FCA::kCalleeIndex == 5);
5338  STATIC_ASSERT(FCA::kDataIndex == 4);
5339  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
5340  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
5341  STATIC_ASSERT(FCA::kIsolateIndex == 1);
5342  STATIC_ASSERT(FCA::kHolderIndex == 0);
5343  STATIC_ASSERT(FCA::kArgsLength == 7);
5344 
5345  Isolate* isolate = masm->isolate();
5346 
5347  // context save
5348  __ push(context);
5349  // load context from callee
5350  __ ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
5351 
5352  // callee
5353  __ push(callee);
5354 
5355  // call data
5356  __ push(call_data);
5357 
5358  Register scratch = call_data;
5359  if (!call_data_undefined) {
5360  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
5361  }
5362  // return value
5363  __ push(scratch);
5364  // return value default
5365  __ push(scratch);
5366  // isolate
5367  __ mov(scratch,
5368  Operand(ExternalReference::isolate_address(isolate)));
5369  __ push(scratch);
5370  // holder
5371  __ push(holder);
5372 
5373  // Prepare arguments.
5374  __ mov(scratch, sp);
5375 
5376  // Allocate the v8::Arguments structure in the arguments' space since
5377  // it's not controlled by GC.
5378  const int kApiStackSpace = 4;
5379 
5380  FrameAndConstantPoolScope frame_scope(masm, StackFrame::MANUAL);
5381  __ EnterExitFrame(false, kApiStackSpace);
5382 
5383  ASSERT(!api_function_address.is(r0) && !scratch.is(r0));
5384  // r0 = FunctionCallbackInfo&
5385  // Arguments is after the return address.
5386  __ add(r0, sp, Operand(1 * kPointerSize));
5387  // FunctionCallbackInfo::implicit_args_
5388  __ str(scratch, MemOperand(r0, 0 * kPointerSize));
5389  // FunctionCallbackInfo::values_
5390  __ add(ip, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
5391  __ str(ip, MemOperand(r0, 1 * kPointerSize));
5392  // FunctionCallbackInfo::length_ = argc
5393  __ mov(ip, Operand(argc));
5394  __ str(ip, MemOperand(r0, 2 * kPointerSize));
5395  // FunctionCallbackInfo::is_construct_call = 0
5396  __ mov(ip, Operand::Zero());
5397  __ str(ip, MemOperand(r0, 3 * kPointerSize));
5398 
5399  const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
5400  Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
5401  ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
5402  ApiFunction thunk_fun(thunk_address);
5403  ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
5404  masm->isolate());
5405 
5406  AllowExternalCallThatCantCauseGC scope(masm);
5407  MemOperand context_restore_operand(
5408  fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
5409  // Stores return the first js argument
5410  int return_value_offset = 0;
5411  if (is_store) {
5412  return_value_offset = 2 + FCA::kArgsLength;
5413  } else {
5414  return_value_offset = 2 + FCA::kReturnValueOffset;
5415  }
5416  MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
5417 
5418  __ CallApiFunctionAndReturn(api_function_address,
5419  thunk_ref,
5420  kStackUnwindSpace,
5421  return_value_operand,
5422  &context_restore_operand);
5423 }
5424 
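CallApiFunctionStub builds the FunctionCallbackInfo "implicit args" block with the seven pushes above; because the ARM stack grows down, the last value pushed (the holder) ends up at the lowest address and therefore at index 0, matching the STATIC_ASSERTed indices. A hypothetical mirror of that layout (indices taken from the asserts, names illustrative):

    // Index of each implicit argument within the block whose start address is
    // stored into FunctionCallbackInfo::implicit_args_; values_ is then pointed
    // (kArgsLength - 1 + argc) slots above the block, into the JS arguments,
    // and length_ is set to argc.
    enum ToyImplicitArgIndex {
      kToyHolder = 0,              // pushed last
      kToyIsolate = 1,
      kToyReturnValueDefault = 2,
      kToyReturnValue = 3,
      kToyCallData = 4,
      kToyCallee = 5,
      kToyContextSave = 6,         // pushed first
      kToyImplicitArgCount = 7
    };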
5425 
5426 void CallApiGetterStub::Generate(MacroAssembler* masm) {
5427  // ----------- S t a t e -------------
5428  // -- sp[0] : name
5429  // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
5430  // -- ...
5431  // -- r2 : api_function_address
5432  // -----------------------------------
5433 
5434  Register api_function_address = r2;
5435 
5436  __ mov(r0, sp); // r0 = Handle<Name>
5437  __ add(r1, r0, Operand(1 * kPointerSize)); // r1 = PCA
5438 
5439  const int kApiStackSpace = 1;
5440  FrameAndConstantPoolScope frame_scope(masm, StackFrame::MANUAL);
5441  __ EnterExitFrame(false, kApiStackSpace);
5442 
5443  // Create PropertyAccessorInfo instance on the stack above the exit frame with
5444  // r1 (internal::Object** args_) as the data.
5445  __ str(r1, MemOperand(sp, 1 * kPointerSize));
5446  __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
5447 
5448  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
5449 
5450  Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
5451  ExternalReference::Type thunk_type =
5452  ExternalReference::PROFILING_GETTER_CALL;
5453  ApiFunction thunk_fun(thunk_address);
5454  ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
5455  masm->isolate());
5456  __ CallApiFunctionAndReturn(api_function_address,
5457  thunk_ref,
5458  kStackUnwindSpace,
5459  MemOperand(fp, 6 * kPointerSize),
5460  NULL);
5461 }
5462 
5463 
5464 #undef __
5465 
5466 } } // namespace v8::internal
5467 
5468 #endif // V8_TARGET_ARCH_ARM