v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
code-stubs-arm64.cc
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if V8_TARGET_ARCH_ARM64
31 
32 #include "bootstrapper.h"
33 #include "code-stubs.h"
34 #include "regexp-macro-assembler.h"
35 #include "stub-cache.h"
36 
37 namespace v8 {
38 namespace internal {
39 
40 
41 void FastNewClosureStub::InitializeInterfaceDescriptor(
42  Isolate* isolate,
43  CodeStubInterfaceDescriptor* descriptor) {
44  // x2: function info
45  static Register registers[] = { x2 };
46  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
47  descriptor->register_params_ = registers;
48  descriptor->deoptimization_handler_ =
49  Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
50 }
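// --- Editorial note: illustrative sketch, not part of the V8 source. Every
// descriptor initializer in this file derives register_param_count_ with the
// sizeof(array) / sizeof(array[0]) idiom, so the count can never drift out of
// sync with the register initializer list. The template below (a hypothetical
// helper, named here only for illustration) computes the same value.
template <typename T, int N>
static inline int ArrayLengthSketch(T (&)[N]) {
  return N;  // same value as sizeof(array) / sizeof(array[0])
}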
51 
52 
53 void FastNewContextStub::InitializeInterfaceDescriptor(
54  Isolate* isolate,
55  CodeStubInterfaceDescriptor* descriptor) {
56  // x1: function
57  static Register registers[] = { x1 };
58  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
59  descriptor->register_params_ = registers;
60  descriptor->deoptimization_handler_ = NULL;
61 }
62 
63 
64 void ToNumberStub::InitializeInterfaceDescriptor(
65  Isolate* isolate,
66  CodeStubInterfaceDescriptor* descriptor) {
67  // x0: value
68  static Register registers[] = { x0 };
69  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
70  descriptor->register_params_ = registers;
71  descriptor->deoptimization_handler_ = NULL;
72 }
73 
74 
75 void NumberToStringStub::InitializeInterfaceDescriptor(
76  Isolate* isolate,
77  CodeStubInterfaceDescriptor* descriptor) {
78  // x0: value
79  static Register registers[] = { x0 };
80  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
81  descriptor->register_params_ = registers;
82  descriptor->deoptimization_handler_ =
83  Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
84 }
85 
86 
87 void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
88  Isolate* isolate,
89  CodeStubInterfaceDescriptor* descriptor) {
90  // x3: array literals array
91  // x2: array literal index
92  // x1: constant elements
93  static Register registers[] = { x3, x2, x1 };
94  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
95  descriptor->register_params_ = registers;
96  descriptor->deoptimization_handler_ =
97  Runtime::FunctionForId(
98  Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
99 }
100 
101 
102 void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
103  Isolate* isolate,
104  CodeStubInterfaceDescriptor* descriptor) {
105  // x3: object literals array
106  // x2: object literal index
107  // x1: constant properties
108  // x0: object literal flags
109  static Register registers[] = { x3, x2, x1, x0 };
110  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
111  descriptor->register_params_ = registers;
112  descriptor->deoptimization_handler_ =
113  Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
114 }
115 
116 
117 void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
118  Isolate* isolate,
119  CodeStubInterfaceDescriptor* descriptor) {
120  // x2: feedback vector
121  // x3: call feedback slot
122  static Register registers[] = { x2, x3 };
123  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
124  descriptor->register_params_ = registers;
125  descriptor->deoptimization_handler_ = NULL;
126 }
127 
128 
129 void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
130  Isolate* isolate,
131  CodeStubInterfaceDescriptor* descriptor) {
132  // x1: receiver
133  // x0: key
134  static Register registers[] = { x1, x0 };
135  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
136  descriptor->register_params_ = registers;
137  descriptor->deoptimization_handler_ =
138  FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
139 }
140 
141 
142 void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
143  Isolate* isolate,
144  CodeStubInterfaceDescriptor* descriptor) {
145  // x1: receiver
146  // x0: key
147  static Register registers[] = { x1, x0 };
148  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
149  descriptor->register_params_ = registers;
150  descriptor->deoptimization_handler_ =
151  FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
152 }
153 
154 
155 void RegExpConstructResultStub::InitializeInterfaceDescriptor(
156  Isolate* isolate,
157  CodeStubInterfaceDescriptor* descriptor) {
158  // x2: length
159  // x1: index (of last match)
160  // x0: string
161  static Register registers[] = { x2, x1, x0 };
162  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
163  descriptor->register_params_ = registers;
164  descriptor->deoptimization_handler_ =
165  Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
166 }
167 
168 
169 void LoadFieldStub::InitializeInterfaceDescriptor(
170  Isolate* isolate,
171  CodeStubInterfaceDescriptor* descriptor) {
172  // x0: receiver
173  static Register registers[] = { x0 };
174  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
175  descriptor->register_params_ = registers;
176  descriptor->deoptimization_handler_ = NULL;
177 }
178 
179 
180 void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
181  Isolate* isolate,
182  CodeStubInterfaceDescriptor* descriptor) {
183  // x1: receiver
184  static Register registers[] = { x1 };
185  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
186  descriptor->register_params_ = registers;
187  descriptor->deoptimization_handler_ = NULL;
188 }
189 
190 
191 void StringLengthStub::InitializeInterfaceDescriptor(
192  Isolate* isolate,
193  CodeStubInterfaceDescriptor* descriptor) {
194  static Register registers[] = { x0, x2 };
195  descriptor->register_param_count_ = 2;
196  descriptor->register_params_ = registers;
197  descriptor->deoptimization_handler_ = NULL;
198 }
199 
200 
201 void KeyedStringLengthStub::InitializeInterfaceDescriptor(
202  Isolate* isolate,
203  CodeStubInterfaceDescriptor* descriptor) {
204  static Register registers[] = { x1, x0 };
205  descriptor->register_param_count_ = 2;
206  descriptor->register_params_ = registers;
207  descriptor->deoptimization_handler_ = NULL;
208 }
209 
210 
211 void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
212  Isolate* isolate,
213  CodeStubInterfaceDescriptor* descriptor) {
214  // x2: receiver
215  // x1: key
216  // x0: value
217  static Register registers[] = { x2, x1, x0 };
218  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
219  descriptor->register_params_ = registers;
220  descriptor->deoptimization_handler_ =
221  FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
222 }
223 
224 
225 void TransitionElementsKindStub::InitializeInterfaceDescriptor(
226  Isolate* isolate,
227  CodeStubInterfaceDescriptor* descriptor) {
228  // x0: value (js_array)
229  // x1: to_map
230  static Register registers[] = { x0, x1 };
231  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
232  descriptor->register_params_ = registers;
233  Address entry =
234  Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
235  descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
236 }
237 
238 
239 void CompareNilICStub::InitializeInterfaceDescriptor(
240  Isolate* isolate,
241  CodeStubInterfaceDescriptor* descriptor) {
242  // x0: value to compare
243  static Register registers[] = { x0 };
244  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
245  descriptor->register_params_ = registers;
246  descriptor->deoptimization_handler_ =
247  FUNCTION_ADDR(CompareNilIC_Miss);
248  descriptor->SetMissHandler(
249  ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
250 }
251 
252 
253 static void InitializeArrayConstructorDescriptor(
254  Isolate* isolate,
255  CodeStubInterfaceDescriptor* descriptor,
256  int constant_stack_parameter_count) {
257  // x1: function
258  // x2: allocation site with elements kind
259  // x0: number of arguments to the constructor function
260  static Register registers_variable_args[] = { x1, x2, x0 };
261  static Register registers_no_args[] = { x1, x2 };
262 
263  if (constant_stack_parameter_count == 0) {
264  descriptor->register_param_count_ =
265  sizeof(registers_no_args) / sizeof(registers_no_args[0]);
266  descriptor->register_params_ = registers_no_args;
267  } else {
268  // stack param count needs (constructor pointer, and single argument)
269  descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
270  descriptor->stack_parameter_count_ = x0;
271  descriptor->register_param_count_ =
272  sizeof(registers_variable_args) / sizeof(registers_variable_args[0]);
273  descriptor->register_params_ = registers_variable_args;
274  }
275 
276  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
277  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
278  descriptor->deoptimization_handler_ =
279  Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
280 }
281 
282 
283 void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
284  Isolate* isolate,
285  CodeStubInterfaceDescriptor* descriptor) {
286  InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
287 }
288 
289 
290 void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
291  Isolate* isolate,
292  CodeStubInterfaceDescriptor* descriptor) {
293  InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
294 }
295 
296 
297 void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
298  Isolate* isolate,
299  CodeStubInterfaceDescriptor* descriptor) {
300  InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
301 }
302 
303 
304 static void InitializeInternalArrayConstructorDescriptor(
305  Isolate* isolate,
306  CodeStubInterfaceDescriptor* descriptor,
307  int constant_stack_parameter_count) {
308  // x1: constructor function
309  // x0: number of arguments to the constructor function
310  static Register registers_variable_args[] = { x1, x0 };
311  static Register registers_no_args[] = { x1 };
312 
313  if (constant_stack_parameter_count == 0) {
314  descriptor->register_param_count_ =
315  sizeof(registers_no_args) / sizeof(registers_no_args[0]);
316  descriptor->register_params_ = registers_no_args;
317  } else {
318  // stack param count needs (constructor pointer, and single argument)
319  descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
320  descriptor->stack_parameter_count_ = x0;
321  descriptor->register_param_count_ =
322  sizeof(registers_variable_args) / sizeof(registers_variable_args[0]);
323  descriptor->register_params_ = registers_variable_args;
324  }
325 
326  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
327  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
328  descriptor->deoptimization_handler_ =
329  Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
330 }
331 
332 
333 void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
334  Isolate* isolate,
335  CodeStubInterfaceDescriptor* descriptor) {
336  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
337 }
338 
339 
340 void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
341  Isolate* isolate,
342  CodeStubInterfaceDescriptor* descriptor) {
343  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
344 }
345 
346 
347 void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
348  Isolate* isolate,
349  CodeStubInterfaceDescriptor* descriptor) {
350  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
351 }
352 
353 
354 void ToBooleanStub::InitializeInterfaceDescriptor(
355  Isolate* isolate,
356  CodeStubInterfaceDescriptor* descriptor) {
357  // x0: value
358  static Register registers[] = { x0 };
359  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
360  descriptor->register_params_ = registers;
361  descriptor->deoptimization_handler_ = FUNCTION_ADDR(ToBooleanIC_Miss);
362  descriptor->SetMissHandler(
363  ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
364 }
365 
366 
367 void StoreGlobalStub::InitializeInterfaceDescriptor(
368  Isolate* isolate,
369  CodeStubInterfaceDescriptor* descriptor) {
370  // x1: receiver
371  // x2: key (unused)
372  // x0: value
373  static Register registers[] = { x1, x2, x0 };
374  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
375  descriptor->register_params_ = registers;
376  descriptor->deoptimization_handler_ =
377  FUNCTION_ADDR(StoreIC_MissFromStubFailure);
378 }
379 
380 
381 void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
382  Isolate* isolate,
383  CodeStubInterfaceDescriptor* descriptor) {
384  // x0: value
385  // x3: target map
386  // x1: key
387  // x2: receiver
388  static Register registers[] = { x0, x3, x1, x2 };
389  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
390  descriptor->register_params_ = registers;
391  descriptor->deoptimization_handler_ =
392  FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
393 }
394 
395 
396 void BinaryOpICStub::InitializeInterfaceDescriptor(
397  Isolate* isolate,
398  CodeStubInterfaceDescriptor* descriptor) {
399  // x1: left operand
400  // x0: right operand
401  static Register registers[] = { x1, x0 };
402  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
403  descriptor->register_params_ = registers;
404  descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
405  descriptor->SetMissHandler(
406  ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
407 }
408 
409 
410 void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
411  Isolate* isolate,
412  CodeStubInterfaceDescriptor* descriptor) {
413  // x2: allocation site
414  // x1: left operand
415  // x0: right operand
416  static Register registers[] = { x2, x1, x0 };
417  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
418  descriptor->register_params_ = registers;
419  descriptor->deoptimization_handler_ =
420  FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
421 }
422 
423 
424 void StringAddStub::InitializeInterfaceDescriptor(
425  Isolate* isolate,
426  CodeStubInterfaceDescriptor* descriptor) {
427  // x1: left operand
428  // x0: right operand
429  static Register registers[] = { x1, x0 };
430  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
431  descriptor->register_params_ = registers;
432  descriptor->deoptimization_handler_ =
433  Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
434 }
435 
436 
437 void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
438  static PlatformCallInterfaceDescriptor default_descriptor =
439  PlatformCallInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
440 
441  static PlatformCallInterfaceDescriptor noInlineDescriptor =
442  PlatformCallInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
443 
444  {
445  CallInterfaceDescriptor* descriptor =
446  isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
447  static Register registers[] = { x1, // JSFunction
448  cp, // context
449  x0, // actual number of arguments
450  x2, // expected number of arguments
451  };
452  static Representation representations[] = {
453  Representation::Tagged(), // JSFunction
454  Representation::Tagged(), // context
455  Representation::Integer32(), // actual number of arguments
456  Representation::Integer32(), // expected number of arguments
457  };
458  descriptor->register_param_count_ = 4;
459  descriptor->register_params_ = registers;
460  descriptor->param_representations_ = representations;
461  descriptor->platform_specific_descriptor_ = &default_descriptor;
462  }
463  {
464  CallInterfaceDescriptor* descriptor =
465  isolate->call_descriptor(Isolate::KeyedCall);
466  static Register registers[] = { cp, // context
467  x2, // key
468  };
469  static Representation representations[] = {
470  Representation::Tagged(), // context
471  Representation::Tagged(), // key
472  };
473  descriptor->register_param_count_ = 2;
474  descriptor->register_params_ = registers;
475  descriptor->param_representations_ = representations;
476  descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
477  }
478  {
479  CallInterfaceDescriptor* descriptor =
480  isolate->call_descriptor(Isolate::NamedCall);
481  static Register registers[] = { cp, // context
482  x2, // name
483  };
484  static Representation representations[] = {
485  Representation::Tagged(), // context
486  Representation::Tagged(), // name
487  };
488  descriptor->register_param_count_ = 2;
489  descriptor->register_params_ = registers;
490  descriptor->param_representations_ = representations;
491  descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
492  }
493  {
494  CallInterfaceDescriptor* descriptor =
495  isolate->call_descriptor(Isolate::CallHandler);
496  static Register registers[] = { cp, // context
497  x0, // receiver
498  };
499  static Representation representations[] = {
500  Representation::Tagged(), // context
501  Representation::Tagged(), // receiver
502  };
503  descriptor->register_param_count_ = 2;
504  descriptor->register_params_ = registers;
505  descriptor->param_representations_ = representations;
506  descriptor->platform_specific_descriptor_ = &default_descriptor;
507  }
508  {
509  CallInterfaceDescriptor* descriptor =
510  isolate->call_descriptor(Isolate::ApiFunctionCall);
511  static Register registers[] = { x0, // callee
512  x4, // call_data
513  x2, // holder
514  x1, // api_function_address
515  cp, // context
516  };
517  static Representation representations[] = {
518  Representation::Tagged(), // callee
519  Representation::Tagged(), // call_data
520  Representation::Tagged(), // holder
521  Representation::External(), // api_function_address
522  Representation::Tagged(), // context
523  };
524  descriptor->register_param_count_ = 5;
525  descriptor->register_params_ = registers;
526  descriptor->param_representations_ = representations;
527  descriptor->platform_specific_descriptor_ = &default_descriptor;
528  }
529 }
530 
531 
532 #define __ ACCESS_MASM(masm)
533 
534 
535 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
536  // Update the static counter each time a new code stub is generated.
537  Isolate* isolate = masm->isolate();
538  isolate->counters()->code_stubs()->Increment();
539 
540  CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
541  int param_count = descriptor->register_param_count_;
542  {
543  // Call the runtime system in a fresh internal frame.
544  FrameScope scope(masm, StackFrame::INTERNAL);
545  ASSERT((descriptor->register_param_count_ == 0) ||
546  x0.Is(descriptor->register_params_[param_count - 1]));
547 
548  // Push arguments
549  MacroAssembler::PushPopQueue queue(masm);
550  for (int i = 0; i < param_count; ++i) {
551  queue.Queue(descriptor->register_params_[i]);
552  }
553  queue.PushQueued();
554 
555  ExternalReference miss = descriptor->miss_handler();
556  __ CallExternalReference(miss, descriptor->register_param_count_);
557  }
558 
559  __ Ret();
560 }
561 
562 
563 void DoubleToIStub::Generate(MacroAssembler* masm) {
564  Label done;
565  Register input = source();
566  Register result = destination();
567  ASSERT(is_truncating());
568 
569  ASSERT(result.Is64Bits());
570  ASSERT(jssp.Is(masm->StackPointer()));
571 
572  int double_offset = offset();
573 
574  DoubleRegister double_scratch = d0; // only used if !skip_fastpath()
575  Register scratch1 = GetAllocatableRegisterThatIsNotOneOf(input, result);
576  Register scratch2 =
577  GetAllocatableRegisterThatIsNotOneOf(input, result, scratch1);
578 
579  __ Push(scratch1, scratch2);
580  // Account for saved regs if input is jssp.
581  if (input.is(jssp)) double_offset += 2 * kPointerSize;
582 
583  if (!skip_fastpath()) {
584  __ Push(double_scratch);
585  if (input.is(jssp)) double_offset += 1 * kDoubleSize;
586  __ Ldr(double_scratch, MemOperand(input, double_offset));
587  // Try to convert with a FPU convert instruction. This handles all
588  // non-saturating cases.
589  __ TryConvertDoubleToInt64(result, double_scratch, &done);
590  __ Fmov(result, double_scratch);
591  } else {
592  __ Ldr(result, MemOperand(input, double_offset));
593  }
594 
595  // If we reach here we need to manually convert the input to an int32.
596 
597  // Extract the exponent.
598  Register exponent = scratch1;
599  __ Ubfx(exponent, result, HeapNumber::kMantissaBits,
600  HeapNumber::kExponentBits);
601 
602  // If the exponent is >= 84 (kMantissaBits + 32), the result is always 0 since
603  // the mantissa gets shifted completely out of the int32_t result.
604  __ Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32);
605  __ CzeroX(result, ge);
606  __ B(ge, &done);
607 
608  // The Fcvtzs sequence handles all cases except where the conversion causes
609  // signed overflow in the int64_t target. Since we've already handled
610  // exponents >= 84, we can guarantee that 63 <= exponent < 84.
611 
612  if (masm->emit_debug_code()) {
613  __ Cmp(exponent, HeapNumber::kExponentBias + 63);
614  // Exponents less than this should have been handled by the Fcvt case.
615  __ Check(ge, kUnexpectedValue);
616  }
617 
618  // Isolate the mantissa bits, and set the implicit '1'.
619  Register mantissa = scratch2;
620  __ Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits);
621  __ Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits);
622 
623  // Negate the mantissa if necessary.
624  __ Tst(result, kXSignMask);
625  __ Cneg(mantissa, mantissa, ne);
626 
627  // Shift the mantissa bits in the correct place. We know that we have to shift
628  // it left here, because exponent >= 63 >= kMantissaBits.
629  __ Sub(exponent, exponent,
630  HeapNumber::kExponentBias + HeapNumber::kMantissaBits);
631  __ Lsl(result, mantissa, exponent);
632 
633  __ Bind(&done);
634  if (!skip_fastpath()) {
635  __ Pop(double_scratch);
636  }
637  __ Pop(scratch2, scratch1);
638  __ Ret();
639 }
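// --- Editorial note: illustrative, standalone sketch, not part of the V8
// source. It mirrors the manual conversion path above in plain C++ so the
// exponent/mantissa manipulation is easier to follow. The helper name and the
// raw IEEE-754 bit pattern parameter are assumptions made for this sketch
// only; as in the stub, the unbiased exponent is assumed to be in [63, 84),
// so the final shift is always a left shift of fewer than 32 bits, and the
// low 32 bits of the returned value correspond to the ECMA-262 ToInt32 result.
static inline int64_t TruncateLargeDoubleToInt64Sketch(uint64_t bits) {
  int64_t exponent = static_cast<int64_t>((bits >> 52) & 0x7FF);   // biased exponent
  uint64_t mantissa = (bits & ((1ULL << 52) - 1)) | (1ULL << 52);  // set implicit '1'
  int shift = static_cast<int>(exponent - 1023 - 52);              // bias, mantissa bits
  uint64_t magnitude = mantissa << shift;                          // always a left shift here
  int64_t result = static_cast<int64_t>(magnitude);
  return (bits >> 63) != 0 ? -result : result;                     // apply the sign bit
}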
640 
641 
642 // See call site for description.
643 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
644  Register left,
645  Register right,
646  Register scratch,
647  FPRegister double_scratch,
648  Label* slow,
649  Condition cond) {
650  ASSERT(!AreAliased(left, right, scratch));
651  Label not_identical, return_equal, heap_number;
652  Register result = x0;
653 
654  __ Cmp(right, left);
655  __ B(ne, &not_identical);
656 
657  // Test for NaN. Sadly, we can't just compare to factory::nan_value(),
658  // so we do the second best thing - test it ourselves.
659  // They are both equal and they are not both Smis so both of them are not
660  // Smis. If it's not a heap number, then return equal.
661  if ((cond == lt) || (cond == gt)) {
662  __ JumpIfObjectType(right, scratch, scratch, FIRST_SPEC_OBJECT_TYPE, slow,
663  ge);
664  } else {
665  Register right_type = scratch;
666  __ JumpIfObjectType(right, right_type, right_type, HEAP_NUMBER_TYPE,
667  &heap_number);
668  // Comparing JS objects with <=, >= is complicated.
669  if (cond != eq) {
670  __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
671  __ B(ge, slow);
672  // Normally here we fall through to return_equal, but undefined is
673  // special: (undefined == undefined) == true, but
674  // (undefined <= undefined) == false! See ECMAScript 11.8.5.
675  if ((cond == le) || (cond == ge)) {
676  __ Cmp(right_type, ODDBALL_TYPE);
677  __ B(ne, &return_equal);
678  __ JumpIfNotRoot(right, Heap::kUndefinedValueRootIndex, &return_equal);
679  if (cond == le) {
680  // undefined <= undefined should fail.
681  __ Mov(result, GREATER);
682  } else {
683  // undefined >= undefined should fail.
684  __ Mov(result, LESS);
685  }
686  __ Ret();
687  }
688  }
689  }
690 
691  __ Bind(&return_equal);
692  if (cond == lt) {
693  __ Mov(result, GREATER); // Things aren't less than themselves.
694  } else if (cond == gt) {
695  __ Mov(result, LESS); // Things aren't greater than themselves.
696  } else {
697  __ Mov(result, EQUAL); // Things are <=, >=, ==, === themselves.
698  }
699  __ Ret();
700 
701  // Cases lt and gt have been handled earlier, and case ne is never seen, as
702  // it is handled in the parser (see Parser::ParseBinaryExpression). We are
703  // only concerned with cases ge, le and eq here.
704  if ((cond != lt) && (cond != gt)) {
705  ASSERT((cond == ge) || (cond == le) || (cond == eq));
706  __ Bind(&heap_number);
707  // Left and right are identical pointers to a heap number object. Return
708  // non-equal if the heap number is a NaN, and equal otherwise. Comparing
709  // the number to itself will set the overflow flag iff the number is NaN.
710  __ Ldr(double_scratch, FieldMemOperand(right, HeapNumber::kValueOffset));
711  __ Fcmp(double_scratch, double_scratch);
712  __ B(vc, &return_equal); // Not NaN, so treat as normal heap number.
713 
714  if (cond == le) {
715  __ Mov(result, GREATER);
716  } else {
717  __ Mov(result, LESS);
718  }
719  __ Ret();
720  }
721 
722  // No fall through here.
723  if (FLAG_debug_code) {
724  __ Unreachable();
725  }
726 
727  __ Bind(&not_identical);
728 }
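// --- Editorial note: illustrative sketch, not part of the V8 source. The
// heap-number branch above relies on a floating-point value comparing
// unordered with itself exactly when it is NaN: Fcmp sets the V (overflow)
// flag only for unordered operands. The equivalent check in plain C++:
static inline bool IsNaNSketch(double heap_number_value) {
  return heap_number_value != heap_number_value;  // true only for NaN
}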
729 
730 
731 // See call site for description.
732 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
733  Register left,
734  Register right,
735  Register left_type,
736  Register right_type,
737  Register scratch) {
738  ASSERT(!AreAliased(left, right, left_type, right_type, scratch));
739 
740  if (masm->emit_debug_code()) {
741  // We assume that the arguments are not identical.
742  __ Cmp(left, right);
743  __ Assert(ne, kExpectedNonIdenticalObjects);
744  }
745 
746  // If either operand is a JS object or an oddball value, then they are not
747  // equal since their pointers are different.
748  // There is no test for undetectability in strict equality.
749  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
750  Label right_non_object;
751 
752  __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
753  __ B(lt, &right_non_object);
754 
755  // Return non-zero - x0 already contains a non-zero pointer.
756  ASSERT(left.is(x0) || right.is(x0));
757  Label return_not_equal;
758  __ Bind(&return_not_equal);
759  __ Ret();
760 
761  __ Bind(&right_non_object);
762 
763  // Check for oddballs: true, false, null, undefined.
764  __ Cmp(right_type, ODDBALL_TYPE);
765 
766  // If right is not ODDBALL, test left. Otherwise, set eq condition.
767  __ Ccmp(left_type, ODDBALL_TYPE, ZFlag, ne);
768 
769  // If right or left is not ODDBALL, test left >= FIRST_SPEC_OBJECT_TYPE.
770  // Otherwise, right or left is ODDBALL, so set a ge condition.
771  __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NVFlag, ne);
772 
773  __ B(ge, &return_not_equal);
774 
775  // Internalized strings are unique, so they can only be equal if they are the
776  // same object. We have already tested that case, so if left and right are
777  // both internalized strings, they cannot be equal.
778  STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
779  __ Orr(scratch, left_type, right_type);
780  __ TestAndBranchIfAllClear(
781  scratch, kIsNotStringMask | kIsNotInternalizedMask, &return_not_equal);
782 }
783 
784 
785 // See call site for description.
786 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
787  Register left,
788  Register right,
789  FPRegister left_d,
790  FPRegister right_d,
791  Register scratch,
792  Label* slow,
793  bool strict) {
794  ASSERT(!AreAliased(left, right, scratch));
795  ASSERT(!AreAliased(left_d, right_d));
796  ASSERT((left.is(x0) && right.is(x1)) ||
797  (right.is(x0) && left.is(x1)));
798  Register result = x0;
799 
800  Label right_is_smi, done;
801  __ JumpIfSmi(right, &right_is_smi);
802 
803  // Left is the smi. Check whether right is a heap number.
804  if (strict) {
805  // If right is not a number and left is a smi, then strict equality cannot
806  // succeed. Return non-equal.
807  Label is_heap_number;
808  __ JumpIfObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE,
809  &is_heap_number);
810  // Register right is a non-zero pointer, which is a valid NOT_EQUAL result.
811  if (!right.is(result)) {
812  __ Mov(result, NOT_EQUAL);
813  }
814  __ Ret();
815  __ Bind(&is_heap_number);
816  } else {
817  // Smi compared non-strictly with a non-smi, non-heap-number. Call the
818  // runtime.
819  __ JumpIfNotObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE, slow);
820  }
821 
822  // Left is the smi. Right is a heap number. Load right value into right_d, and
823  // convert left smi into double in left_d.
824  __ Ldr(right_d, FieldMemOperand(right, HeapNumber::kValueOffset));
825  __ SmiUntagToDouble(left_d, left);
826  __ B(&done);
827 
828  __ Bind(&right_is_smi);
829  // Right is a smi. Check whether the non-smi left is a heap number.
830  if (strict) {
831  // If left is not a number and right is a smi then strict equality cannot
832  // succeed. Return non-equal.
833  Label is_heap_number;
834  __ JumpIfObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE,
835  &is_heap_number);
836  // Register left is a non-zero pointer, which is a valid NOT_EQUAL result.
837  if (!left.is(result)) {
838  __ Mov(result, NOT_EQUAL);
839  }
840  __ Ret();
841  __ Bind(&is_heap_number);
842  } else {
843  // Smi compared non-strictly with a non-smi, non-heap-number. Call the
844  // runtime.
845  __ JumpIfNotObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE, slow);
846  }
847 
848  // Right is the smi. Left is a heap number. Load left value into left_d, and
849  // convert right smi into double in right_d.
850  __ Ldr(left_d, FieldMemOperand(left, HeapNumber::kValueOffset));
851  __ SmiUntagToDouble(right_d, right);
852 
853  // Fall through to both_loaded_as_doubles.
854  __ Bind(&done);
855 }
856 
857 
858 // Fast negative check for internalized-to-internalized equality.
859 // See call site for description.
860 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
861  Register left,
862  Register right,
863  Register left_map,
864  Register right_map,
865  Register left_type,
866  Register right_type,
867  Label* possible_strings,
868  Label* not_both_strings) {
869  ASSERT(!AreAliased(left, right, left_map, right_map, left_type, right_type));
870  Register result = x0;
871 
872  Label object_test;
873  STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
874  // TODO(all): reexamine this branch sequence for optimisation wrt branch
875  // prediction.
876  __ Tbnz(right_type, MaskToBit(kIsNotStringMask), &object_test);
877  __ Tbnz(right_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
878  __ Tbnz(left_type, MaskToBit(kIsNotStringMask), not_both_strings);
879  __ Tbnz(left_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
880 
881  // Both are internalized. We already checked that they weren't the same
882  // pointer, so they are not equal.
883  __ Mov(result, NOT_EQUAL);
884  __ Ret();
885 
886  __ Bind(&object_test);
887 
888  __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
889 
890  // If right >= FIRST_SPEC_OBJECT_TYPE, test left.
891  // Otherwise, right < FIRST_SPEC_OBJECT_TYPE, so set lt condition.
892  __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NFlag, ge);
893 
894  __ B(lt, not_both_strings);
895 
896  // If both objects are undetectable, they are equal. Otherwise, they are not
897  // equal, since they are different objects and an object is not equal to
898  // undefined.
899 
900  // Returning here, so we can corrupt right_type and left_type.
901  Register right_bitfield = right_type;
902  Register left_bitfield = left_type;
903  __ Ldrb(right_bitfield, FieldMemOperand(right_map, Map::kBitFieldOffset));
904  __ Ldrb(left_bitfield, FieldMemOperand(left_map, Map::kBitFieldOffset));
905  __ And(result, right_bitfield, left_bitfield);
906  __ And(result, result, 1 << Map::kIsUndetectable);
907  __ Eor(result, result, 1 << Map::kIsUndetectable);
908  __ Ret();
909 }
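// --- Editorial note: illustrative sketch, not part of the V8 source. The
// And/Eor sequence above yields EQUAL (0) exactly when both maps have the
// Map::kIsUndetectable bit set, and a non-zero (NOT_EQUAL) value otherwise.
// The helper name and parameters are assumptions made for this sketch only.
static inline int UndetectableEqualitySketch(int left_bitfield,
                                             int right_bitfield,
                                             int undetectable_mask) {
  int common_bits = left_bitfield & right_bitfield;    // bits set in both maps
  int undetectable = common_bits & undetectable_mask;  // isolate the undetectable bit
  return undetectable ^ undetectable_mask;             // 0 (EQUAL) iff both undetectable
}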
910 
911 
912 static void ICCompareStub_CheckInputType(MacroAssembler* masm,
913  Register input,
914  Register scratch,
915  CompareIC::State expected,
916  Label* fail) {
917  Label ok;
918  if (expected == CompareIC::SMI) {
919  __ JumpIfNotSmi(input, fail);
920  } else if (expected == CompareIC::NUMBER) {
921  __ JumpIfSmi(input, &ok);
922  __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
923  DONT_DO_SMI_CHECK);
924  }
925  // We could be strict about internalized/non-internalized here, but as long as
926  // hydrogen doesn't care, the stub doesn't have to care either.
927  __ Bind(&ok);
928 }
929 
930 
931 void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
932  Register lhs = x1;
933  Register rhs = x0;
934  Register result = x0;
935  Condition cond = GetCondition();
936 
937  Label miss;
938  ICCompareStub_CheckInputType(masm, lhs, x2, left_, &miss);
939  ICCompareStub_CheckInputType(masm, rhs, x3, right_, &miss);
940 
941  Label slow; // Call builtin.
942  Label not_smis, both_loaded_as_doubles;
943  Label not_two_smis, smi_done;
944  __ JumpIfEitherNotSmi(lhs, rhs, &not_two_smis);
945  __ SmiUntag(lhs);
946  __ Sub(result, lhs, Operand::UntagSmi(rhs));
947  __ Ret();
948 
949  __ Bind(&not_two_smis);
950 
951  // NOTICE! This code is only reached after a smi-fast-case check, so it is
952  // certain that at least one operand isn't a smi.
953 
954  // Handle the case where the objects are identical. Either returns the answer
955  // or goes to slow. Only falls through if the objects were not identical.
956  EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond);
957 
958  // If either is a smi (we know that at least one is not a smi), then they can
959  // only be strictly equal if the other is a HeapNumber.
960  __ JumpIfBothNotSmi(lhs, rhs, &not_smis);
961 
962  // Exactly one operand is a smi. EmitSmiNonsmiComparison generates code that
963  // can:
964  // 1) Return the answer.
965  // 2) Branch to the slow case.
966  // 3) Fall through to both_loaded_as_doubles.
967  // In case 3, we have found out that we were dealing with a number-number
968  // comparison. The double values of the numbers have been loaded, right into
969  // rhs_d, left into lhs_d.
970  FPRegister rhs_d = d0;
971  FPRegister lhs_d = d1;
972  EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, x10, &slow, strict());
973 
974  __ Bind(&both_loaded_as_doubles);
975  // The arguments have been converted to doubles and stored in rhs_d and
976  // lhs_d.
977  Label nan;
978  __ Fcmp(lhs_d, rhs_d);
979  __ B(vs, &nan); // Overflow flag set if either is NaN.
980  STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
981  __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
982  __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0.
983  __ Ret();
984 
985  __ Bind(&nan);
986  // Left and/or right is a NaN. Load the result register with whatever makes
987  // the comparison fail, since comparisons with NaN always fail (except ne,
988  // which is filtered out at a higher level.)
989  ASSERT(cond != ne);
990  if ((cond == lt) || (cond == le)) {
991  __ Mov(result, GREATER);
992  } else {
993  __ Mov(result, LESS);
994  }
995  __ Ret();
996 
997  __ Bind(&not_smis);
998  // At this point we know we are dealing with two different objects, and
999  // neither of them is a smi. The objects are in rhs_ and lhs_.
1000 
1001  // Load the maps and types of the objects.
1002  Register rhs_map = x10;
1003  Register rhs_type = x11;
1004  Register lhs_map = x12;
1005  Register lhs_type = x13;
1006  __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
1007  __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
1008  __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
1009  __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
1010 
1011  if (strict()) {
1012  // This emits a non-equal return sequence for some object types, or falls
1013  // through if it was not lucky.
1014  EmitStrictTwoHeapObjectCompare(masm, lhs, rhs, lhs_type, rhs_type, x14);
1015  }
1016 
1017  Label check_for_internalized_strings;
1018  Label flat_string_check;
1019  // Check for heap number comparison. Branch to earlier double comparison code
1020  // if they are heap numbers, otherwise, branch to internalized string check.
1021  __ Cmp(rhs_type, HEAP_NUMBER_TYPE);
1022  __ B(ne, &check_for_internalized_strings);
1023  __ Cmp(lhs_map, rhs_map);
1024 
1025  // If maps aren't equal, lhs_ and rhs_ are not heap numbers. Branch to flat
1026  // string check.
1027  __ B(ne, &flat_string_check);
1028 
1029  // Both lhs_ and rhs_ are heap numbers. Load them and branch to the double
1030  // comparison code.
1031  __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1032  __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1033  __ B(&both_loaded_as_doubles);
1034 
1035  __ Bind(&check_for_internalized_strings);
1036  // In the strict case, the EmitStrictTwoHeapObjectCompare already took care
1037  // of internalized strings.
1038  if ((cond == eq) && !strict()) {
1039  // Returns an answer for two internalized strings or two detectable objects.
1040  // Otherwise branches to the string case or not both strings case.
1041  EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, lhs_map, rhs_map,
1042  lhs_type, rhs_type,
1043  &flat_string_check, &slow);
1044  }
1045 
1046  // Check for both being sequential ASCII strings, and inline if that is the
1047  // case.
1048  __ Bind(&flat_string_check);
1049  __ JumpIfBothInstanceTypesAreNotSequentialAscii(lhs_type, rhs_type, x14,
1050  x15, &slow);
1051 
1052  Isolate* isolate = masm->isolate();
1053  __ IncrementCounter(isolate->counters()->string_compare_native(), 1, x10,
1054  x11);
1055  if (cond == eq) {
1056  StringCompareStub::GenerateFlatAsciiStringEquals(masm, lhs, rhs,
1057  x10, x11, x12);
1058  } else {
1059  StringCompareStub::GenerateCompareFlatAsciiStrings(masm, lhs, rhs,
1060  x10, x11, x12, x13);
1061  }
1062 
1063  // Never fall through to here.
1064  if (FLAG_debug_code) {
1065  __ Unreachable();
1066  }
1067 
1068  __ Bind(&slow);
1069 
1070  __ Push(lhs, rhs);
1071  // Figure out which native to call and setup the arguments.
1072  Builtins::JavaScript native;
1073  if (cond == eq) {
1074  native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
1075  } else {
1076  native = Builtins::COMPARE;
1077  int ncr; // NaN compare result
1078  if ((cond == lt) || (cond == le)) {
1079  ncr = GREATER;
1080  } else {
1081  ASSERT((cond == gt) || (cond == ge)); // remaining cases
1082  ncr = LESS;
1083  }
1084  __ Mov(x10, Smi::FromInt(ncr));
1085  __ Push(x10);
1086  }
1087 
1088  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1089  // tagged as a small integer.
1090  __ InvokeBuiltin(native, JUMP_FUNCTION);
1091 
1092  __ Bind(&miss);
1093  GenerateMiss(masm);
1094 }
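// --- Editorial note: illustrative sketch, not part of the V8 source. In
// GenerateGeneric above, the Cset/Csinv pair after the double comparison maps
// the condition flags onto the LESS/EQUAL/GREATER constants (-1/0/1); the smi
// fast path achieves the same by returning untag(lhs) - untag(rhs), whose
// sign the caller inspects. The equivalent mapping in plain C++ (NaN is
// excluded here because the stub branches to the 'nan' label first):
static inline int DoubleCompareResultSketch(double lhs, double rhs) {
  if (lhs > rhs) return 1;   // GREATER: Cset on 'gt'
  if (lhs < rhs) return -1;  // LESS:    Csinv inverts xzr when 'ge' is false
  return 0;                  // EQUAL:   Csinv keeps the Cset result (0) on 'ge'
}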
1095 
1096 
1097 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
1098  // Preserve caller-saved registers x0-x7 and x10-x15. We don't care if x8, x9,
1099  // ip0 and ip1 are corrupted by the call into C.
1100  CPURegList saved_regs = kCallerSaved;
1101  saved_regs.Remove(ip0);
1102  saved_regs.Remove(ip1);
1103  saved_regs.Remove(x8);
1104  saved_regs.Remove(x9);
1105 
1106  // We don't allow a GC during a store buffer overflow so there is no need to
1107  // store the registers in any particular way, but we do have to store and
1108  // restore them.
1109  __ PushCPURegList(saved_regs);
1110  if (save_doubles_ == kSaveFPRegs) {
1111  __ PushCPURegList(kCallerSavedFP);
1112  }
1113 
1114  AllowExternalCallThatCantCauseGC scope(masm);
1115  __ Mov(x0, ExternalReference::isolate_address(masm->isolate()));
1116  __ CallCFunction(
1117  ExternalReference::store_buffer_overflow_function(masm->isolate()),
1118  1, 0);
1119 
1120  if (save_doubles_ == kSaveFPRegs) {
1121  __ PopCPURegList(kCallerSavedFP);
1122  }
1123  __ PopCPURegList(saved_regs);
1124  __ Ret();
1125 }
1126 
1127 
1128 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
1129  Isolate* isolate) {
1130  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
1131  stub1.GetCode(isolate);
1132  StoreBufferOverflowStub stub2(kSaveFPRegs);
1133  stub2.GetCode(isolate);
1134 }
1135 
1136 
1137 void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
1138  MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
1139  UseScratchRegisterScope temps(masm);
1140  Register saved_lr = temps.UnsafeAcquire(to_be_pushed_lr());
1141  Register return_address = temps.AcquireX();
1142  __ Mov(return_address, lr);
1143  // Restore lr with the value it had before the call to this stub (the value
1144  // which must be pushed).
1145  __ Mov(lr, saved_lr);
1146  if (save_doubles_ == kSaveFPRegs) {
1147  __ PushSafepointRegistersAndDoubles();
1148  } else {
1149  __ PushSafepointRegisters();
1150  }
1151  __ Ret(return_address);
1152 }
1153 
1154 
1155 void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
1156  MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
1157  UseScratchRegisterScope temps(masm);
1158  Register return_address = temps.AcquireX();
1159  // Preserve the return address (lr will be clobbered by the pop).
1160  __ Mov(return_address, lr);
1161  if (save_doubles_ == kSaveFPRegs) {
1162  __ PopSafepointRegistersAndDoubles();
1163  } else {
1164  __ PopSafepointRegisters();
1165  }
1166  __ Ret(return_address);
1167 }
1168 
1169 
1170 void MathPowStub::Generate(MacroAssembler* masm) {
1171  // Stack on entry:
1172  // jssp[0]: Exponent (as a tagged value).
1173  // jssp[1]: Base (as a tagged value).
1174  //
1175  // The (tagged) result will be returned in x0, as a heap number.
1176 
1177  Register result_tagged = x0;
1178  Register base_tagged = x10;
1179  Register exponent_tagged = x11;
1180  Register exponent_integer = x12;
1181  Register scratch1 = x14;
1182  Register scratch0 = x15;
1183  Register saved_lr = x19;
1184  FPRegister result_double = d0;
1185  FPRegister base_double = d0;
1186  FPRegister exponent_double = d1;
1187  FPRegister base_double_copy = d2;
1188  FPRegister scratch1_double = d6;
1189  FPRegister scratch0_double = d7;
1190 
1191  // A fast-path for integer exponents.
1192  Label exponent_is_smi, exponent_is_integer;
1193  // Bail out to runtime.
1194  Label call_runtime;
1195  // Allocate a heap number for the result, and return it.
1196  Label done;
1197 
1198  // Unpack the inputs.
1199  if (exponent_type_ == ON_STACK) {
1200  Label base_is_smi;
1201  Label unpack_exponent;
1202 
1203  __ Pop(exponent_tagged, base_tagged);
1204 
1205  __ JumpIfSmi(base_tagged, &base_is_smi);
1206  __ JumpIfNotHeapNumber(base_tagged, &call_runtime);
1207  // base_tagged is a heap number, so load its double value.
1208  __ Ldr(base_double, FieldMemOperand(base_tagged, HeapNumber::kValueOffset));
1209  __ B(&unpack_exponent);
1210  __ Bind(&base_is_smi);
1211  // base_tagged is a SMI, so untag it and convert it to a double.
1212  __ SmiUntagToDouble(base_double, base_tagged);
1213 
1214  __ Bind(&unpack_exponent);
1215  // x10 base_tagged The tagged base (input).
1216  // x11 exponent_tagged The tagged exponent (input).
1217  // d1 base_double The base as a double.
1218  __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
1219  __ JumpIfNotHeapNumber(exponent_tagged, &call_runtime);
1220  // exponent_tagged is a heap number, so load its double value.
1221  __ Ldr(exponent_double,
1222  FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
1223  } else if (exponent_type_ == TAGGED) {
1224  __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
1225  __ Ldr(exponent_double,
1226  FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
1227  }
1228 
1229  // Handle double (heap number) exponents.
1230  if (exponent_type_ != INTEGER) {
1231  // Detect integer exponents stored as doubles and handle those in the
1232  // integer fast-path.
1233  __ TryConvertDoubleToInt64(exponent_integer, exponent_double,
1234  scratch0_double, &exponent_is_integer);
1235 
1236  if (exponent_type_ == ON_STACK) {
1237  FPRegister half_double = d3;
1238  FPRegister minus_half_double = d4;
1239  // Detect square root case. Crankshaft detects constant +/-0.5 at compile
1240  // time and uses DoMathPowHalf instead. We then skip this check for
1241  // non-constant cases of +/-0.5 as these hardly occur.
1242 
1243  __ Fmov(minus_half_double, -0.5);
1244  __ Fmov(half_double, 0.5);
1245  __ Fcmp(minus_half_double, exponent_double);
1246  __ Fccmp(half_double, exponent_double, NZFlag, ne);
1247  // Condition flags at this point:
1248  // 0.5; nZCv // Identified by eq && pl
1249  // -0.5: NZcv // Identified by eq && mi
1250  // other: ?z?? // Identified by ne
1251  __ B(ne, &call_runtime);
1252 
1253  // The exponent is 0.5 or -0.5.
1254 
1255  // Given that exponent is known to be either 0.5 or -0.5, the following
1256  // special cases could apply (according to ECMA-262 15.8.2.13):
1257  //
1258  // base.isNaN(): The result is NaN.
1259  // (base == +INFINITY) || (base == -INFINITY)
1260  // exponent == 0.5: The result is +INFINITY.
1261  // exponent == -0.5: The result is +0.
1262  // (base == +0) || (base == -0)
1263  // exponent == 0.5: The result is +0.
1264  // exponent == -0.5: The result is +INFINITY.
1265  // (base < 0) && base.isFinite(): The result is NaN.
1266  //
1267  // Fsqrt (and Fdiv for the -0.5 case) can handle all of those except
1268  // where base is -INFINITY or -0.
1269 
1270  // Add +0 to base. This has no effect other than turning -0 into +0.
1271  __ Fadd(base_double, base_double, fp_zero);
1272  // The operation -0+0 results in +0 in all cases except where the
1273  // FPCR rounding mode is 'round towards minus infinity' (RM). The
1274  // ARM64 simulator does not currently simulate FPCR (where the rounding
1275  // mode is set), so test the operation with some debug code.
1276  if (masm->emit_debug_code()) {
1277  UseScratchRegisterScope temps(masm);
1278  Register temp = temps.AcquireX();
1279  __ Fneg(scratch0_double, fp_zero);
1280  // Verify that we correctly generated +0.0 and -0.0.
1281  // bits(+0.0) = 0x0000000000000000
1282  // bits(-0.0) = 0x8000000000000000
1283  __ Fmov(temp, fp_zero);
1284  __ CheckRegisterIsClear(temp, kCouldNotGenerateZero);
1285  __ Fmov(temp, scratch0_double);
1286  __ Eor(temp, temp, kDSignMask);
1287  __ CheckRegisterIsClear(temp, kCouldNotGenerateNegativeZero);
1288  // Check that -0.0 + 0.0 == +0.0.
1289  __ Fadd(scratch0_double, scratch0_double, fp_zero);
1290  __ Fmov(temp, scratch0_double);
1291  __ CheckRegisterIsClear(temp, kExpectedPositiveZero);
1292  }
1293 
1294  // If base is -INFINITY, make it +INFINITY.
1295  // * Calculate base - base: All infinities will become NaNs since both
1296  // -INFINITY+INFINITY and +INFINITY-INFINITY are NaN in ARM64.
1297  // * If the result is NaN, calculate abs(base).
1298  __ Fsub(scratch0_double, base_double, base_double);
1299  __ Fcmp(scratch0_double, 0.0);
1300  __ Fabs(scratch1_double, base_double);
1301  __ Fcsel(base_double, scratch1_double, base_double, vs);
1302 
1303  // Calculate the square root of base.
1304  __ Fsqrt(result_double, base_double);
1305  __ Fcmp(exponent_double, 0.0);
1306  __ B(ge, &done); // Finish now for exponents of 0.5.
1307  // Find the inverse for exponents of -0.5.
1308  __ Fmov(scratch0_double, 1.0);
1309  __ Fdiv(result_double, scratch0_double, result_double);
1310  __ B(&done);
1311  }
1312 
1313  {
1314  AllowExternalCallThatCantCauseGC scope(masm);
1315  __ Mov(saved_lr, lr);
1316  __ CallCFunction(
1317  ExternalReference::power_double_double_function(masm->isolate()),
1318  0, 2);
1319  __ Mov(lr, saved_lr);
1320  __ B(&done);
1321  }
1322 
1323  // Handle SMI exponents.
1324  __ Bind(&exponent_is_smi);
1325  // x10 base_tagged The tagged base (input).
1326  // x11 exponent_tagged The tagged exponent (input).
1327  // d1 base_double The base as a double.
1328  __ SmiUntag(exponent_integer, exponent_tagged);
1329  }
1330 
1331  __ Bind(&exponent_is_integer);
1332  // x10 base_tagged The tagged base (input).
1333  // x11 exponent_tagged The tagged exponent (input).
1334  // x12 exponent_integer The exponent as an integer.
1335  // d1 base_double The base as a double.
1336 
1337  // Find abs(exponent). For negative exponents, we can find the inverse later.
1338  Register exponent_abs = x13;
1339  __ Cmp(exponent_integer, 0);
1340  __ Cneg(exponent_abs, exponent_integer, mi);
1341  // x13 exponent_abs The value of abs(exponent_integer).
1342 
1343  // Repeatedly multiply to calculate the power.
1344  // result = 1.0;
1345  // For each bit n (exponent_integer{n}) {
1346  // if (exponent_integer{n}) {
1347  // result *= base;
1348  // }
1349  // base *= base;
1350  // if (remaining bits in exponent_integer are all zero) {
1351  // break;
1352  // }
1353  // }
1354  Label power_loop, power_loop_entry, power_loop_exit;
1355  __ Fmov(scratch1_double, base_double);
1356  __ Fmov(base_double_copy, base_double);
1357  __ Fmov(result_double, 1.0);
1358  __ B(&power_loop_entry);
1359 
1360  __ Bind(&power_loop);
1361  __ Fmul(scratch1_double, scratch1_double, scratch1_double);
1362  __ Lsr(exponent_abs, exponent_abs, 1);
1363  __ Cbz(exponent_abs, &power_loop_exit);
1364 
1365  __ Bind(&power_loop_entry);
1366  __ Tbz(exponent_abs, 0, &power_loop);
1367  __ Fmul(result_double, result_double, scratch1_double);
1368  __ B(&power_loop);
1369 
1370  __ Bind(&power_loop_exit);
1371 
1372  // If the exponent was positive, result_double holds the result.
1373  __ Tbz(exponent_integer, kXSignBit, &done);
1374 
1375  // The exponent was negative, so find the inverse.
1376  __ Fmov(scratch0_double, 1.0);
1377  __ Fdiv(result_double, scratch0_double, result_double);
1378  // ECMA-262 only requires Math.pow to return an 'implementation-dependent
1379  // approximation' of base^exponent. However, mjsunit/math-pow uses Math.pow
1380  // to calculate the subnormal value 2^-1074. This method of calculating
1381  // negative powers doesn't work because 2^1074 overflows to infinity. To
1382  // catch this corner-case, we bail out if the result was 0. (This can only
1383  // occur if the divisor is infinity or the base is zero.)
1384  __ Fcmp(result_double, 0.0);
1385  __ B(&done, ne);
1386 
1387  if (exponent_type_ == ON_STACK) {
1388  // Bail out to runtime code.
1389  __ Bind(&call_runtime);
1390  // Put the arguments back on the stack.
1391  __ Push(base_tagged, exponent_tagged);
1392  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
1393 
1394  // Return.
1395  __ Bind(&done);
1396  __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1);
1397  __ Str(result_double,
1398  FieldMemOperand(result_tagged, HeapNumber::kValueOffset));
1399  ASSERT(result_tagged.is(x0));
1400  __ IncrementCounter(
1401  masm->isolate()->counters()->math_pow(), 1, scratch0, scratch1);
1402  __ Ret();
1403  } else {
1404  AllowExternalCallThatCantCauseGC scope(masm);
1405  __ Mov(saved_lr, lr);
1406  __ Fmov(base_double, base_double_copy);
1407  __ Scvtf(exponent_double, exponent_integer);
1408  __ CallCFunction(
1409  ExternalReference::power_double_double_function(masm->isolate()),
1410  0, 2);
1411  __ Mov(lr, saved_lr);
1412  __ Bind(&done);
1413  __ IncrementCounter(
1414  masm->isolate()->counters()->math_pow(), 1, scratch0, scratch1);
1415  __ Ret();
1416  }
1417 }
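// --- Editorial note: illustrative sketch, not part of the V8 source. The
// power loop above is binary exponentiation (square-and-multiply) over
// abs(exponent), with negative exponents handled afterwards by taking the
// reciprocal, exactly as the stub does. The helper name is an assumption made
// for this sketch only.
static inline double IntegerPowSketch(double base, int64_t exponent) {
  uint64_t remaining =
      static_cast<uint64_t>(exponent < 0 ? -exponent : exponent);
  double accumulator = base;  // holds base^(2^n) for the bit being examined
  double result = 1.0;
  while (remaining != 0) {
    if ((remaining & 1) != 0) result *= accumulator;  // bit set: fold in this power
    accumulator *= accumulator;                       // square for the next bit
    remaining >>= 1;
  }
  // As the stub notes, a zero result on the reciprocal path would still need
  // a runtime fallback to produce subnormal values exactly.
  return exponent < 0 ? 1.0 / result : result;
}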
1418 
1419 
1420 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
1421  // It is important that the following stubs are generated in this order
1422  // because pregenerated stubs can only call other pregenerated stubs.
1423  // RecordWriteStub uses StoreBufferOverflowStub, which in turn uses
1424  // CEntryStub.
1425  CEntryStub::GenerateAheadOfTime(isolate);
1426  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
1427  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
1428  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
1429  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
1430  BinaryOpICStub::GenerateAheadOfTime(isolate);
1431  StoreRegistersStateStub::GenerateAheadOfTime(isolate);
1432  RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
1433  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
1434 }
1435 
1436 
1437 void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
1438  StoreRegistersStateStub stub1(kDontSaveFPRegs);
1439  stub1.GetCode(isolate);
1440  StoreRegistersStateStub stub2(kSaveFPRegs);
1441  stub2.GetCode(isolate);
1442 }
1443 
1444 
1445 void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
1446  RestoreRegistersStateStub stub1(kDontSaveFPRegs);
1447  stub1.GetCode(isolate);
1448  RestoreRegistersStateStub stub2(kSaveFPRegs);
1449  stub2.GetCode(isolate);
1450 }
1451 
1452 
1453 void CodeStub::GenerateFPStubs(Isolate* isolate) {
1454  // Floating-point code doesn't get special handling in ARM64, so there's
1455  // nothing to do here.
1456  USE(isolate);
1457 }
1458 
1459 
1460 bool CEntryStub::NeedsImmovableCode() {
1461  // CEntryStub stores the return address on the stack before calling into
1462  // C++ code. In some cases, the VM accesses this address, but it is not used
1463  // when the C++ code returns to the stub because LR holds the return address
1464  // in AAPCS64. If the stub is moved (perhaps during a GC), we could end up
1465  // returning to dead code.
1466  // TODO(jbramley): Whilst this is the only analysis that makes sense, I can't
1467  // find any comment to confirm this, and I don't hit any crashes whatever
1468 // this function returns. The analysis should be properly confirmed.
1469  return true;
1470 }
1471 
1472 
1473 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
1474  CEntryStub stub(1, kDontSaveFPRegs);
1475  stub.GetCode(isolate);
1476  CEntryStub stub_fp(1, kSaveFPRegs);
1477  stub_fp.GetCode(isolate);
1478 }
1479 
1480 
1481 void CEntryStub::GenerateCore(MacroAssembler* masm,
1482  Label* throw_normal,
1483  Label* throw_termination,
1484  bool do_gc,
1485  bool always_allocate) {
1486  // x0 : Result parameter for PerformGC, if do_gc is true.
1487  // x21 : argv
1488  // x22 : argc
1489  // x23 : target
1490  //
1491  // The stack (on entry) holds the arguments and the receiver, with the
1492  // receiver at the highest address:
1493  //
1494  // argv[8]: receiver
1495  // argv -> argv[0]: arg[argc-2]
1496  // ... ...
1497  // argv[...]: arg[1]
1498  // argv[...]: arg[0]
1499  //
1500  // Immediately below (after) this is the exit frame, as constructed by
1501  // EnterExitFrame:
1502  // fp[8]: CallerPC (lr)
1503  // fp -> fp[0]: CallerFP (old fp)
1504  // fp[-8]: Space reserved for SPOffset.
1505  // fp[-16]: CodeObject()
1506  // csp[...]: Saved doubles, if saved_doubles is true.
1507  // csp[32]: Alignment padding, if necessary.
1508  // csp[24]: Preserved x23 (used for target).
1509  // csp[16]: Preserved x22 (used for argc).
1510  // csp[8]: Preserved x21 (used for argv).
1511  // csp -> csp[0]: Space reserved for the return address.
1512  //
1513  // After a successful call, the exit frame, preserved registers (x21-x23) and
1514  // the arguments (including the receiver) are dropped or popped as
1515  // appropriate. The stub then returns.
1516  //
1517  // After an unsuccessful call, the exit frame and suchlike are left
1518  // untouched, and the stub either throws an exception by jumping to one of
1519  // the provided throw_ labels, or it falls through. The failure details are
1520  // passed through in x0.
1521  ASSERT(csp.Is(__ StackPointer()));
1522 
1523  Isolate* isolate = masm->isolate();
1524 
1525  const Register& argv = x21;
1526  const Register& argc = x22;
1527  const Register& target = x23;
1528 
1529  if (do_gc) {
1530  // Call Runtime::PerformGC, passing x0 (the result parameter for
1531  // PerformGC) and x1 (the isolate).
1532  __ Mov(x1, ExternalReference::isolate_address(masm->isolate()));
1533  __ CallCFunction(
1534  ExternalReference::perform_gc_function(isolate), 2, 0);
1535  }
1536 
1537  ExternalReference scope_depth =
1538  ExternalReference::heap_always_allocate_scope_depth(isolate);
1539  if (always_allocate) {
1540  __ Mov(x10, Operand(scope_depth));
1541  __ Ldr(x11, MemOperand(x10));
1542  __ Add(x11, x11, 1);
1543  __ Str(x11, MemOperand(x10));
1544  }
1545 
1546  // Prepare AAPCS64 arguments to pass to the builtin.
1547  __ Mov(x0, argc);
1548  __ Mov(x1, argv);
1549  __ Mov(x2, ExternalReference::isolate_address(isolate));
1550 
1551  // Store the return address on the stack, in the space previously allocated
1552  // by EnterExitFrame. The return address is queried by
1553  // ExitFrame::GetStateForFramePointer.
1554  Label return_location;
1555  __ Adr(x12, &return_location);
1556  __ Poke(x12, 0);
1557  if (__ emit_debug_code()) {
1558  // Verify that the slot below fp[kSPOffset]-8 points to the return location
1559  // (currently in x12).
1560  UseScratchRegisterScope temps(masm);
1561  Register temp = temps.AcquireX();
1562  __ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset));
1563  __ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSize)));
1564  __ Cmp(temp, x12);
1565  __ Check(eq, kReturnAddressNotFoundInFrame);
1566  }
1567 
1568  // Call the builtin.
1569  __ Blr(target);
1570  __ Bind(&return_location);
1571  const Register& result = x0;
1572 
1573  if (always_allocate) {
1574  __ Mov(x10, Operand(scope_depth));
1575  __ Ldr(x11, MemOperand(x10));
1576  __ Sub(x11, x11, 1);
1577  __ Str(x11, MemOperand(x10));
1578  }
1579 
1580  // x0 result The return code from the call.
1581  // x21 argv
1582  // x22 argc
1583  // x23 target
1584  //
1585  // If all of the result bits matching kFailureTagMask are '1', the result is
1586  // a failure. Otherwise, it's an ordinary tagged object and the call was a
1587  // success.
1588  Label failure;
1589  __ And(x10, result, kFailureTagMask);
1590  __ Cmp(x10, kFailureTagMask);
1591  __ B(&failure, eq);
1592 
1593  // The call succeeded, so unwind the stack and return.
1594 
1595  // Restore callee-saved registers x21-x23.
1596  __ Mov(x11, argc);
1597 
1598  __ Peek(argv, 1 * kPointerSize);
1599  __ Peek(argc, 2 * kPointerSize);
1600  __ Peek(target, 3 * kPointerSize);
1601 
1602  __ LeaveExitFrame(save_doubles_, x10, true);
1603  ASSERT(jssp.Is(__ StackPointer()));
1604  // Pop or drop the remaining stack slots and return from the stub.
1605  // jssp[24]: Arguments array (of size argc), including receiver.
1606  // jssp[16]: Preserved x23 (used for target).
1607  // jssp[8]: Preserved x22 (used for argc).
1608  // jssp[0]: Preserved x21 (used for argv).
1609  __ Drop(x11);
1610  __ Ret();
1611 
1612  // The stack pointer is still csp if we aren't returning, and the frame
1613  // hasn't changed (except for the return address).
1614  __ SetStackPointer(csp);
1615 
1616  __ Bind(&failure);
1617  // The call failed, so check if we need to throw an exception, and fall
1618  // through (to retry) otherwise.
1619 
1620  Label retry;
1621  // x0 result The return code from the call, including the failure
1622  // code and details.
1623  // x21 argv
1624  // x22 argc
1625  // x23 target
1626  // Refer to the Failure class for details of the bit layout.
1627  STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
1628  __ Tst(result, kFailureTypeTagMask << kFailureTagSize);
1629  __ B(eq, &retry); // RETRY_AFTER_GC
1630 
1631  // Retrieve the pending exception.
1632  const Register& exception = result;
1633  const Register& exception_address = x11;
1634  __ Mov(exception_address,
1635  Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1636  isolate)));
1637  __ Ldr(exception, MemOperand(exception_address));
1638 
1639  // Clear the pending exception.
1640  __ Mov(x10, Operand(isolate->factory()->the_hole_value()));
1641  __ Str(x10, MemOperand(exception_address));
1642 
1643  // x0 exception The exception descriptor.
1644  // x21 argv
1645  // x22 argc
1646  // x23 target
1647 
1648  // Special handling of termination exceptions, which are uncatchable by
1649  // JavaScript code.
1650  __ Cmp(exception, Operand(isolate->factory()->termination_exception()));
1651  __ B(eq, throw_termination);
1652 
1653  // Handle normal exception.
1654  __ B(throw_normal);
1655 
1656  __ Bind(&retry);
1657  // The result (x0) is passed through as the next PerformGC parameter.
1658 }
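// Illustrative sketch (not part of the generated stub): the failure-tag tests
// performed by GenerateCore above, rewritten as plain C++. The mask and shift
// names match the constants used in the And/Cmp and Tst sequences.
//
//   bool IsFailureResult(intptr_t result) {
//     // A failure object has every bit of kFailureTagMask set.
//     return (result & kFailureTagMask) == kFailureTagMask;
//   }
//
//   bool IsRetryAfterGC(intptr_t result) {
//     // Failure::RETRY_AFTER_GC has a zero type field, so the type bits
//     // above the tag are all clear.
//     return (result & (kFailureTypeTagMask << kFailureTagSize)) == 0;
//   }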
1659 
1660 
1661 void CEntryStub::Generate(MacroAssembler* masm) {
1662  // The Abort mechanism relies on CallRuntime, which in turn relies on
1663  // CEntryStub, so until this stub has been generated, we have to use a
1664  // fall-back Abort mechanism.
1665  //
1666  // Note that this stub must be generated before any use of Abort.
1667  MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
1668 
1669  ASM_LOCATION("CEntryStub::Generate entry");
1670  ProfileEntryHookStub::MaybeCallEntryHook(masm);
1671 
1672  // Register parameters:
1673  // x0: argc (including receiver, untagged)
1674  // x1: target
1675  //
1676  // The stack on entry holds the arguments and the receiver, with the receiver
1677  // at the highest address:
1678  //
1679  // jssp[argc-1]: receiver
1680  // jssp[argc-2]: arg[argc-2]
1681  // ... ...
1682  // jssp[1]: arg[1]
1683  // jssp[0]: arg[0]
1684  //
1685  // The arguments are in reverse order, so that arg[argc-2] is actually the
1686  // first argument to the target function and arg[0] is the last.
1687  ASSERT(jssp.Is(__ StackPointer()));
1688  const Register& argc_input = x0;
1689  const Register& target_input = x1;
1690 
1691  // Calculate argv, argc and the target address, and store them in
1692  // callee-saved registers so we can retry the call without having to reload
1693  // these arguments.
1694  // TODO(jbramley): If the first call attempt succeeds in the common case (as
1695  // it should), then we might be better off putting these parameters directly
1696  // into their argument registers, rather than using callee-saved registers and
1697  // preserving them on the stack.
1698  const Register& argv = x21;
1699  const Register& argc = x22;
1700  const Register& target = x23;
1701 
1702  // Derive argv from the stack pointer so that it points to the first argument
1703  // (arg[argc-2]), or just below the receiver in case there are no arguments.
1704  // - Adjust for the arg[] array.
1705  Register temp_argv = x11;
1706  __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2));
1707  // - Adjust for the receiver.
1708  __ Sub(temp_argv, temp_argv, 1 * kPointerSize);
1709 
1710  // Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved
1711  // registers.
1712  FrameScope scope(masm, StackFrame::MANUAL);
1713  __ EnterExitFrame(save_doubles_, x10, 3);
1714  ASSERT(csp.Is(__ StackPointer()));
1715 
1716  // Poke callee-saved registers into reserved space.
1717  __ Poke(argv, 1 * kPointerSize);
1718  __ Poke(argc, 2 * kPointerSize);
1719  __ Poke(target, 3 * kPointerSize);
1720 
1721  // We normally only keep tagged values in callee-saved registers, as they
1722  // could be pushed onto the stack by called stubs and functions, and on the
1723  // stack they can confuse the GC. However, we're only calling C functions
1724  // which can push arbitrary data onto the stack anyway, and so the GC won't
1725  // examine that part of the stack.
1726  __ Mov(argc, argc_input);
1727  __ Mov(target, target_input);
1728  __ Mov(argv, temp_argv);
1729 
1730  Label throw_normal;
1731  Label throw_termination;
1732 
1733  // Call the runtime function.
1734  GenerateCore(masm,
1735  &throw_normal,
1736  &throw_termination,
1737  false,
1738  false);
1739 
1740  // If successful, the previous GenerateCore will have returned to the
1741  // calling code. Otherwise, we fall through into the following.
1742 
1743  // Do space-specific GC and retry runtime call.
1744  GenerateCore(masm,
1745  &throw_normal,
1746  &throw_termination,
1747  true,
1748  false);
1749 
1750  // Do full GC and retry runtime call one final time.
1751  __ Mov(x0, reinterpret_cast<uint64_t>(Failure::InternalError()));
1752  GenerateCore(masm,
1753  &throw_normal,
1754  &throw_termination,
1755  true,
1756  true);
1757 
1758  { FrameScope scope(masm, StackFrame::MANUAL);
1759  __ CallCFunction(
1760  ExternalReference::out_of_memory_function(masm->isolate()), 0);
1761  }
1762 
1763  // We didn't execute a return case, so the stack frame hasn't been updated
1764  // (except for the return address slot). However, we don't need to initialize
1765  // jssp because the throw method will immediately overwrite it when it
1766  // unwinds the stack.
1767  __ SetStackPointer(jssp);
1768 
1769  // Throw exceptions.
1770  // If we throw an exception, we can end up re-entering CEntryStub before we
1771  // pop the exit frame, so need to ensure that x21-x23 contain GC-safe values
1772  // here.
1773 
1774  __ Bind(&throw_termination);
1775  ASM_LOCATION("Throw termination");
1776  __ Mov(argv, 0);
1777  __ Mov(argc, 0);
1778  __ Mov(target, 0);
1779  __ ThrowUncatchable(x0, x10, x11, x12, x13);
1780 
1781  __ Bind(&throw_normal);
1782  ASM_LOCATION("Throw normal");
1783  __ Mov(argv, 0);
1784  __ Mov(argc, 0);
1785  __ Mov(target, 0);
1786  __ Throw(x0, x10, x11, x12, x13);
1787 }
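// Illustrative sketch (an assumption, not generated code): the retry protocol
// driven by the three GenerateCore calls above, expressed as ordinary C++
// control flow. CallBuiltin() and IsFailureResult() are hypothetical helpers.
//
//   intptr_t result = CallBuiltin(/* do_gc= */ false, /* always_allocate= */ false);
//   if (!IsFailureResult(result)) return result;   // Common fast path.
//   result = CallBuiltin(true, false);             // GC, then retry.
//   if (!IsFailureResult(result)) return result;
//   result = CallBuiltin(true, true);              // Full GC, final retry.
//   if (!IsFailureResult(result)) return result;
//   // Still failing: call out_of_memory_function, then throw.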
1788 
1789 
1790 // This is the entry point from C++. 5 arguments are provided in x0-x4.
1791 // See use of the CALL_GENERATED_CODE macro for example in src/execution.cc.
1792 // Input:
1793 // x0: code entry.
1794 // x1: function.
1795 // x2: receiver.
1796 // x3: argc.
1797 // x4: argv.
1798 // Output:
1799 // x0: result.
1800 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
1801  ASSERT(jssp.Is(__ StackPointer()));
1802  Register code_entry = x0;
1803 
1804  // Enable instruction instrumentation. This only works on the simulator, and
1805  // will have no effect on the model or real hardware.
1806  __ EnableInstrumentation();
1807 
1808  Label invoke, handler_entry, exit;
1809 
1810  // Push callee-saved registers and synchronize the system stack pointer (csp)
1811  // and the JavaScript stack pointer (jssp).
1812  //
1813  // We must not write to jssp until after the PushCalleeSavedRegisters()
1814  // call, since jssp is itself a callee-saved register.
1815  __ SetStackPointer(csp);
1816  __ PushCalleeSavedRegisters();
1817  __ Mov(jssp, csp);
1818  __ SetStackPointer(jssp);
1819 
1821 
1822  // Set up the reserved register for 0.0.
1823  __ Fmov(fp_zero, 0.0);
1824 
1825  // Build an entry frame (see layout below).
1826  Isolate* isolate = masm->isolate();
1827 
1828  // Build an entry frame.
1829  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
1830  int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used.
1831  __ Mov(x13, bad_frame_pointer);
1832  __ Mov(x12, Smi::FromInt(marker));
1833  __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate));
1834  __ Ldr(x10, MemOperand(x11));
1835 
1836  __ Push(x13, xzr, x12, x10);
1837  // Set up fp.
1838  __ Sub(fp, jssp, EntryFrameConstants::kCallerFPOffset);
1839 
1840  // Push the JS entry frame marker. Also set js_entry_sp if this is the
1841  // outermost JS call.
1842  Label non_outermost_js, done;
1843  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
1844  __ Mov(x10, ExternalReference(js_entry_sp));
1845  __ Ldr(x11, MemOperand(x10));
1846  __ Cbnz(x11, &non_outermost_js);
1847  __ Str(fp, MemOperand(x10));
1848  __ Mov(x12, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
1849  __ Push(x12);
1850  __ B(&done);
1851  __ Bind(&non_outermost_js);
1852  // We spare one instruction by pushing xzr since the marker is 0.
1853  ASSERT(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL);
1854  __ Push(xzr);
1855  __ Bind(&done);
1856 
1857  // The frame set up looks like this:
1858  // jssp[0] : JS entry frame marker.
1859  // jssp[1] : C entry FP.
1860  // jssp[2] : stack frame marker.
1861  // jssp[3] : stack frame marker.
1862  // jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
1863 
1864 
1865  // Jump to a faked try block that does the invoke, with a faked catch
1866  // block that sets the pending exception.
1867  __ B(&invoke);
1868 
1869  // Prevent the constant pool from being emitted between the record of the
1870  // handler_entry position and the first instruction of the sequence here.
1871  // There is no risk because Assembler::Emit() emits the instruction before
1872  // checking for constant pool emission, but we do not want to depend on
1873  // that.
1874  {
1875  Assembler::BlockPoolsScope block_pools(masm);
1876  __ bind(&handler_entry);
1877  handler_offset_ = handler_entry.pos();
1878  // Caught exception: Store result (exception) in the pending exception
1879  // field in the JSEnv and return a failure sentinel. Coming in here the
1880  // fp will be invalid because the PushTryHandler below sets it to 0 to
1881  // signal the existence of the JSEntry frame.
1882  __ Mov(x10, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1883  isolate)));
1884  }
1885  __ Str(code_entry, MemOperand(x10));
1886  __ Mov(x0, Operand(reinterpret_cast<int64_t>(Failure::Exception())));
1887  __ B(&exit);
1888 
1889  // Invoke: Link this frame into the handler chain. There's only one
1890  // handler block in this code object, so its index is 0.
1891  __ Bind(&invoke);
1892  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
1893  // If an exception not caught by another handler occurs, this handler
1894  // returns control to the code after the B(&invoke) above, which
1895  // restores all callee-saved registers (including cp and fp) to their
1896  // saved values before returning a failure to C.
1897 
1898  // Clear any pending exceptions.
1899  __ Mov(x10, Operand(isolate->factory()->the_hole_value()));
1900  __ Mov(x11, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1901  isolate)));
1902  __ Str(x10, MemOperand(x11));
1903 
1904  // Invoke the function by calling through the JS entry trampoline builtin.
1905  // Notice that we cannot store a reference to the trampoline code directly in
1906  // this stub, because runtime stubs are not traversed when doing GC.
1907 
1908  // Expected registers by Builtins::JSEntryTrampoline
1909  // x0: code entry.
1910  // x1: function.
1911  // x2: receiver.
1912  // x3: argc.
1913  // x4: argv.
1914  ExternalReference entry(is_construct ? Builtins::kJSConstructEntryTrampoline
1915  : Builtins::kJSEntryTrampoline,
1916  isolate);
1917  __ Mov(x10, entry);
1918 
1919  // Call the JSEntryTrampoline.
1920  __ Ldr(x11, MemOperand(x10)); // Dereference the address.
1921  __ Add(x12, x11, Code::kHeaderSize - kHeapObjectTag);
1922  __ Blr(x12);
1923 
1924  // Unlink this frame from the handler chain.
1925  __ PopTryHandler();
1926 
1927 
1928  __ Bind(&exit);
1929  // x0 holds the result.
1930  // The stack pointer points to the top of the entry frame pushed on entry from
1931  // C++ (at the beginning of this stub):
1932  // jssp[0] : JS entry frame marker.
1933  // jssp[1] : C entry FP.
1934  // jssp[2] : stack frame marker.
1935  // jssp[3] : stack frame marker.
1936  // jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
1937 
1938  // Check if the current stack frame is marked as the outermost JS frame.
1939  Label non_outermost_js_2;
1940  __ Pop(x10);
1941  __ Cmp(x10, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
1942  __ B(ne, &non_outermost_js_2);
1943  __ Mov(x11, ExternalReference(js_entry_sp));
1944  __ Str(xzr, MemOperand(x11));
1945  __ Bind(&non_outermost_js_2);
1946 
1947  // Restore the top frame descriptors from the stack.
1948  __ Pop(x10);
1949  __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate));
1950  __ Str(x10, MemOperand(x11));
1951 
1952  // Reset the stack to the callee saved registers.
1954  // Restore the callee-saved registers and return.
1955  ASSERT(jssp.Is(__ StackPointer()));
1956  __ Mov(csp, jssp);
1957  __ SetStackPointer(csp);
1958  __ PopCalleeSavedRegisters();
1959  // After this point, we must not modify jssp because it is a callee-saved
1960  // register which we have just restored.
1961  __ Ret();
1962 }
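// Illustrative sketch of how the C++ side supplies the five parameters listed
// above (x0-x4). The real call goes through the CALL_GENERATED_CODE macro in
// src/execution.cc; the exact function-pointer signature shown here is an
// assumption for illustration only.
//
//   typedef Object* (*JSEntryFunction)(byte* code_entry,
//                                      Object* function,
//                                      Object* receiver,
//                                      int argc,
//                                      Object*** argv);
//   // The result lands in x0, matching the "Output: x0: result" contract above.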
1963 
1964 
1965 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
1966  Label miss;
1967  Register receiver;
1968  if (kind() == Code::KEYED_LOAD_IC) {
1969  // ----------- S t a t e -------------
1970  // -- lr : return address
1971  // -- x1 : receiver
1972  // -- x0 : key
1973  // -----------------------------------
1974  Register key = x0;
1975  receiver = x1;
1976  __ Cmp(key, Operand(masm->isolate()->factory()->prototype_string()));
1977  __ B(ne, &miss);
1978  } else {
1979  ASSERT(kind() == Code::LOAD_IC);
1980  // ----------- S t a t e -------------
1981  // -- lr : return address
1982  // -- x2 : name
1983  // -- x0 : receiver
1984  // -- sp[0] : receiver
1985  // -----------------------------------
1986  receiver = x0;
1987  }
1988 
1989  StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10, x11, &miss);
1990 
1991  __ Bind(&miss);
1992  StubCompiler::TailCallBuiltin(masm,
1993  BaseLoadStoreStubCompiler::MissBuiltin(kind()));
1994 }
1995 
1996 
1997 void InstanceofStub::Generate(MacroAssembler* masm) {
1998  // Stack on entry:
1999  // jssp[0]: function.
2000  // jssp[8]: object.
2001  //
2002  // Returns result in x0. Zero indicates instanceof, smi 1 indicates not
2003  // instanceof.
2004 
2005  Register result = x0;
2006  Register function = right();
2007  Register object = left();
2008  Register scratch1 = x6;
2009  Register scratch2 = x7;
2010  Register res_true = x8;
2011  Register res_false = x9;
2012  // Only used if there was an inline map check site. (See
2013  // LCodeGen::DoInstanceOfKnownGlobal().)
2014  Register map_check_site = x4;
2015  // Delta for the instructions generated between the inline map check and the
2016  // instruction setting the result.
2017  const int32_t kDeltaToLoadBoolResult = 4 * kInstructionSize;
2018 
2019  Label not_js_object, slow;
2020 
2021  if (!HasArgsInRegisters()) {
2022  __ Pop(function, object);
2023  }
2024 
2025  if (ReturnTrueFalseObject()) {
2026  __ LoadTrueFalseRoots(res_true, res_false);
2027  } else {
2028  // This is counter-intuitive, but correct.
2029  __ Mov(res_true, Smi::FromInt(0));
2030  __ Mov(res_false, Smi::FromInt(1));
2031  }
2032 
2033  // Check that the left hand side is a JS object and load its map as a side
2034  // effect.
2035  Register map = x12;
2036  __ JumpIfSmi(object, &not_js_object);
2037  __ IsObjectJSObjectType(object, map, scratch2, &not_js_object);
2038 
2039  // If there is a call site cache, don't look in the global cache, but do the
2040  // real lookup and update the call site cache.
2041  if (!HasCallSiteInlineCheck()) {
2042  Label miss;
2043  __ JumpIfNotRoot(function, Heap::kInstanceofCacheFunctionRootIndex, &miss);
2044  __ JumpIfNotRoot(map, Heap::kInstanceofCacheMapRootIndex, &miss);
2045  __ LoadRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
2046  __ Ret();
2047  __ Bind(&miss);
2048  }
2049 
2050  // Get the prototype of the function.
2051  Register prototype = x13;
2052  __ TryGetFunctionPrototype(function, prototype, scratch2, &slow,
2053  MacroAssembler::kMissOnBoundFunction);
2054 
2055  // Check that the function prototype is a JS object.
2056  __ JumpIfSmi(prototype, &slow);
2057  __ IsObjectJSObjectType(prototype, scratch1, scratch2, &slow);
2058 
2059  // Update the global instanceof or call site inlined cache with the current
2060  // map and function. The cached answer will be set when it is known below.
2061  if (HasCallSiteInlineCheck()) {
2062  // Patch the (relocated) inlined map check.
2063  __ GetRelocatedValueLocation(map_check_site, scratch1);
2064  // We have a cell, so need another level of dereferencing.
2065  __ Ldr(scratch1, MemOperand(scratch1));
2066  __ Str(map, FieldMemOperand(scratch1, Cell::kValueOffset));
2067  } else {
2068  __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
2069  __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
2070  }
2071 
2072  Label return_true, return_result;
2073  {
2074  // Loop through the prototype chain looking for the function prototype.
2075  Register chain_map = x1;
2076  Register chain_prototype = x14;
2077  Register null_value = x15;
2078  Label loop;
2079  __ Ldr(chain_prototype, FieldMemOperand(map, Map::kPrototypeOffset));
2080  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
2081  // Speculatively set a result.
2082  __ Mov(result, res_false);
2083 
2084  __ Bind(&loop);
2085 
2086  // If the chain prototype is the object prototype, return true.
2087  __ Cmp(chain_prototype, prototype);
2088  __ B(eq, &return_true);
2089 
2090  // If the chain prototype is null, we've reached the end of the chain, so
2091  // return false.
2092  __ Cmp(chain_prototype, null_value);
2093  __ B(eq, &return_result);
2094 
2095  // Otherwise, load the next prototype in the chain, and loop.
2096  __ Ldr(chain_map, FieldMemOperand(chain_prototype, HeapObject::kMapOffset));
2097  __ Ldr(chain_prototype, FieldMemOperand(chain_map, Map::kPrototypeOffset));
2098  __ B(&loop);
2099  }
2100 
2101  // Return sequence when no arguments are on the stack.
2102  // We cannot fall through to here.
2103  __ Bind(&return_true);
2104  __ Mov(result, res_true);
2105  __ Bind(&return_result);
2106  if (HasCallSiteInlineCheck()) {
2107  ASSERT(ReturnTrueFalseObject());
2108  __ Add(map_check_site, map_check_site, kDeltaToLoadBoolResult);
2109  __ GetRelocatedValueLocation(map_check_site, scratch2);
2110  __ Str(result, MemOperand(scratch2));
2111  } else {
2112  __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
2113  }
2114  __ Ret();
2115 
2116  Label object_not_null, object_not_null_or_smi;
2117 
2118  __ Bind(&not_js_object);
2119  Register object_type = x14;
2120  // x0 result result return register (uninit)
2121  // x10 function pointer to function
2122  // x11 object pointer to object
2123  // x14 object_type type of object (uninit)
2124 
2125  // Before null, smi and string checks, check that the rhs is a function.
2126  // For a non-function rhs, an exception must be thrown.
2127  __ JumpIfSmi(function, &slow);
2128  __ JumpIfNotObjectType(
2129  function, scratch1, object_type, JS_FUNCTION_TYPE, &slow);
2130 
2131  __ Mov(result, res_false);
2132 
2133  // Null is not instance of anything.
2134  __ Cmp(object_type, Operand(masm->isolate()->factory()->null_value()));
2135  __ B(ne, &object_not_null);
2136  __ Ret();
2137 
2138  __ Bind(&object_not_null);
2139  // Smi values are not instances of anything.
2140  __ JumpIfNotSmi(object, &object_not_null_or_smi);
2141  __ Ret();
2142 
2143  __ Bind(&object_not_null_or_smi);
2144  // String values are not instances of anything.
2145  __ IsObjectJSStringType(object, scratch2, &slow);
2146  __ Ret();
2147 
2148  // Slow-case. Tail call builtin.
2149  __ Bind(&slow);
2150  {
2151  FrameScope scope(masm, StackFrame::INTERNAL);
2152  // Arguments have either been passed into registers or have been previously
2153  // popped. We need to push them before calling builtin.
2154  __ Push(object, function);
2155  __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
2156  }
2157  if (ReturnTrueFalseObject()) {
2158  // Reload true/false because they were clobbered in the builtin call.
2159  __ LoadTrueFalseRoots(res_true, res_false);
2160  __ Cmp(result, 0);
2161  __ Csel(result, res_true, res_false, eq);
2162  }
2163  __ Ret();
2164 }
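// Illustrative sketch (not part of the stub): the prototype-chain walk that
// the loop above implements, written over hypothetical map()/prototype()
// accessors. It returns true exactly where the loop branches to &return_true.
//
//   bool ChainContains(Object* object, Object* prototype, Object* null_value) {
//     Object* chain = object->map()->prototype();
//     while (chain != null_value) {
//       if (chain == prototype) return true;
//       chain = chain->map()->prototype();
//     }
//     return false;
//   }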
2165 
2166 
2167 Register InstanceofStub::left() {
2168  // Object to check (instanceof lhs).
2169  return x11;
2170 }
2171 
2172 
2173 Register InstanceofStub::right() {
2174  // Constructor function (instanceof rhs).
2175  return x10;
2176 }
2177 
2178 
2179 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
2180  Register arg_count = x0;
2181  Register key = x1;
2182 
2183  // The displacement is the offset of the last parameter (if any) relative
2184  // to the frame pointer.
2185  static const int kDisplacement =
2186  StandardFrameConstants::kCallerSPOffset - kPointerSize;
2187 
2188  // Check that the key is a smi.
2189  Label slow;
2190  __ JumpIfNotSmi(key, &slow);
2191 
2192  // Check if the calling frame is an arguments adaptor frame.
2193  Register local_fp = x11;
2194  Register caller_fp = x11;
2195  Register caller_ctx = x12;
2196  Label skip_adaptor;
2197  __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2198  __ Ldr(caller_ctx, MemOperand(caller_fp,
2199  StandardFrameConstants::kContextOffset));
2200  __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
2201  __ Csel(local_fp, fp, caller_fp, ne);
2202  __ B(ne, &skip_adaptor);
2203 
2204  // Load the actual arguments limit found in the arguments adaptor frame.
2205  __ Ldr(arg_count, MemOperand(caller_fp,
2206  ArgumentsAdaptorFrameConstants::kLengthOffset));
2207  __ Bind(&skip_adaptor);
2208 
2209  // Check index against formal parameters count limit. Use unsigned comparison
2210  // to get negative check for free: branch if key < 0 or key >= arg_count.
2211  __ Cmp(key, arg_count);
2212  __ B(hs, &slow);
2213 
2214  // Read the argument from the stack and return it.
2215  __ Sub(x10, arg_count, key);
2216  __ Add(x10, local_fp, Operand::UntagSmiAndScale(x10, kPointerSizeLog2));
2217  __ Ldr(x0, MemOperand(x10, kDisplacement));
2218  __ Ret();
2219 
2220  // Slow case: handle non-smi or out-of-bounds access to arguments by calling
2221  // the runtime system.
2222  __ Bind(&slow);
2223  __ Push(key);
2224  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
2225 }
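// Illustrative sketch of the element address computation above. 'key' and
// 'arg_count' arrive as smis; Operand::UntagSmiAndScale untags the difference
// and scales it by the pointer size in a single operand.
//
//   intptr_t slot = arg_count - key;                        // 1-based from the end
//   Object** addr = reinterpret_cast<Object**>(
//       local_fp + slot * kPointerSize + kDisplacement);    // kDisplacement as
//   return *addr;                                           // defined above.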
2226 
2227 
2228 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
2229  // Stack layout on entry.
2230  // jssp[0]: number of parameters (tagged)
2231  // jssp[8]: address of receiver argument
2232  // jssp[16]: function
2233 
2234  // Check if the calling frame is an arguments adaptor frame.
2235  Label runtime;
2236  Register caller_fp = x10;
2237  __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2238  // Load and untag the context.
2239  STATIC_ASSERT((kSmiShift / kBitsPerByte) == 4);
2240  __ Ldr(w11, MemOperand(caller_fp, StandardFrameConstants::kContextOffset +
2241  (kSmiShift / kBitsPerByte)));
2242  __ Cmp(w11, StackFrame::ARGUMENTS_ADAPTOR);
2243  __ B(ne, &runtime);
2244 
2245  // Patch the arguments.length and parameters pointer in the current frame.
2246  __ Ldr(x11, MemOperand(caller_fp,
2247  ArgumentsAdaptorFrameConstants::kLengthOffset));
2248  __ Poke(x11, 0 * kXRegSize);
2249  __ Add(x10, caller_fp, Operand::UntagSmiAndScale(x11, kPointerSizeLog2));
2250  __ Add(x10, x10, StandardFrameConstants::kCallerSPOffset);
2251  __ Poke(x10, 1 * kXRegSize);
2252 
2253  __ Bind(&runtime);
2254  __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
2255 }
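// Illustrative sketch of the adaptor-frame patch above: when the caller went
// through an arguments adaptor, the real argument count and the pointer to
// the receiver's argument slots are taken from that frame before falling
// through to the runtime call.
//
//   arg_count_smi = caller_fp[adaptor length slot];                 // tagged count
//   params        = caller_fp + SmiUntag(arg_count_smi) * kPointerSize
//                   + StandardFrameConstants::kCallerSPOffset;      // assumed offset
//   // Both values are poked back into the stub's slots at jssp[0] and jssp[8].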
2256 
2257 
2258 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
2259  // Stack layout on entry.
2260  // jssp[0]: number of parameters (tagged)
2261  // jssp[8]: address of receiver argument
2262  // jssp[16]: function
2263  //
2264  // Returns pointer to result object in x0.
2265 
2266  // Note: arg_count_smi is an alias of param_count_smi.
2267  Register arg_count_smi = x3;
2268  Register param_count_smi = x3;
2269  Register param_count = x7;
2270  Register recv_arg = x14;
2271  Register function = x4;
2272  __ Pop(param_count_smi, recv_arg, function);
2273  __ SmiUntag(param_count, param_count_smi);
2274 
2275  // Check if the calling frame is an arguments adaptor frame.
2276  Register caller_fp = x11;
2277  Register caller_ctx = x12;
2278  Label runtime;
2279  Label adaptor_frame, try_allocate;
2280  __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2281  __ Ldr(caller_ctx, MemOperand(caller_fp,
2282  StandardFrameConstants::kContextOffset));
2283  __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
2284  __ B(eq, &adaptor_frame);
2285 
2286  // No adaptor, parameter count = argument count.
2287 
2288  // x1 mapped_params number of mapped params, min(params, args) (uninit)
2289  // x2 arg_count number of function arguments (uninit)
2290  // x3 arg_count_smi number of function arguments (smi)
2291  // x4 function function pointer
2292  // x7 param_count number of function parameters
2293  // x11 caller_fp caller's frame pointer
2294  // x14 recv_arg pointer to receiver arguments
2295 
2296  Register arg_count = x2;
2297  __ Mov(arg_count, param_count);
2298  __ B(&try_allocate);
2299 
2300  // We have an adaptor frame. Patch the parameters pointer.
2301  __ Bind(&adaptor_frame);
2302  __ Ldr(arg_count_smi,
2303  MemOperand(caller_fp,
2304  ArgumentsAdaptorFrameConstants::kLengthOffset));
2305  __ SmiUntag(arg_count, arg_count_smi);
2306  __ Add(x10, caller_fp, Operand(arg_count, LSL, kPointerSizeLog2));
2307  __ Add(recv_arg, x10, StandardFrameConstants::kCallerSPOffset);
2308 
2309  // Compute the mapped parameter count = min(param_count, arg_count)
2310  Register mapped_params = x1;
2311  __ Cmp(param_count, arg_count);
2312  __ Csel(mapped_params, param_count, arg_count, lt);
2313 
2314  __ Bind(&try_allocate);
2315 
2316  // x0 alloc_obj pointer to allocated objects: param map, backing
2317  // store, arguments (uninit)
2318  // x1 mapped_params number of mapped parameters, min(params, args)
2319  // x2 arg_count number of function arguments
2320  // x3 arg_count_smi number of function arguments (smi)
2321  // x4 function function pointer
2322  // x7 param_count number of function parameters
2323  // x10 size size of objects to allocate (uninit)
2324  // x14 recv_arg pointer to receiver arguments
2325 
2326  // Compute the size of backing store, parameter map, and arguments object.
2327  // 1. Parameter map, has two extra words containing context and backing
2328  // store.
2329  const int kParameterMapHeaderSize =
2330  FixedArray::kHeaderSize + 2 * kPointerSize;
2331 
2332  // Calculate the parameter map size, assuming it exists.
2333  Register size = x10;
2334  __ Mov(size, Operand(mapped_params, LSL, kPointerSizeLog2));
2335  __ Add(size, size, kParameterMapHeaderSize);
2336 
2337  // If there are no mapped parameters, set the running size total to zero.
2338  // Otherwise, use the parameter map size calculated earlier.
2339  __ Cmp(mapped_params, 0);
2340  __ CzeroX(size, eq);
2341 
2342  // 2. Add the size of the backing store and arguments object.
2343  __ Add(size, size, Operand(arg_count, LSL, kPointerSizeLog2));
2344  __ Add(size, size,
2345  FixedArray::kHeaderSize + Heap::kSloppyArgumentsObjectSize);
2346 
2347  // Do the allocation of all three objects in one go. Assign this to x0, as it
2348  // will be returned to the caller.
2349  Register alloc_obj = x0;
2350  __ Allocate(size, alloc_obj, x11, x12, &runtime, TAG_OBJECT);
2351 
2352  // Get the arguments boilerplate from the current (global) context.
2353 
2354  // x0 alloc_obj pointer to allocated objects (param map, backing
2355  // store, arguments)
2356  // x1 mapped_params number of mapped parameters, min(params, args)
2357  // x2 arg_count number of function arguments
2358  // x3 arg_count_smi number of function arguments (smi)
2359  // x4 function function pointer
2360  // x7 param_count number of function parameters
2361  // x11 args_offset offset to args (or aliased args) boilerplate (uninit)
2362  // x14 recv_arg pointer to receiver arguments
2363 
2364  Register global_object = x10;
2365  Register global_ctx = x10;
2366  Register args_offset = x11;
2367  Register aliased_args_offset = x10;
2368  __ Ldr(global_object, GlobalObjectMemOperand());
2369  __ Ldr(global_ctx, FieldMemOperand(global_object,
2370  GlobalObject::kNativeContextOffset));
2371 
2372  __ Ldr(args_offset,
2373  ContextMemOperand(global_ctx,
2374  Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX));
2375  __ Ldr(aliased_args_offset,
2376  ContextMemOperand(global_ctx,
2377  Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX));
2378  __ Cmp(mapped_params, 0);
2379  __ CmovX(args_offset, aliased_args_offset, ne);
2380 
2381  // Copy the JS object part.
2382  __ CopyFields(alloc_obj, args_offset, CPURegList(x10, x12, x13),
2383  JSObject::kHeaderSize / kPointerSize);
2384 
2385  // Set up the callee in-object property.
2386  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
2387  const int kCalleeOffset = JSObject::kHeaderSize +
2388  Heap::kArgumentsCalleeIndex * kPointerSize;
2389  __ Str(function, FieldMemOperand(alloc_obj, kCalleeOffset));
2390 
2391  // Use the length and set that as an in-object property.
2392  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2393  const int kLengthOffset = JSObject::kHeaderSize +
2394  Heap::kArgumentsLengthIndex * kPointerSize;
2395  __ Str(arg_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
2396 
2397  // Set up the elements pointer in the allocated arguments object.
2398  // If we allocated a parameter map, "elements" will point there, otherwise
2399  // it will point to the backing store.
2400 
2401  // x0 alloc_obj pointer to allocated objects (param map, backing
2402  // store, arguments)
2403  // x1 mapped_params number of mapped parameters, min(params, args)
2404  // x2 arg_count number of function arguments
2405  // x3 arg_count_smi number of function arguments (smi)
2406  // x4 function function pointer
2407  // x5 elements pointer to parameter map or backing store (uninit)
2408  // x6 backing_store pointer to backing store (uninit)
2409  // x7 param_count number of function parameters
2410  // x14 recv_arg pointer to receiver arguments
2411 
2412  Register elements = x5;
2413  __ Add(elements, alloc_obj, Heap::kSloppyArgumentsObjectSize);
2414  __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
2415 
2416  // Initialize parameter map. If there are no mapped arguments, we're done.
2417  Label skip_parameter_map;
2418  __ Cmp(mapped_params, 0);
2419  // Set up backing store address, because it is needed later for filling in
2420  // the unmapped arguments.
2421  Register backing_store = x6;
2422  __ CmovX(backing_store, elements, eq);
2423  __ B(eq, &skip_parameter_map);
2424 
2425  __ LoadRoot(x10, Heap::kSloppyArgumentsElementsMapRootIndex);
2426  __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
2427  __ Add(x10, mapped_params, 2);
2428  __ SmiTag(x10);
2429  __ Str(x10, FieldMemOperand(elements, FixedArray::kLengthOffset));
2430  __ Str(cp, FieldMemOperand(elements,
2431  FixedArray::kHeaderSize + 0 * kPointerSize));
2432  __ Add(x10, elements, Operand(mapped_params, LSL, kPointerSizeLog2));
2433  __ Add(x10, x10, kParameterMapHeaderSize);
2434  __ Str(x10, FieldMemOperand(elements,
2435  FixedArray::kHeaderSize + 1 * kPointerSize));
2436 
2437  // Copy the parameter slots and the holes in the arguments.
2438  // We need to fill in mapped_parameter_count slots. Then index the context,
2439  // where parameters are stored in reverse order, at:
2440  //
2441  // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS + parameter_count - 1
2442  //
2443  // The mapped parameter thus needs to get indices:
2444  //
2445  // MIN_CONTEXT_SLOTS + parameter_count - 1 ..
2446  // MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count
2447  //
2448  // We loop from right to left.
2449 
2450  // x0 alloc_obj pointer to allocated objects (param map, backing
2451  // store, arguments)
2452  // x1 mapped_params number of mapped parameters, min(params, args)
2453  // x2 arg_count number of function arguments
2454  // x3 arg_count_smi number of function arguments (smi)
2455  // x4 function function pointer
2456  // x5 elements pointer to parameter map or backing store (uninit)
2457  // x6 backing_store pointer to backing store (uninit)
2458  // x7 param_count number of function parameters
2459  // x11 loop_count parameter loop counter (uninit)
2460  // x12 index parameter index (smi, uninit)
2461  // x13 the_hole hole value (uninit)
2462  // x14 recv_arg pointer to receiver arguments
2463 
2464  Register loop_count = x11;
2465  Register index = x12;
2466  Register the_hole = x13;
2467  Label parameters_loop, parameters_test;
2468  __ Mov(loop_count, mapped_params);
2469  __ Add(index, param_count, static_cast<int>(Context::MIN_CONTEXT_SLOTS));
2470  __ Sub(index, index, mapped_params);
2471  __ SmiTag(index);
2472  __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
2473  __ Add(backing_store, elements, Operand(loop_count, LSL, kPointerSizeLog2));
2474  __ Add(backing_store, backing_store, kParameterMapHeaderSize);
2475 
2476  __ B(&parameters_test);
2477 
2478  __ Bind(&parameters_loop);
2479  __ Sub(loop_count, loop_count, 1);
2480  __ Mov(x10, Operand(loop_count, LSL, kPointerSizeLog2));
2481  __ Add(x10, x10, kParameterMapHeaderSize - kHeapObjectTag);
2482  __ Str(index, MemOperand(elements, x10));
2483  __ Sub(x10, x10, kParameterMapHeaderSize - FixedArray::kHeaderSize);
2484  __ Str(the_hole, MemOperand(backing_store, x10));
2485  __ Add(index, index, Smi::FromInt(1));
2486  __ Bind(&parameters_test);
2487  __ Cbnz(loop_count, &parameters_loop);
2488 
2489  __ Bind(&skip_parameter_map);
2490  // Copy arguments header and remaining slots (if there are any.)
2491  __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
2492  __ Str(x10, FieldMemOperand(backing_store, FixedArray::kMapOffset));
2493  __ Str(arg_count_smi, FieldMemOperand(backing_store,
2494  FixedArray::kLengthOffset));
2495 
2496  // x0 alloc_obj pointer to allocated objects (param map, backing
2497  // store, arguments)
2498  // x1 mapped_params number of mapped parameters, min(params, args)
2499  // x2 arg_count number of function arguments
2500  // x4 function function pointer
2501  // x3 arg_count_smi number of function arguments (smi)
2502  // x6 backing_store pointer to backing store (uninit)
2503  // x14 recv_arg pointer to receiver arguments
2504 
2505  Label arguments_loop, arguments_test;
2506  __ Mov(x10, mapped_params);
2507  __ Sub(recv_arg, recv_arg, Operand(x10, LSL, kPointerSizeLog2));
2508  __ B(&arguments_test);
2509 
2510  __ Bind(&arguments_loop);
2511  __ Sub(recv_arg, recv_arg, kPointerSize);
2512  __ Ldr(x11, MemOperand(recv_arg));
2513  __ Add(x12, backing_store, Operand(x10, LSL, kPointerSizeLog2));
2514  __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
2515  __ Add(x10, x10, 1);
2516 
2517  __ Bind(&arguments_test);
2518  __ Cmp(x10, arg_count);
2519  __ B(lt, &arguments_loop);
2520 
2521  __ Ret();
2522 
2523  // Do the runtime call to allocate the arguments object.
2524  __ Bind(&runtime);
2525  __ Push(function, recv_arg, arg_count_smi);
2526  __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
2527 }
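// Illustrative sketch of the single-allocation size computed above for the
// sloppy arguments object (byte counts; names as in the stub, and the final
// header term is an assumption based on the allocation layout):
//
//   size_t size = 0;
//   if (mapped_params > 0) {                       // parameter map is optional
//     size = kParameterMapHeaderSize + mapped_params * kPointerSize;
//   }
//   size += arg_count * kPointerSize;              // backing store elements
//   size += FixedArray::kHeaderSize + Heap::kSloppyArgumentsObjectSize;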
2528 
2529 
2530 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
2531  // Stack layout on entry.
2532  // jssp[0]: number of parameters (tagged)
2533  // jssp[8]: address of receiver argument
2534  // jssp[16]: function
2535  //
2536  // Returns pointer to result object in x0.
2537 
2538  // Get the stub arguments from the frame, and make an untagged copy of the
2539  // parameter count.
2540  Register param_count_smi = x1;
2541  Register params = x2;
2542  Register function = x3;
2543  Register param_count = x13;
2544  __ Pop(param_count_smi, params, function);
2545  __ SmiUntag(param_count, param_count_smi);
2546 
2547  // Test if arguments adaptor needed.
2548  Register caller_fp = x11;
2549  Register caller_ctx = x12;
2550  Label try_allocate, runtime;
2551  __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2552  __ Ldr(caller_ctx, MemOperand(caller_fp,
2553  StandardFrameConstants::kContextOffset));
2554  __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
2555  __ B(ne, &try_allocate);
2556 
2557  // x1 param_count_smi number of parameters passed to function (smi)
2558  // x2 params pointer to parameters
2559  // x3 function function pointer
2560  // x11 caller_fp caller's frame pointer
2561  // x13 param_count number of parameters passed to function
2562 
2563  // Patch the argument length and parameters pointer.
2564  __ Ldr(param_count_smi,
2565  MemOperand(caller_fp,
2566  ArgumentsAdaptorFrameConstants::kLengthOffset));
2567  __ SmiUntag(param_count, param_count_smi);
2568  __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
2569  __ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
2570 
2571  // Try the new space allocation. Start out with computing the size of the
2572  // arguments object and the elements array in words.
2573  Register size = x10;
2574  __ Bind(&try_allocate);
2575  __ Add(size, param_count, FixedArray::kHeaderSize / kPointerSize);
2576  __ Cmp(param_count, 0);
2577  __ CzeroX(size, eq);
2578  __ Add(size, size, Heap::kStrictArgumentsObjectSize / kPointerSize);
2579 
2580  // Do the allocation of both objects in one go. Assign this to x0, as it will
2581  // be returned to the caller.
2582  Register alloc_obj = x0;
2583  __ Allocate(size, alloc_obj, x11, x12, &runtime,
2584  static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
2585 
2586  // Get the arguments boilerplate from the current (native) context.
2587  Register global_object = x10;
2588  Register global_ctx = x10;
2589  Register args_offset = x4;
2590  __ Ldr(global_object, GlobalObjectMemOperand());
2591  __ Ldr(global_ctx, FieldMemOperand(global_object,
2592  GlobalObject::kNativeContextOffset));
2593  __ Ldr(args_offset,
2594  ContextMemOperand(global_ctx,
2595  Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX));
2596 
2597  // x0 alloc_obj pointer to allocated objects: parameter array and
2598  // arguments object
2599  // x1 param_count_smi number of parameters passed to function (smi)
2600  // x2 params pointer to parameters
2601  // x3 function function pointer
2602  // x4 args_offset offset to arguments boilerplate
2603  // x13 param_count number of parameters passed to function
2604 
2605  // Copy the JS object part.
2606  __ CopyFields(alloc_obj, args_offset, CPURegList(x5, x6, x7),
2607  JSObject::kHeaderSize / kPointerSize);
2608 
2609  // Set the smi-tagged length as an in-object property.
2610  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2611  const int kLengthOffset = JSObject::kHeaderSize +
2612  Heap::kArgumentsLengthIndex * kPointerSize;
2613  __ Str(param_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
2614 
2615  // If there are no actual arguments, we're done.
2616  Label done;
2617  __ Cbz(param_count, &done);
2618 
2619  // Set up the elements pointer in the allocated arguments object and
2620  // initialize the header in the elements fixed array.
2621  Register elements = x5;
2622  __ Add(elements, alloc_obj, Heap::kStrictArgumentsObjectSize);
2623  __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
2624  __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
2625  __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
2626  __ Str(param_count_smi, FieldMemOperand(elements, FixedArray::kLengthOffset));
2627 
2628  // x0 alloc_obj pointer to allocated objects: parameter array and
2629  // arguments object
2630  // x1 param_count_smi number of parameters passed to function (smi)
2631  // x2 params pointer to parameters
2632  // x3 function function pointer
2633  // x4 array pointer to array slot (uninit)
2634  // x5 elements pointer to elements array of alloc_obj
2635  // x13 param_count number of parameters passed to function
2636 
2637  // Copy the fixed array slots.
2638  Label loop;
2639  Register array = x4;
2640  // Set up pointer to first array slot.
2641  __ Add(array, elements, FixedArray::kHeaderSize - kHeapObjectTag);
2642 
2643  __ Bind(&loop);
2644  // Pre-decrement the parameters pointer by kPointerSize on each iteration.
2645  // Pre-decrement in order to skip receiver.
2646  __ Ldr(x10, MemOperand(params, -kPointerSize, PreIndex));
2647  // Post-increment elements by kPointerSize on each iteration.
2648  __ Str(x10, MemOperand(array, kPointerSize, PostIndex));
2649  __ Sub(param_count, param_count, 1);
2650  __ Cbnz(param_count, &loop);
2651 
2652  // Return from stub.
2653  __ Bind(&done);
2654  __ Ret();
2655 
2656  // Do the runtime call to allocate the arguments object.
2657  __ Bind(&runtime);
2658  __ Push(function, params, param_count_smi);
2659  __ TailCallRuntime(Runtime::kHiddenNewStrictArgumentsFast, 3, 1);
2660 }
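// Illustrative sketch of the copy loop above: the parameters pointer is
// pre-decremented (which skips the receiver slot on the first iteration) and
// the destination pointer is post-incremented.
//
//   while (param_count-- > 0) {
//     *array++ = *--params;   // Ldr with PreIndex, Str with PostIndex.
//   }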
2661 
2662 
2663 void RegExpExecStub::Generate(MacroAssembler* masm) {
2664 #ifdef V8_INTERPRETED_REGEXP
2665  __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
2666 #else // V8_INTERPRETED_REGEXP
2667 
2668  // Stack frame on entry.
2669  // jssp[0]: last_match_info (expected JSArray)
2670  // jssp[8]: previous index
2671  // jssp[16]: subject string
2672  // jssp[24]: JSRegExp object
2673  Label runtime;
2674 
2675  // Use of registers for this function.
2676 
2677  // Variable registers:
2678  // x10-x13 used as scratch registers
2679  // w0 string_type type of subject string
2680  // x2 jsstring_length subject string length
2681  // x3 jsregexp_object JSRegExp object
2682  // w4 string_encoding ASCII or UC16
2683  // w5 sliced_string_offset if the string is a SlicedString
2684  // offset to the underlying string
2685  // w6 string_representation groups attributes of the string:
2686  // - is a string
2687  // - type of the string
2688  // - is a short external string
2689  Register string_type = w0;
2690  Register jsstring_length = x2;
2691  Register jsregexp_object = x3;
2692  Register string_encoding = w4;
2693  Register sliced_string_offset = w5;
2694  Register string_representation = w6;
2695 
2696  // These are in callee save registers and will be preserved by the call
2697  // to the native RegExp code, as this code is called using the normal
2698  // C calling convention. When calling directly from generated code the
2699  // native RegExp code will not do a GC and therefore the contents of
2700  // these registers are safe to use after the call.
2701 
2702  // x19 subject subject string
2703  // x20 regexp_data RegExp data (FixedArray)
2704  // x21 last_match_info_elements info relative to the last match
2705  // (FixedArray)
2706  // x22 code_object generated regexp code
2707  Register subject = x19;
2708  Register regexp_data = x20;
2709  Register last_match_info_elements = x21;
2710  Register code_object = x22;
2711 
2712  // TODO(jbramley): Is it necessary to preserve these? I don't think ARM does.
2713  CPURegList used_callee_saved_registers(subject,
2714  regexp_data,
2715  last_match_info_elements,
2716  code_object);
2717  __ PushCPURegList(used_callee_saved_registers);
2718 
2719  // Stack frame.
2720  // jssp[0] : x19
2721  // jssp[8] : x20
2722  // jssp[16]: x21
2723  // jssp[24]: x22
2724  // jssp[32]: last_match_info (JSArray)
2725  // jssp[40]: previous index
2726  // jssp[48]: subject string
2727  // jssp[56]: JSRegExp object
2728 
2729  const int kLastMatchInfoOffset = 4 * kPointerSize;
2730  const int kPreviousIndexOffset = 5 * kPointerSize;
2731  const int kSubjectOffset = 6 * kPointerSize;
2732  const int kJSRegExpOffset = 7 * kPointerSize;
2733 
2734  // Ensure that a RegExp stack is allocated.
2735  Isolate* isolate = masm->isolate();
2736  ExternalReference address_of_regexp_stack_memory_address =
2737  ExternalReference::address_of_regexp_stack_memory_address(isolate);
2738  ExternalReference address_of_regexp_stack_memory_size =
2739  ExternalReference::address_of_regexp_stack_memory_size(isolate);
2740  __ Mov(x10, address_of_regexp_stack_memory_size);
2741  __ Ldr(x10, MemOperand(x10));
2742  __ Cbz(x10, &runtime);
2743 
2744  // Check that the first argument is a JSRegExp object.
2745  ASSERT(jssp.Is(__ StackPointer()));
2746  __ Peek(jsregexp_object, kJSRegExpOffset);
2747  __ JumpIfSmi(jsregexp_object, &runtime);
2748  __ JumpIfNotObjectType(jsregexp_object, x10, x10, JS_REGEXP_TYPE, &runtime);
2749 
2750  // Check that the RegExp has been compiled (data contains a fixed array).
2751  __ Ldr(regexp_data, FieldMemOperand(jsregexp_object, JSRegExp::kDataOffset));
2752  if (FLAG_debug_code) {
2753  STATIC_ASSERT(kSmiTag == 0);
2754  __ Tst(regexp_data, kSmiTagMask);
2755  __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
2756  __ CompareObjectType(regexp_data, x10, x10, FIXED_ARRAY_TYPE);
2757  __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
2758  }
2759 
2760  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2761  __ Ldr(x10, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
2762  __ Cmp(x10, Smi::FromInt(JSRegExp::IRREGEXP));
2763  __ B(ne, &runtime);
2764 
2765  // Check that the number of captures fits in the static offsets vector buffer.
2766  // We have always at least one capture for the whole match, plus additional
2767  // ones due to capturing parentheses. A capture takes 2 registers.
2768  // The number of capture registers then is (number_of_captures + 1) * 2.
2769  __ Ldrsw(x10,
2770  UntagSmiFieldMemOperand(regexp_data,
2771  JSRegExp::kIrregexpCaptureCountOffset));
2772  // Check (number_of_captures + 1) * 2 <= offsets vector size
2773  // number_of_captures * 2 <= offsets vector size - 2
2774  STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
2775  __ Add(x10, x10, x10);
2776  __ Cmp(x10, Isolate::kJSRegexpStaticOffsetsVectorSize - 2);
2777  __ B(hi, &runtime);
2778 
2779  // Initialize offset for possibly sliced string.
2780  __ Mov(sliced_string_offset, 0);
2781 
2782  ASSERT(jssp.Is(__ StackPointer()));
2783  __ Peek(subject, kSubjectOffset);
2784  __ JumpIfSmi(subject, &runtime);
2785 
2786  __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
2787  __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
2788 
2789  __ Ldr(jsstring_length, FieldMemOperand(subject, String::kLengthOffset));
2790 
2791  // Handle subject string according to its encoding and representation:
2792  // (1) Sequential string? If yes, go to (5).
2793  // (2) Anything but sequential or cons? If yes, go to (6).
2794  // (3) Cons string. If the string is flat, replace subject with first string.
2795  // Otherwise bailout.
2796  // (4) Is subject external? If yes, go to (7).
2797  // (5) Sequential string. Load regexp code according to encoding.
2798  // (E) Carry on.
2800 
2801  // Deferred code at the end of the stub:
2802  // (6) Not a long external string? If yes, go to (8).
2803  // (7) External string. Make it, offset-wise, look like a sequential string.
2804  // Go to (5).
2805  // (8) Short external string or not a string? If yes, bail out to runtime.
2806  // (9) Sliced string. Replace subject with parent. Go to (4).
2807 
2808  Label check_underlying; // (4)
2809  Label seq_string; // (5)
2810  Label not_seq_nor_cons; // (6)
2811  Label external_string; // (7)
2812  Label not_long_external; // (8)
2813 
2814  // (1) Sequential string? If yes, go to (5).
2815  __ And(string_representation,
2816  string_type,
2820  // We depend on the fact that Strings of type
2821  // SeqString and not ShortExternalString are defined
2822  // by the following pattern:
2823  // string_type: 0XX0 XX00
2824  // ^ ^ ^^
2825  // | | ||
2826  // | | is a SeqString
2827  // | is not a short external String
2828  // is a String
2831  __ Cbz(string_representation, &seq_string); // Go to (5).
2832 
2833  // (2) Anything but sequential or cons? If yes, go to (6).
2838  __ Cmp(string_representation, kExternalStringTag);
2839  __ B(ge, &not_seq_nor_cons); // Go to (6).
2840 
2841  // (3) Cons string. Check that it's flat.
2842  __ Ldr(x10, FieldMemOperand(subject, ConsString::kSecondOffset));
2843  __ JumpIfNotRoot(x10, Heap::kempty_stringRootIndex, &runtime);
2844  // Replace subject with first string.
2845  __ Ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
2846 
2847  // (4) Is subject external? If yes, go to (7).
2848  __ Bind(&check_underlying);
2849  // Reload the string type.
2850  __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
2851  __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
2853  // The underlying external string is never a short external string.
2856  __ TestAndBranchIfAnySet(string_type.X(),
2858  &external_string); // Go to (7).
2859 
2860  // (5) Sequential string. Load regexp code according to encoding.
2861  __ Bind(&seq_string);
2862 
2863  // Check that the third argument is a positive smi less than the subject
2864  // string length. A negative value will be greater (unsigned comparison).
2865  ASSERT(jssp.Is(__ StackPointer()));
2866  __ Peek(x10, kPreviousIndexOffset);
2867  __ JumpIfNotSmi(x10, &runtime);
2868  __ Cmp(jsstring_length, x10);
2869  __ B(ls, &runtime);
2870 
2871  // Argument 2 (x1): We need to load argument 2 (the previous index) into x1
2872  // before entering the exit frame.
2873  __ SmiUntag(x1, x10);
2874 
2875  // The third bit determines the string encoding in string_type.
2879 
2880  // Find the code object based on the assumptions above.
2881  // kDataAsciiCodeOffset and kDataUC16CodeOffset are adjacent; add an offset
2882  // of kPointerSize to reach the latter.
2885  __ Mov(x10, kPointerSize);
2886  // We will need the encoding later: ASCII = 0x04
2887  // UC16 = 0x00
2888  __ Ands(string_encoding, string_type, kStringEncodingMask);
2889  __ CzeroX(x10, ne);
2890  __ Add(x10, regexp_data, x10);
2891  __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataAsciiCodeOffset));
2892 
2893  // (E) Carry on. String handling is done.
2894 
2895  // Check that the irregexp code has been generated for the actual string
2896  // encoding. If it has, the field contains a code object; otherwise it contains
2897  // a smi (code flushing support).
2898  __ JumpIfSmi(code_object, &runtime);
2899 
2900  // All checks done. Now push arguments for native regexp code.
2901  __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1,
2902  x10,
2903  x11);
2904 
2905  // Isolates: note we add an additional parameter here (isolate pointer).
2906  __ EnterExitFrame(false, x10, 1);
2907  ASSERT(csp.Is(__ StackPointer()));
2908 
2909  // We have 9 arguments to pass to the regexp code, therefore we have to pass
2910  // one on the stack and the rest as registers.
2911 
2912  // Note that the placement of the argument on the stack isn't standard
2913  // AAPCS64:
2914  // csp[0]: Space for the return address placed by DirectCEntryStub.
2915  // csp[8]: Argument 9, the current isolate address.
2916 
2917  __ Mov(x10, ExternalReference::isolate_address(isolate));
2918  __ Poke(x10, kPointerSize);
2919 
2920  Register length = w11;
2921  Register previous_index_in_bytes = w12;
2922  Register start = x13;
2923 
2924  // Load start of the subject string.
2925  __ Add(start, subject, SeqString::kHeaderSize - kHeapObjectTag);
2926  // Load the length from the original subject string from the previous stack
2927  // frame. Therefore we have to use fp, which points exactly to two pointer
2928  // sizes below the previous sp. (Because creating a new stack frame pushes
2929  // the previous fp onto the stack and decrements sp by 2 * kPointerSize.)
2930  __ Ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
2931  __ Ldr(length, UntagSmiFieldMemOperand(subject, String::kLengthOffset));
2932 
2933  // Handle UC16 encoding, two bytes make one character.
2934  // string_encoding: if ASCII: 0x04
2935  // if UC16: 0x00
2937  __ Ubfx(string_encoding, string_encoding, 2, 1);
2938  __ Eor(string_encoding, string_encoding, 1);
2939  // string_encoding: if ASCII: 0
2940  // if UC16: 1
2941 
2942  // Convert string positions from characters to bytes.
2943  // Previous index is in x1.
2944  __ Lsl(previous_index_in_bytes, w1, string_encoding);
2945  __ Lsl(length, length, string_encoding);
2946  __ Lsl(sliced_string_offset, sliced_string_offset, string_encoding);
2947 
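// Illustrative note: after the Ubfx/Eor above, string_encoding is 0 for an
// ASCII (one-byte) subject and 1 for a UC16 (two-byte) subject, so the Lsl
// instructions scale character indices to byte offsets:
//
//   int shift = is_two_byte ? 1 : 0;
//   previous_index_in_bytes = previous_index << shift;
//   length_in_bytes         = length_in_chars << shift;
//   sliced_offset_in_bytes  = sliced_string_offset << shift;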
2948  // Argument 1 (x0): Subject string.
2949  __ Mov(x0, subject);
2950 
2951  // Argument 2 (x1): Previous index, already there.
2952 
2953  // Argument 3 (x2): Get the start of input.
2954  // Start of input = start of string + previous index + substring offset
2955  // (0 if the string
2956  // is not sliced).
2957  __ Add(w10, previous_index_in_bytes, sliced_string_offset);
2958  __ Add(x2, start, Operand(w10, UXTW));
2959 
2960  // Argument 4 (x3):
2961  // End of input = start of input + (length of input - previous index)
2962  __ Sub(w10, length, previous_index_in_bytes);
2963  __ Add(x3, x2, Operand(w10, UXTW));
2964 
2965  // Argument 5 (x4): static offsets vector buffer.
2966  __ Mov(x4, ExternalReference::address_of_static_offsets_vector(isolate));
2967 
2968  // Argument 6 (x5): Set the number of capture registers to zero to force
2969  // global regexps to behave as non-global. This stub is not used for global
2970  // regexps.
2971  __ Mov(x5, 0);
2972 
2973  // Argument 7 (x6): Start (high end) of backtracking stack memory area.
2974  __ Mov(x10, address_of_regexp_stack_memory_address);
2975  __ Ldr(x10, MemOperand(x10));
2976  __ Mov(x11, address_of_regexp_stack_memory_size);
2977  __ Ldr(x11, MemOperand(x11));
2978  __ Add(x6, x10, x11);
2979 
2980  // Argument 8 (x7): Indicate that this is a direct call from JavaScript.
2981  __ Mov(x7, 1);
2982 
2983  // Locate the code entry and call it.
2984  __ Add(code_object, code_object, Code::kHeaderSize - kHeapObjectTag);
2985  DirectCEntryStub stub;
2986  stub.GenerateCall(masm, code_object);
2987 
2988  __ LeaveExitFrame(false, x10, true);
2989 
2990  // The generated regexp code returns an int32 in w0.
2991  Label failure, exception;
2992  __ CompareAndBranch(w0, NativeRegExpMacroAssembler::FAILURE, eq, &failure);
2993  __ CompareAndBranch(w0,
2994  NativeRegExpMacroAssembler::EXCEPTION,
2995  eq,
2996  &exception);
2997  __ CompareAndBranch(w0, NativeRegExpMacroAssembler::RETRY, eq, &runtime);
2998 
2999  // Success: process the result from the native regexp code.
3000  Register number_of_capture_registers = x12;
3001 
3002  // Calculate number of capture registers (number_of_captures + 1) * 2
3003  // and store it in the last match info.
3004  __ Ldrsw(x10,
3005  UntagSmiFieldMemOperand(regexp_data,
3006  JSRegExp::kIrregexpCaptureCountOffset));
3007  __ Add(x10, x10, x10);
3008  __ Add(number_of_capture_registers, x10, 2);
3009 
3010  // Check that the fourth object is a JSArray object.
3011  ASSERT(jssp.Is(__ StackPointer()));
3012  __ Peek(x10, kLastMatchInfoOffset);
3013  __ JumpIfSmi(x10, &runtime);
3014  __ JumpIfNotObjectType(x10, x11, x11, JS_ARRAY_TYPE, &runtime);
3015 
3016  // Check that the JSArray is the fast case.
3017  __ Ldr(last_match_info_elements,
3018  FieldMemOperand(x10, JSArray::kElementsOffset));
3019  __ Ldr(x10,
3020  FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
3021  __ JumpIfNotRoot(x10, Heap::kFixedArrayMapRootIndex, &runtime);
3022 
3023  // Check that the last match info has space for the capture registers and the
3024  // additional information (overhead).
3025  // (number_of_captures + 1) * 2 + overhead <= last match info size
3026  // (number_of_captures * 2) + 2 + overhead <= last match info size
3027  // number_of_capture_registers + overhead <= last match info size
3028  __ Ldrsw(x10,
3029  UntagSmiFieldMemOperand(last_match_info_elements,
3030  FixedArray::kLengthOffset));
3031  __ Add(x11, number_of_capture_registers, RegExpImpl::kLastMatchOverhead);
3032  __ Cmp(x11, x10);
3033  __ B(gt, &runtime);
3034 
3035  // Store the capture count.
3036  __ SmiTag(x10, number_of_capture_registers);
3037  __ Str(x10,
3038  FieldMemOperand(last_match_info_elements,
3039  RegExpImpl::kLastCaptureCountOffset));
3040  // Store last subject and last input.
3041  __ Str(subject,
3042  FieldMemOperand(last_match_info_elements,
3043  RegExpImpl::kLastSubjectOffset));
3044  // Use x10 as the subject string in order to only need
3045  // one RecordWriteStub.
3046  __ Mov(x10, subject);
3047  __ RecordWriteField(last_match_info_elements,
3048  RegExpImpl::kLastSubjectOffset,
3049  x10,
3050  x11,
3051  kLRHasNotBeenSaved,
3052  kDontSaveFPRegs);
3053  __ Str(subject,
3054  FieldMemOperand(last_match_info_elements,
3055  RegExpImpl::kLastInputOffset));
3056  __ Mov(x10, subject);
3057  __ RecordWriteField(last_match_info_elements,
3058  RegExpImpl::kLastInputOffset,
3059  x10,
3060  x11,
3061  kLRHasNotBeenSaved,
3062  kDontSaveFPRegs);
3063 
3064  Register last_match_offsets = x13;
3065  Register offsets_vector_index = x14;
3066  Register current_offset = x15;
3067 
3068  // Get the static offsets vector filled by the native regexp code
3069  // and fill the last match info.
3070  ExternalReference address_of_static_offsets_vector =
3071  ExternalReference::address_of_static_offsets_vector(isolate);
3072  __ Mov(offsets_vector_index, address_of_static_offsets_vector);
3073 
3074  Label next_capture, done;
3075  // Capture register counter starts from number of capture registers and
3076  // iterates down to zero (inclusive).
3077  __ Add(last_match_offsets,
3078  last_match_info_elements,
3079  RegExpImpl::kFirstCaptureOffset - kHeapObjectTag);
3080  __ Bind(&next_capture);
3081  __ Subs(number_of_capture_registers, number_of_capture_registers, 2);
3082  __ B(mi, &done);
3083  // Read two 32 bit values from the static offsets vector buffer into
3084  // an X register
3085  __ Ldr(current_offset,
3086  MemOperand(offsets_vector_index, kWRegSize * 2, PostIndex));
3087  // Store the smi values in the last match info.
3088  __ SmiTag(x10, current_offset);
3089  // Clearing the 32 bottom bits gives us a Smi.
3090  STATIC_ASSERT(kSmiShift == 32);
3091  __ And(x11, current_offset, ~kWRegMask);
3092  __ Stp(x10,
3093  x11,
3094  MemOperand(last_match_offsets, kXRegSize * 2, PostIndex));
3095  __ B(&next_capture);
3096  __ Bind(&done);
3097 
3098  // Return last match info.
3099  __ Peek(x0, kLastMatchInfoOffset);
3100  __ PopCPURegList(used_callee_saved_registers);
3101  // Drop the 4 arguments of the stub from the stack.
3102  __ Drop(4);
3103  __ Ret();
3104 
3105  __ Bind(&exception);
3106  Register exception_value = x0;
3107  // A stack overflow (on the backtrack stack) may have occurred
3108  // in the RegExp code but no exception has been created yet.
3109  // If there is no pending exception, handle that in the runtime system.
3110  __ Mov(x10, Operand(isolate->factory()->the_hole_value()));
3111  __ Mov(x11,
3112  Operand(ExternalReference(Isolate::kPendingExceptionAddress,
3113  isolate)));
3114  __ Ldr(exception_value, MemOperand(x11));
3115  __ Cmp(x10, exception_value);
3116  __ B(eq, &runtime);
3117 
3118  __ Str(x10, MemOperand(x11)); // Clear pending exception.
3119 
3120  // Check if the exception is a termination. If so, throw as uncatchable.
3121  Label termination_exception;
3122  __ JumpIfRoot(exception_value,
3123  Heap::kTerminationExceptionRootIndex,
3124  &termination_exception);
3125 
3126  __ Throw(exception_value, x10, x11, x12, x13);
3127 
3128  __ Bind(&termination_exception);
3129  __ ThrowUncatchable(exception_value, x10, x11, x12, x13);
3130 
3131  __ Bind(&failure);
3132  __ Mov(x0, Operand(masm->isolate()->factory()->null_value()));
3133  __ PopCPURegList(used_callee_saved_registers);
3134  // Drop the 4 arguments of the stub from the stack.
3135  __ Drop(4);
3136  __ Ret();
3137 
3138  __ Bind(&runtime);
3139  __ PopCPURegList(used_callee_saved_registers);
3140  __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
3141 
3142  // Deferred code for string handling.
3143  // (6) Not a long external string? If yes, go to (8).
3144  __ Bind(&not_seq_nor_cons);
3145  // Compare flags are still set.
3146  __ B(ne, &not_long_external); // Go to (8).
3147 
3148  // (7) External string. Make it, offset-wise, look like a sequential string.
3149  __ Bind(&external_string);
3150  if (masm->emit_debug_code()) {
3151  // Assert that we do not have a cons or slice (indirect strings) here.
3152  // Sequential strings have already been ruled out.
3153  __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
3154  __ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
3155  __ Tst(x10, kIsIndirectStringMask);
3156  __ Check(eq, kExternalStringExpectedButNotFound);
3157  __ And(x10, x10, kStringRepresentationMask);
3158  __ Cmp(x10, 0);
3159  __ Check(ne, kExternalStringExpectedButNotFound);
3160  }
3161  __ Ldr(subject,
3162  FieldMemOperand(subject, ExternalString::kResourceDataOffset));
3163  // Move the pointer so that offset-wise, it looks like a sequential string.
3164  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
3165  __ Sub(subject, subject, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
3166  __ B(&seq_string); // Go to (5).
3167 
3168  // (8) If this is a short external string or not a string, bail out to
3169  // runtime.
3170  __ Bind(&not_long_external);
3172  __ TestAndBranchIfAnySet(string_representation,
3173  kIsNotStringMask | kShortExternalStringMask,
3174  &runtime);
3175 
3176  // (9) Sliced string. Replace subject with parent.
3177  __ Ldr(sliced_string_offset,
3178  UntagSmiFieldMemOperand(subject, SlicedString::kOffsetOffset));
3179  __ Ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
3180  __ B(&check_underlying); // Go to (4).
3181 #endif
3182 }
3183 
3184 
3185 static void GenerateRecordCallTarget(MacroAssembler* masm,
3186  Register argc,
3187  Register function,
3188  Register feedback_vector,
3189  Register index,
3190  Register scratch1,
3191  Register scratch2) {
3192  ASM_LOCATION("GenerateRecordCallTarget");
3193  ASSERT(!AreAliased(scratch1, scratch2,
3194  argc, function, feedback_vector, index));
3195  // Cache the called function in a feedback vector slot. Cache states are
3196  // uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
3197  // argc : number of arguments to the construct function
3198  // function : the function to call
3199  // feedback_vector : the feedback vector
3200  // index : slot in feedback vector (smi)
3201  Label initialize, done, miss, megamorphic, not_array_function;
3202 
3203  ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
3204  masm->isolate()->heap()->megamorphic_symbol());
3205  ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
3206  masm->isolate()->heap()->uninitialized_symbol());
3207 
3208  // Load the cache state.
3209  __ Add(scratch1, feedback_vector,
3210  Operand::UntagSmiAndScale(index, kPointerSizeLog2));
3211  __ Ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
3212 
3213  // A monomorphic cache hit or an already megamorphic state: invoke the
3214  // function without changing the state.
3215  __ Cmp(scratch1, function);
3216  __ B(eq, &done);
3217 
3218  if (!FLAG_pretenuring_call_new) {
3219  // If we came here, we need to see if we are the array function.
3220  // If we didn't have a matching function, and we didn't find the megamorph
3221  // sentinel, then we have in the slot either some other function or an
3222  // AllocationSite. Do a map check on the object in scratch1 register.
3223  __ Ldr(scratch2, FieldMemOperand(scratch1, AllocationSite::kMapOffset));
3224  __ JumpIfNotRoot(scratch2, Heap::kAllocationSiteMapRootIndex, &miss);
3225 
3226  // Make sure the function is the Array() function
3227  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
3228  __ Cmp(function, scratch1);
3229  __ B(ne, &megamorphic);
3230  __ B(&done);
3231  }
3232 
3233  __ Bind(&miss);
3234 
3235  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
3236  // megamorphic.
3237  __ JumpIfRoot(scratch1, Heap::kUninitializedSymbolRootIndex, &initialize);
3238  // MegamorphicSentinel is an immortal immovable object (undefined) so no
3239  // write-barrier is needed.
3240  __ Bind(&megamorphic);
3241  __ Add(scratch1, feedback_vector,
3242  Operand::UntagSmiAndScale(index, kPointerSizeLog2));
3243  __ LoadRoot(scratch2, Heap::kMegamorphicSymbolRootIndex);
3244  __ Str(scratch2, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
3245  __ B(&done);
3246 
3247  // An uninitialized cache is patched with the function or sentinel to
3248  // indicate the ElementsKind if function is the Array constructor.
3249  __ Bind(&initialize);
3250 
3251  if (!FLAG_pretenuring_call_new) {
3252  // Make sure the function is the Array() function
3253  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
3254  __ Cmp(function, scratch1);
3255  __ B(ne, &not_array_function);
3256 
3257  // The target function is the Array constructor,
3258  // Create an AllocationSite if we don't already have it, store it in the
3259  // slot.
3260  {
3261  FrameScope scope(masm, StackFrame::INTERNAL);
3262  CreateAllocationSiteStub create_stub;
3263 
3264  // Arguments register must be smi-tagged to call out.
3265  __ SmiTag(argc);
3266  __ Push(argc, function, feedback_vector, index);
3267 
3268  // CreateAllocationSiteStub expect the feedback vector in x2 and the slot
3269  // index in x3.
3270  ASSERT(feedback_vector.Is(x2) && index.Is(x3));
3271  __ CallStub(&create_stub);
3272 
3273  __ Pop(index, feedback_vector, function, argc);
3274  __ SmiUntag(argc);
3275  }
3276  __ B(&done);
3277 
3278  __ Bind(&not_array_function);
3279  }
3280 
3281  // An uninitialized cache is patched with the function.
3282 
3283  __ Add(scratch1, feedback_vector,
3284  Operand::UntagSmiAndScale(index, kPointerSizeLog2));
3285  __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
3286  __ Str(function, MemOperand(scratch1, 0));
3287 
3288  __ Push(function);
3289  __ RecordWrite(feedback_vector, scratch1, function, kLRHasNotBeenSaved,
3290  kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
3291  __ Pop(function);
3292 
3293  __ Bind(&done);
3294 }
3295 
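// Editorial sketch (not part of code-stubs-arm64.cc): the feedback-vector
// cache states that GenerateRecordCallTarget maintains, modelled in plain C++.
// The type and function names below are illustrative, not V8 API, and the
// Array-constructor/AllocationSite special case is omitted.
enum class CallCacheState { kUninitialized, kMonomorphic, kMegamorphic };

// A slot starts uninitialized, remembers the first callee (monomorphic), and
// degrades to megamorphic as soon as a different callee is observed.
CallCacheState RecordCallTarget(CallCacheState state,
                                const void* cached_target,
                                const void* called_target) {
  switch (state) {
    case CallCacheState::kUninitialized:
      return CallCacheState::kMonomorphic;       // patch the slot with the callee
    case CallCacheState::kMonomorphic:
      return cached_target == called_target
                 ? CallCacheState::kMonomorphic  // cache hit, state unchanged
                 : CallCacheState::kMegamorphic; // miss: give up on this slot
    case CallCacheState::kMegamorphic:
    default:
      return CallCacheState::kMegamorphic;       // stays megamorphic forever
  }
}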
3296 
3297 void CallFunctionStub::Generate(MacroAssembler* masm) {
3298  ASM_LOCATION("CallFunctionStub::Generate");
3299  // x1 function the function to call
3300  // x2 : feedback vector
3301  // x3 : slot in feedback vector (smi) (if x2 is not the megamorphic symbol)
3302  Register function = x1;
3303  Register cache_cell = x2;
3304  Register slot = x3;
3305  Register type = x4;
3306  Label slow, non_function, wrap, cont;
3307 
3308  // TODO(jbramley): This function has a lot of unnamed registers. Name them,
3309  // and tidy things up a bit.
3310 
3311  if (NeedsChecks()) {
3312  // Check that the function is really a JavaScript function.
3313  __ JumpIfSmi(function, &non_function);
3314 
3315  // Goto slow case if we do not have a function.
3316  __ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow);
3317 
3318  if (RecordCallTarget()) {
3319  GenerateRecordCallTarget(masm, x0, function, cache_cell, slot, x4, x5);
3320  // Type information was updated. Because we may call Array, which
3321  // expects either undefined or an AllocationSite in x2, we need
3322  // to set x2 to undefined.
3323  __ LoadRoot(cache_cell, Heap::kUndefinedValueRootIndex);
3324  }
3325  }
3326 
3327  // Fast-case: Invoke the function now.
3328  // x1 function pushed function
3329  ParameterCount actual(argc_);
3330 
3331  if (CallAsMethod()) {
3332  if (NeedsChecks()) {
3333  // Do not transform the receiver for strict mode functions.
3334  __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
3335  __ Ldr(w4, FieldMemOperand(x3, SharedFunctionInfo::kCompilerHintsOffset));
3336  __ Tbnz(w4, SharedFunctionInfo::kStrictModeFunction, &cont);
3337 
3338  // Do not transform the receiver for native (Compilerhints already in x3).
3339  __ Tbnz(w4, SharedFunctionInfo::kNative, &cont);
3340  }
3341 
3342  // Compute the receiver in sloppy mode.
3343  __ Peek(x3, argc_ * kPointerSize);
3344 
3345  if (NeedsChecks()) {
3346  __ JumpIfSmi(x3, &wrap);
3347  __ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt);
3348  } else {
3349  __ B(&wrap);
3350  }
3351 
3352  __ Bind(&cont);
3353  }
3354  __ InvokeFunction(function,
3355  actual,
3356  JUMP_FUNCTION,
3357  NullCallWrapper());
3358 
3359  if (NeedsChecks()) {
3360  // Slow-case: Non-function called.
3361  __ Bind(&slow);
3362  if (RecordCallTarget()) {
3363  // If there is a call target cache, mark it megamorphic in the
3364  // non-function case. MegamorphicSentinel is an immortal immovable object
3365  // (megamorphic symbol) so no write barrier is needed.
3366  ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
3367  masm->isolate()->heap()->megamorphic_symbol());
3368  __ Add(x12, cache_cell, Operand::UntagSmiAndScale(slot,
3369  kPointerSizeLog2));
3370  __ LoadRoot(x11, Heap::kMegamorphicSymbolRootIndex);
3371  __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
3372  }
3373  // Check for function proxy.
3374  // x10 : function type.
3375  __ CompareAndBranch(type, JS_FUNCTION_PROXY_TYPE, ne, &non_function);
3376  __ Push(function); // put proxy as additional argument
3377  __ Mov(x0, argc_ + 1);
3378  __ Mov(x2, 0);
3379  __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY);
3380  {
3381  Handle<Code> adaptor =
3382  masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
3383  __ Jump(adaptor, RelocInfo::CODE_TARGET);
3384  }
3385 
3386  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
3387  // of the original receiver from the call site).
3388  __ Bind(&non_function);
3389  __ Poke(function, argc_ * kXRegSize);
3390  __ Mov(x0, argc_); // Set up the number of arguments.
3391  __ Mov(x2, 0);
3392  __ GetBuiltinFunction(function, Builtins::CALL_NON_FUNCTION);
3393  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
3394  RelocInfo::CODE_TARGET);
3395  }
3396 
3397  if (CallAsMethod()) {
3398  __ Bind(&wrap);
3399  // Wrap the receiver and patch it back onto the stack.
3400  { FrameScope frame_scope(masm, StackFrame::INTERNAL);
3401  __ Push(x1, x3);
3402  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
3403  __ Pop(x1);
3404  }
3405  __ Poke(x0, argc_ * kPointerSize);
3406  __ B(&cont);
3407  }
3408 }
3409 
3410 
3411 void CallConstructStub::Generate(MacroAssembler* masm) {
3412  ASM_LOCATION("CallConstructStub::Generate");
3413  // x0 : number of arguments
3414  // x1 : the function to call
3415  // x2 : feedback vector
3416  // x3 : slot in feedback vector (smi) (if x2 is not the megamorphic symbol)
3417  Register function = x1;
3418  Label slow, non_function_call;
3419 
3420  // Check that the function is not a smi.
3421  __ JumpIfSmi(function, &non_function_call);
3422  // Check that the function is a JSFunction.
3423  Register object_type = x10;
3424  __ JumpIfNotObjectType(function, object_type, object_type, JS_FUNCTION_TYPE,
3425  &slow);
3426 
3427  if (RecordCallTarget()) {
3428  GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5);
3429 
3430  __ Add(x5, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
3431  if (FLAG_pretenuring_call_new) {
3432  // Put the AllocationSite from the feedback vector into x2.
3433  // By adding kPointerSize we encode that we know the AllocationSite
3434  // entry is at the feedback vector slot given by x3 + 1.
3435  __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize + kPointerSize));
3436  } else {
3437  Label feedback_register_initialized;
3438  // Put the AllocationSite from the feedback vector into x2, or undefined.
3439  __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize));
3440  __ Ldr(x5, FieldMemOperand(x2, AllocationSite::kMapOffset));
3441  __ JumpIfRoot(x5, Heap::kAllocationSiteMapRootIndex,
3442  &feedback_register_initialized);
3443  __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
3444  __ bind(&feedback_register_initialized);
3445  }
3446 
3447  __ AssertUndefinedOrAllocationSite(x2, x5);
3448  }
3449 
3450  // Jump to the function-specific construct stub.
3451  Register jump_reg = x4;
3452  Register shared_func_info = jump_reg;
3453  Register cons_stub = jump_reg;
3454  Register cons_stub_code = jump_reg;
3455  __ Ldr(shared_func_info,
3456  FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3457  __ Ldr(cons_stub,
3458  FieldMemOperand(shared_func_info,
3459  SharedFunctionInfo::kConstructStubOffset));
3460  __ Add(cons_stub_code, cons_stub, Code::kHeaderSize - kHeapObjectTag);
3461  __ Br(cons_stub_code);
3462 
3463  Label do_call;
3464  __ Bind(&slow);
3465  __ Cmp(object_type, JS_FUNCTION_PROXY_TYPE);
3466  __ B(ne, &non_function_call);
3467  __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
3468  __ B(&do_call);
3469 
3470  __ Bind(&non_function_call);
3471  __ GetBuiltinFunction(x1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
3472 
3473  __ Bind(&do_call);
3474  // Set expected number of arguments to zero (not changing x0).
3475  __ Mov(x2, 0);
3476  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
3477  RelocInfo::CODE_TARGET);
3478 }
3479 
3480 
3481 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
3482  // If the receiver is a smi trigger the non-string case.
3483  __ JumpIfSmi(object_, receiver_not_string_);
3484 
3485  // Fetch the instance type of the receiver into result register.
3486  __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3487  __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3488 
3489  // If the receiver is not a string trigger the non-string case.
3490  __ TestAndBranchIfAnySet(result_, kIsNotStringMask, receiver_not_string_);
3491 
3492  // If the index is non-smi trigger the non-smi case.
3493  __ JumpIfNotSmi(index_, &index_not_smi_);
3494 
3495  __ Bind(&got_smi_index_);
3496  // Check for index out of range.
3497  __ Ldrsw(result_, UntagSmiFieldMemOperand(object_, String::kLengthOffset));
3498  __ Cmp(result_, Operand::UntagSmi(index_));
3499  __ B(ls, index_out_of_range_);
3500 
3501  __ SmiUntag(index_);
3502 
3503  StringCharLoadGenerator::Generate(masm,
3504  object_,
3505  index_.W(),
3506  result_,
3507  &call_runtime_);
3508  __ SmiTag(result_);
3509  __ Bind(&exit_);
3510 }
3511 
3512 
3513 void StringCharCodeAtGenerator::GenerateSlow(
3514  MacroAssembler* masm,
3515  const RuntimeCallHelper& call_helper) {
3516  __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
3517 
3518  __ Bind(&index_not_smi_);
3519  // If index is a heap number, try converting it to an integer.
3520  __ CheckMap(index_,
3521  result_,
3522  Heap::kHeapNumberMapRootIndex,
3523  index_not_number_,
3524  DONT_DO_SMI_CHECK);
3525  call_helper.BeforeCall(masm);
3526  // Save object_ on the stack and pass index_ as argument for runtime call.
3527  __ Push(object_, index_);
3528  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
3529  __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
3530  } else {
3531  ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
3532  // NumberToSmi discards numbers that are not exact integers.
3533  __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
3534  }
3535  // Save the conversion result before the pop instructions below
3536  // have a chance to overwrite it.
3537  __ Mov(index_, x0);
3538  __ Pop(object_);
3539  // Reload the instance type.
3540  __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3541  __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3542  call_helper.AfterCall(masm);
3543 
3544  // If index is still not a smi, it must be out of range.
3545  __ JumpIfNotSmi(index_, index_out_of_range_);
3546  // Otherwise, return to the fast path.
3547  __ B(&got_smi_index_);
3548 
3549  // Call runtime. We get here when the receiver is a string and the
3550  // index is a number, but the code of getting the actual character
3551  // is too complex (e.g., when the string needs to be flattened).
3552  __ Bind(&call_runtime_);
3553  call_helper.BeforeCall(masm);
3554  __ SmiTag(index_);
3555  __ Push(object_, index_);
3556  __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
3557  __ Mov(result_, x0);
3558  call_helper.AfterCall(masm);
3559  __ B(&exit_);
3560 
3561  __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
3562 }
3563 
3564 
3565 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
3566  __ JumpIfNotSmi(code_, &slow_case_);
3567  __ Cmp(code_, Smi::FromInt(String::kMaxOneByteCharCode));
3568  __ B(hi, &slow_case_);
3569 
3570  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
3571  // At this point code register contains smi tagged ASCII char code.
3573  __ Add(result_, result_, Operand(code_, LSR, kSmiShift - kPointerSizeLog2));
3574  __ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
3575  __ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_);
3576  __ Bind(&exit_);
3577 }
3578 
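// Editorial sketch (not part of code-stubs-arm64.cc): the Add with an LSR
// shift in GenerateFast turns a smi-tagged character code directly into a
// byte offset into the single-character string cache. With the payload in the
// top 32 bits, shifting right by (kSmiShift - kPointerSizeLog2) both untags
// the value and scales it by the pointer size in one step.
#include <cstdint>

uint64_t SingleCharCacheByteOffset(uint64_t tagged_char_code) {
  const unsigned kSmiShift = 32;        // asserted elsewhere in this file
  const unsigned kPointerSizeLog2 = 3;  // 64-bit pointers
  return tagged_char_code >> (kSmiShift - kPointerSizeLog2);
}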
3579 
3580 void StringCharFromCodeGenerator::GenerateSlow(
3581  MacroAssembler* masm,
3582  const RuntimeCallHelper& call_helper) {
3583  __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
3584 
3585  __ Bind(&slow_case_);
3586  call_helper.BeforeCall(masm);
3587  __ Push(code_);
3588  __ CallRuntime(Runtime::kCharFromCode, 1);
3589  __ Mov(result_, x0);
3590  call_helper.AfterCall(masm);
3591  __ B(&exit_);
3592 
3593  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
3594 }
3595 
3596 
3597 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
3598  // Inputs are in x0 (lhs) and x1 (rhs).
3599  ASSERT(state_ == CompareIC::SMI);
3600  ASM_LOCATION("ICCompareStub[Smis]");
3601  Label miss;
3602  // Bail out (to 'miss') unless both x0 and x1 are smis.
3603  __ JumpIfEitherNotSmi(x0, x1, &miss);
3604 
3605  if (GetCondition() == eq) {
3606  // For equality we do not care about the sign of the result.
3607  __ Sub(x0, x0, x1);
3608  } else {
3609  // Untag before subtracting to avoid handling overflow.
3610  __ SmiUntag(x1);
3611  __ Sub(x0, x1, Operand::UntagSmi(x0));
3612  }
3613  __ Ret();
3614 
3615  __ Bind(&miss);
3616  GenerateMiss(masm);
3617 }
3618 
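// Editorial sketch (not part of code-stubs-arm64.cc): why the subtractions in
// GenerateSmis work. On arm64 a smi keeps its 32-bit payload in the upper
// word (kSmiShift == 32, as asserted elsewhere in this file), so equality can
// subtract the tagged values directly, while the ordered case untags first so
// the 32-bit payloads cannot overflow a 64-bit subtraction.
#include <cstdint>

int64_t CompareSmisEquality(int64_t lhs_tagged, int64_t rhs_tagged) {
  return lhs_tagged - rhs_tagged;     // zero iff the two smis are equal
}

int64_t CompareSmisOrdered(int64_t lhs_tagged, int64_t rhs_tagged) {
  int64_t lhs = lhs_tagged >> 32;     // SmiUntag(x1)
  int64_t rhs = rhs_tagged >> 32;     // Operand::UntagSmi(x0)
  return lhs - rhs;                   // negative, zero or positive result
}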
3619 
3620 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
3621  ASSERT(state_ == CompareIC::NUMBER);
3622  ASM_LOCATION("ICCompareStub[HeapNumbers]");
3623 
3624  Label unordered, maybe_undefined1, maybe_undefined2;
3625  Label miss, handle_lhs, values_in_d_regs;
3626  Label untag_rhs, untag_lhs;
3627 
3628  Register result = x0;
3629  Register rhs = x0;
3630  Register lhs = x1;
3631  FPRegister rhs_d = d0;
3632  FPRegister lhs_d = d1;
3633 
3634  if (left_ == CompareIC::SMI) {
3635  __ JumpIfNotSmi(lhs, &miss);
3636  }
3637  if (right_ == CompareIC::SMI) {
3638  __ JumpIfNotSmi(rhs, &miss);
3639  }
3640 
3641  __ SmiUntagToDouble(rhs_d, rhs, kSpeculativeUntag);
3642  __ SmiUntagToDouble(lhs_d, lhs, kSpeculativeUntag);
3643 
3644  // Load rhs if it's a heap number.
3645  __ JumpIfSmi(rhs, &handle_lhs);
3646  __ CheckMap(rhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
3647  DONT_DO_SMI_CHECK);
3648  __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
3649 
3650  // Load lhs if it's a heap number.
3651  __ Bind(&handle_lhs);
3652  __ JumpIfSmi(lhs, &values_in_d_regs);
3653  __ CheckMap(lhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
3654  DONT_DO_SMI_CHECK);
3655  __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
3656 
3657  __ Bind(&values_in_d_regs);
3658  __ Fcmp(lhs_d, rhs_d);
3659  __ B(vs, &unordered); // Overflow flag set if either is NaN.
3660  STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
3661  __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
3662  __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0.
3663  __ Ret();
3664 
3665  __ Bind(&unordered);
3666  ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
3667  CompareIC::GENERIC);
3668  __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
3669 
3670  __ Bind(&maybe_undefined1);
3671  if (Token::IsOrderedRelationalCompareOp(op_)) {
3672  __ JumpIfNotRoot(rhs, Heap::kUndefinedValueRootIndex, &miss);
3673  __ JumpIfSmi(lhs, &unordered);
3674  __ JumpIfNotObjectType(lhs, x10, x10, HEAP_NUMBER_TYPE, &maybe_undefined2);
3675  __ B(&unordered);
3676  }
3677 
3678  __ Bind(&maybe_undefined2);
3679  if (Token::IsOrderedRelationalCompareOp(op_)) {
3680  __ JumpIfRoot(lhs, Heap::kUndefinedValueRootIndex, &unordered);
3681  }
3682 
3683  __ Bind(&miss);
3684  GenerateMiss(masm);
3685 }
3686 
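// Editorial sketch (not part of code-stubs-arm64.cc): how the Fcmp/Cset/Csinv
// sequence in GenerateNumbers folds an ordered double comparison into the
// LESS/EQUAL/GREATER convention (-1/0/1). NaN operands never reach this
// mapping; the stub branches to &unordered on the overflow flag first.
int MapDoubleComparisonToResult(double lhs, double rhs) {
  int result = (lhs > rhs) ? 1 : 0;   // Cset(result, gt)
  if (!(lhs >= rhs)) result = -1;     // Csinv(..., ge): ge fails => ~0 == -1
  return result;                      // lt => -1, eq => 0, gt => 1
}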
3687 
3688 void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
3689  ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
3690  ASM_LOCATION("ICCompareStub[InternalizedStrings]");
3691  Label miss;
3692 
3693  Register result = x0;
3694  Register rhs = x0;
3695  Register lhs = x1;
3696 
3697  // Check that both operands are heap objects.
3698  __ JumpIfEitherSmi(lhs, rhs, &miss);
3699 
3700  // Check that both operands are internalized strings.
3701  Register rhs_map = x10;
3702  Register lhs_map = x11;
3703  Register rhs_type = x10;
3704  Register lhs_type = x11;
3705  __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
3706  __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
3707  __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
3708  __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
3709 
3710  STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
3711  __ Orr(x12, lhs_type, rhs_type);
3712  __ TestAndBranchIfAnySet(
3713  x12, kIsNotStringMask | kIsNotInternalizedMask, &miss);
3714 
3715  // Internalized strings are compared by identity.
3716  STATIC_ASSERT(EQUAL == 0);
3717  __ Cmp(lhs, rhs);
3718  __ Cset(result, ne);
3719  __ Ret();
3720 
3721  __ Bind(&miss);
3722  GenerateMiss(masm);
3723 }
3724 
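// Editorial sketch (not part of code-stubs-arm64.cc): internalized strings
// (and the unique names handled next) are deduplicated, so pointer identity
// decides equality. With EQUAL == 0, the Cmp/Cset pair above only needs to
// produce zero for "same object" and any non-zero value otherwise.
int CompareByIdentity(const void* lhs, const void* rhs) {
  return (lhs == rhs) ? 0 : 1;  // mirrors Cmp(lhs, rhs); Cset(result, ne)
}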
3725 
3726 void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
3727  ASSERT(state_ == CompareIC::UNIQUE_NAME);
3728  ASM_LOCATION("ICCompareStub[UniqueNames]");
3729  ASSERT(GetCondition() == eq);
3730  Label miss;
3731 
3732  Register result = x0;
3733  Register rhs = x0;
3734  Register lhs = x1;
3735 
3736  Register lhs_instance_type = w2;
3737  Register rhs_instance_type = w3;
3738 
3739  // Check that both operands are heap objects.
3740  __ JumpIfEitherSmi(lhs, rhs, &miss);
3741 
3742  // Check that both operands are unique names. This leaves the instance
3743  // types loaded in lhs_instance_type and rhs_instance_type.
3744  __ Ldr(x10, FieldMemOperand(lhs, HeapObject::kMapOffset));
3745  __ Ldr(x11, FieldMemOperand(rhs, HeapObject::kMapOffset));
3746  __ Ldrb(lhs_instance_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
3747  __ Ldrb(rhs_instance_type, FieldMemOperand(x11, Map::kInstanceTypeOffset));
3748 
3749  // To avoid a miss, each instance type should be either SYMBOL_TYPE or it
3750  // should have kInternalizedTag set.
3751  __ JumpIfNotUniqueName(lhs_instance_type, &miss);
3752  __ JumpIfNotUniqueName(rhs_instance_type, &miss);
3753 
3754  // Unique names are compared by identity.
3755  STATIC_ASSERT(EQUAL == 0);
3756  __ Cmp(lhs, rhs);
3757  __ Cset(result, ne);
3758  __ Ret();
3759 
3760  __ Bind(&miss);
3761  GenerateMiss(masm);
3762 }
3763 
3764 
3765 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
3766  ASSERT(state_ == CompareIC::STRING);
3767  ASM_LOCATION("ICCompareStub[Strings]");
3768 
3769  Label miss;
3770 
3771  bool equality = Token::IsEqualityOp(op_);
3772 
3773  Register result = x0;
3774  Register rhs = x0;
3775  Register lhs = x1;
3776 
3777  // Check that both operands are heap objects.
3778  __ JumpIfEitherSmi(rhs, lhs, &miss);
3779 
3780  // Check that both operands are strings.
3781  Register rhs_map = x10;
3782  Register lhs_map = x11;
3783  Register rhs_type = x10;
3784  Register lhs_type = x11;
3785  __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
3786  __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
3787  __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
3788  __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
3790  __ Orr(x12, lhs_type, rhs_type);
3791  __ Tbnz(x12, MaskToBit(kIsNotStringMask), &miss);
3792 
3793  // Fast check for identical strings.
3794  Label not_equal;
3795  __ Cmp(lhs, rhs);
3796  __ B(ne, &not_equal);
3797  __ Mov(result, EQUAL);
3798  __ Ret();
3799 
3800  __ Bind(&not_equal);
3801  // Handle not identical strings
3802 
3803  // Check that both strings are internalized strings. If they are, we're done
3804  // because we already know they are not identical. We know they are both
3805  // strings.
3806  if (equality) {
3807  ASSERT(GetCondition() == eq);
3809  Label not_internalized_strings;
3810  __ Orr(x12, lhs_type, rhs_type);
3811  __ TestAndBranchIfAnySet(
3812  x12, kIsNotInternalizedMask, &not_internalized_strings);
3813  // Result is in rhs (x0), and not EQUAL, as rhs is not a smi.
3814  __ Ret();
3815  __ Bind(&not_internalized_strings);
3816  }
3817 
3818  // Check that both strings are sequential ASCII.
3819  Label runtime;
3820  __ JumpIfBothInstanceTypesAreNotSequentialAscii(
3821  lhs_type, rhs_type, x12, x13, &runtime);
3822 
3823  // Compare flat ASCII strings. Returns when done.
3824  if (equality) {
3825  StringCompareStub::GenerateFlatAsciiStringEquals(
3826  masm, lhs, rhs, x10, x11, x12);
3827  } else {
3828  StringCompareStub::GenerateCompareFlatAsciiStrings(
3829  masm, lhs, rhs, x10, x11, x12, x13);
3830  }
3831 
3832  // Handle more complex cases in runtime.
3833  __ Bind(&runtime);
3834  __ Push(lhs, rhs);
3835  if (equality) {
3836  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
3837  } else {
3838  __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
3839  }
3840 
3841  __ Bind(&miss);
3842  GenerateMiss(masm);
3843 }
3844 
3845 
3846 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
3847  ASSERT(state_ == CompareIC::OBJECT);
3848  ASM_LOCATION("ICCompareStub[Objects]");
3849 
3850  Label miss;
3851 
3852  Register result = x0;
3853  Register rhs = x0;
3854  Register lhs = x1;
3855 
3856  __ JumpIfEitherSmi(rhs, lhs, &miss);
3857 
3858  __ JumpIfNotObjectType(rhs, x10, x10, JS_OBJECT_TYPE, &miss);
3859  __ JumpIfNotObjectType(lhs, x10, x10, JS_OBJECT_TYPE, &miss);
3860 
3861  ASSERT(GetCondition() == eq);
3862  __ Sub(result, rhs, lhs);
3863  __ Ret();
3864 
3865  __ Bind(&miss);
3866  GenerateMiss(masm);
3867 }
3868 
3869 
3870 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
3871  ASM_LOCATION("ICCompareStub[KnownObjects]");
3872 
3873  Label miss;
3874 
3875  Register result = x0;
3876  Register rhs = x0;
3877  Register lhs = x1;
3878 
3879  __ JumpIfEitherSmi(rhs, lhs, &miss);
3880 
3881  Register rhs_map = x10;
3882  Register lhs_map = x11;
3883  __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
3884  __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
3885  __ Cmp(rhs_map, Operand(known_map_));
3886  __ B(ne, &miss);
3887  __ Cmp(lhs_map, Operand(known_map_));
3888  __ B(ne, &miss);
3889 
3890  __ Sub(result, rhs, lhs);
3891  __ Ret();
3892 
3893  __ Bind(&miss);
3894  GenerateMiss(masm);
3895 }
3896 
3897 
3898 // This method handles the case where a compare stub had the wrong
3899 // implementation. It calls a miss handler, which re-writes the stub. All other
3900 // ICCompareStub::Generate* methods should fall back into this one if their
3901 // operands were not the expected types.
3902 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
3903  ASM_LOCATION("ICCompareStub[Miss]");
3904 
3905  Register stub_entry = x11;
3906  {
3907  ExternalReference miss =
3908  ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
3909 
3910  FrameScope scope(masm, StackFrame::INTERNAL);
3911  Register op = x10;
3912  Register left = x1;
3913  Register right = x0;
3914  // Preserve some caller-saved registers.
3915  __ Push(x1, x0, lr);
3916  // Push the arguments.
3917  __ Mov(op, Smi::FromInt(op_));
3918  __ Push(left, right, op);
3919 
3920  // Call the miss handler. This also pops the arguments.
3921  __ CallExternalReference(miss, 3);
3922 
3923  // Compute the entry point of the rewritten stub.
3924  __ Add(stub_entry, x0, Code::kHeaderSize - kHeapObjectTag);
3925  // Restore caller-saved registers.
3926  __ Pop(lr, x0, x1);
3927  }
3928 
3929  // Tail-call to the new stub.
3930  __ Jump(stub_entry);
3931 }
3932 
3933 
3934 void StringHelper::GenerateHashInit(MacroAssembler* masm,
3935  Register hash,
3936  Register character) {
3937  ASSERT(!AreAliased(hash, character));
3938 
3939  // hash = character + (character << 10);
3940  __ LoadRoot(hash, Heap::kHashSeedRootIndex);
3941  // Untag smi seed and add the character.
3942  __ Add(hash, character, Operand(hash, LSR, kSmiShift));
3943 
3944  // Compute hashes modulo 2^32 using a 32-bit W register.
3945  Register hash_w = hash.W();
3946 
3947  // hash += hash << 10;
3948  __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10));
3949  // hash ^= hash >> 6;
3950  __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6));
3951 }
3952 
3953 
3954 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
3955  Register hash,
3956  Register character) {
3957  ASSERT(!AreAliased(hash, character));
3958 
3959  // hash += character;
3960  __ Add(hash, hash, character);
3961 
3962  // Compute hashes modulo 2^32 using a 32-bit W register.
3963  Register hash_w = hash.W();
3964 
3965  // hash += hash << 10;
3966  __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10));
3967  // hash ^= hash >> 6;
3968  __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6));
3969 }
3970 
3971 
3972 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
3973  Register hash,
3974  Register scratch) {
3975  // Compute hashes modulo 2^32 using a 32-bit W register.
3976  Register hash_w = hash.W();
3977  Register scratch_w = scratch.W();
3978  ASSERT(!AreAliased(hash_w, scratch_w));
3979 
3980  // hash += hash << 3;
3981  __ Add(hash_w, hash_w, Operand(hash_w, LSL, 3));
3982  // hash ^= hash >> 11;
3983  __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 11));
3984  // hash += hash << 15;
3985  __ Add(hash_w, hash_w, Operand(hash_w, LSL, 15));
3986 
3987  __ Ands(hash_w, hash_w, String::kHashBitMask);
3988 
3989  // if (hash == 0) hash = 27;
3990  __ Mov(scratch_w, StringHasher::kZeroHash);
3991  __ Csel(hash_w, scratch_w, hash_w, eq);
3992 }
3993 
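// Editorial sketch (not part of code-stubs-arm64.cc): the three hash helpers
// above implement V8's one-at-a-time string hash. A plain C++ version of the
// same computation follows; kAssumedHashBitMask is an assumed value for
// String::kHashBitMask, and kAssumedZeroHash (27) comes from the comment in
// GenerateHashGetHash.
#include <cstdint>
#include <cstddef>

static const uint32_t kAssumedHashBitMask = (1u << 30) - 1;  // assumed hash-field width
static const uint32_t kAssumedZeroHash = 27;                 // fallback for a zero hash

uint32_t OneAtATimeHash(const uint8_t* chars, size_t length, uint32_t untagged_seed) {
  uint32_t hash = untagged_seed;
  for (size_t i = 0; i < length; ++i) {
    hash += chars[i];      // GenerateHashInit / GenerateHashAddCharacter
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  hash += hash << 3;       // GenerateHashGetHash
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= kAssumedHashBitMask;
  return hash == 0 ? kAssumedZeroHash : hash;
}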
3994 
3995 void SubStringStub::Generate(MacroAssembler* masm) {
3996  ASM_LOCATION("SubStringStub::Generate");
3997  Label runtime;
3998 
3999  // Stack frame on entry.
4000  // lr: return address
4001  // jssp[0]: substring "to" offset
4002  // jssp[8]: substring "from" offset
4003  // jssp[16]: pointer to string object
4004 
4005  // This stub is called from the native-call %_SubString(...), so
4006  // nothing can be assumed about the arguments. It is tested that:
4007  // "string" is a sequential string,
4008  // both "from" and "to" are smis, and
4009  // 0 <= from <= to <= string.length (in debug mode.)
4010  // If any of these assumptions fail, we call the runtime system.
4011 
4012  static const int kToOffset = 0 * kPointerSize;
4013  static const int kFromOffset = 1 * kPointerSize;
4014  static const int kStringOffset = 2 * kPointerSize;
4015 
4016  Register to = x0;
4017  Register from = x15;
4018  Register input_string = x10;
4019  Register input_length = x11;
4020  Register input_type = x12;
4021  Register result_string = x0;
4022  Register result_length = x1;
4023  Register temp = x3;
4024 
4025  __ Peek(to, kToOffset);
4026  __ Peek(from, kFromOffset);
4027 
4028  // Check that both from and to are smis. If not, jump to runtime.
4029  __ JumpIfEitherNotSmi(from, to, &runtime);
4030  __ SmiUntag(from);
4031  __ SmiUntag(to);
4032 
4033  // Calculate difference between from and to. If to < from, branch to runtime.
4034  __ Subs(result_length, to, from);
4035  __ B(mi, &runtime);
4036 
4037  // Check from is positive.
4038  __ Tbnz(from, kWSignBit, &runtime);
4039 
4040  // Make sure first argument is a string.
4041  __ Peek(input_string, kStringOffset);
4042  __ JumpIfSmi(input_string, &runtime);
4043  __ IsObjectJSStringType(input_string, input_type, &runtime);
4044 
4045  Label single_char;
4046  __ Cmp(result_length, 1);
4047  __ B(eq, &single_char);
4048 
4049  // Short-cut for the case of trivial substring.
4050  Label return_x0;
4051  __ Ldrsw(input_length,
4052  UntagSmiFieldMemOperand(input_string, String::kLengthOffset));
4053 
4054  __ Cmp(result_length, input_length);
4055  __ CmovX(x0, input_string, eq);
4056  // Return original string.
4057  __ B(eq, &return_x0);
4058 
4059  // Longer than original string's length or negative: unsafe arguments.
4060  __ B(hi, &runtime);
4061 
4062  // Shorter than original string's length: an actual substring.
4063 
4064  // x0 to substring end character offset
4065  // x1 result_length length of substring result
4066  // x10 input_string pointer to input string object
4067  // x10 unpacked_string pointer to unpacked string object
4068  // x11 input_length length of input string
4069  // x12 input_type instance type of input string
4070  // x15 from substring start character offset
4071 
4072  // Deal with different string types: update the index if necessary and put
4073  // the underlying string into register unpacked_string.
4074  Label underlying_unpacked, sliced_string, seq_or_external_string;
4075  Label update_instance_type;
4076  // If the string is not indirect, it can only be sequential or external.
4079 
4080  // Test for string types, and branch/fall through to appropriate unpacking
4081  // code.
4082  __ Tst(input_type, kIsIndirectStringMask);
4083  __ B(eq, &seq_or_external_string);
4084  __ Tst(input_type, kSlicedNotConsMask);
4085  __ B(ne, &sliced_string);
4086 
4087  Register unpacked_string = input_string;
4088 
4089  // Cons string. Check whether it is flat, then fetch first part.
4090  __ Ldr(temp, FieldMemOperand(input_string, ConsString::kSecondOffset));
4091  __ JumpIfNotRoot(temp, Heap::kempty_stringRootIndex, &runtime);
4092  __ Ldr(unpacked_string,
4093  FieldMemOperand(input_string, ConsString::kFirstOffset));
4094  __ B(&update_instance_type);
4095 
4096  __ Bind(&sliced_string);
4097  // Sliced string. Fetch parent and correct start index by offset.
4098  __ Ldrsw(temp,
4099  UntagSmiFieldMemOperand(input_string, SlicedString::kOffsetOffset));
4100  __ Add(from, from, temp);
4101  __ Ldr(unpacked_string,
4102  FieldMemOperand(input_string, SlicedString::kParentOffset));
4103 
4104  __ Bind(&update_instance_type);
4105  __ Ldr(temp, FieldMemOperand(unpacked_string, HeapObject::kMapOffset));
4106  __ Ldrb(input_type, FieldMemOperand(temp, Map::kInstanceTypeOffset));
4107  // Now control must go to &underlying_unpacked. Since no code is generated
4108  // before it, we fall through instead of generating a useless branch.
4109 
4110  __ Bind(&seq_or_external_string);
4111  // Sequential or external string. Registers unpacked_string and input_string
4112  // alias, so there's nothing to do here.
4113  // Note that if code is added here, the above code must be updated.
4114 
4115  // x0 result_string pointer to result string object (uninit)
4116  // x1 result_length length of substring result
4117  // x10 unpacked_string pointer to unpacked string object
4118  // x11 input_length length of input string
4119  // x12 input_type instance type of input string
4120  // x15 from substring start character offset
4121  __ Bind(&underlying_unpacked);
4122 
4123  if (FLAG_string_slices) {
4124  Label copy_routine;
4125  __ Cmp(result_length, SlicedString::kMinLength);
4126  // Short slice. Copy instead of slicing.
4127  __ B(lt, &copy_routine);
4128  // Allocate new sliced string. At this point we do not reload the instance
4129  // type including the string encoding because we simply rely on the info
4130  // provided by the original string. It does not matter if the original
4131  // string's encoding is wrong because we always have to recheck encoding of
4132  // the newly created string's parent anyway due to externalized strings.
4133  Label two_byte_slice, set_slice_header;
4136  __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_slice);
4137  __ AllocateAsciiSlicedString(result_string, result_length, x3, x4,
4138  &runtime);
4139  __ B(&set_slice_header);
4140 
4141  __ Bind(&two_byte_slice);
4142  __ AllocateTwoByteSlicedString(result_string, result_length, x3, x4,
4143  &runtime);
4144 
4145  __ Bind(&set_slice_header);
4146  __ SmiTag(from);
4147  __ Str(from, FieldMemOperand(result_string, SlicedString::kOffsetOffset));
4148  __ Str(unpacked_string,
4149  FieldMemOperand(result_string, SlicedString::kParentOffset));
4150  __ B(&return_x0);
4151 
4152  __ Bind(&copy_routine);
4153  }
4154 
4155  // x0 result_string pointer to result string object (uninit)
4156  // x1 result_length length of substring result
4157  // x10 unpacked_string pointer to unpacked string object
4158  // x11 input_length length of input string
4159  // x12 input_type instance type of input string
4160  // x13 unpacked_char0 pointer to first char of unpacked string (uninit)
4161  // x13 substring_char0 pointer to first char of substring (uninit)
4162  // x14 result_char0 pointer to first char of result (uninit)
4163  // x15 from substring start character offset
4164  Register unpacked_char0 = x13;
4165  Register substring_char0 = x13;
4166  Register result_char0 = x14;
4167  Label two_byte_sequential, sequential_string, allocate_result;
4170 
4171  __ Tst(input_type, kExternalStringTag);
4172  __ B(eq, &sequential_string);
4173 
4174  __ Tst(input_type, kShortExternalStringTag);
4175  __ B(ne, &runtime);
4176  __ Ldr(unpacked_char0,
4177  FieldMemOperand(unpacked_string, ExternalString::kResourceDataOffset));
4178  // unpacked_char0 points to the first character of the underlying string.
4179  __ B(&allocate_result);
4180 
4181  __ Bind(&sequential_string);
4182  // Locate first character of underlying subject string.
4184  __ Add(unpacked_char0, unpacked_string,
4185  SeqOneByteString::kHeaderSize - kHeapObjectTag);
4186 
4187  __ Bind(&allocate_result);
4188  // Sequential ASCII string. Allocate the result.
4190  __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_sequential);
4191 
4192  // Allocate and copy the resulting ASCII string.
4193  __ AllocateAsciiString(result_string, result_length, x3, x4, x5, &runtime);
4194 
4195  // Locate first character of substring to copy.
4196  __ Add(substring_char0, unpacked_char0, from);
4197 
4198  // Locate first character of result.
4199  __ Add(result_char0, result_string,
4200  SeqOneByteString::kHeaderSize - kHeapObjectTag);
4201 
4203  __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
4204  __ B(&return_x0);
4205 
4206  // Allocate and copy the resulting two-byte string.
4207  __ Bind(&two_byte_sequential);
4208  __ AllocateTwoByteString(result_string, result_length, x3, x4, x5, &runtime);
4209 
4210  // Locate first character of substring to copy.
4211  __ Add(substring_char0, unpacked_char0, Operand(from, LSL, 1));
4212 
4213  // Locate first character of result.
4214  __ Add(result_char0, result_string,
4215  SeqTwoByteString::kHeaderSize - kHeapObjectTag);
4216 
4218  __ Add(result_length, result_length, result_length);
4219  __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
4220 
4221  __ Bind(&return_x0);
4222  Counters* counters = masm->isolate()->counters();
4223  __ IncrementCounter(counters->sub_string_native(), 1, x3, x4);
4224  __ Drop(3);
4225  __ Ret();
4226 
4227  __ Bind(&runtime);
4228  __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
4229 
4230  __ bind(&single_char);
4231  // x1: result_length
4232  // x10: input_string
4233  // x12: input_type
4234  // x15: from (untagged)
4235  __ SmiTag(from);
4236  StringCharAtGenerator generator(
4237  input_string, from, result_length, x0,
4238  &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
4239  generator.GenerateFast(masm);
4240  __ Drop(3);
4241  __ Ret();
4242  generator.SkipSlow(masm, &runtime);
4243 }
4244 
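// Editorial sketch (not part of code-stubs-arm64.cc): the overall policy of
// SubStringStub::Generate, with the fast paths in the order they are tested.
// kMinSliceLength stands in for SlicedString::kMinLength; anything that fails
// the argument checks above goes to the runtime instead.
#include <cstddef>

enum class SubStringStrategy { kSingleChar, kReturnOriginal, kMakeSlice, kCopy };

SubStringStrategy ChooseSubStringStrategy(size_t from, size_t to,
                                          size_t string_length,
                                          size_t kMinSliceLength,
                                          bool string_slices_enabled) {
  size_t result_length = to - from;                        // to >= from is checked above
  if (result_length == 1) return SubStringStrategy::kSingleChar;
  if (result_length == string_length) return SubStringStrategy::kReturnOriginal;
  if (string_slices_enabled && result_length >= kMinSliceLength)
    return SubStringStrategy::kMakeSlice;                  // allocate a SlicedString
  return SubStringStrategy::kCopy;                         // allocate and copy characters
}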
4245 
4246 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
4247  Register left,
4248  Register right,
4249  Register scratch1,
4250  Register scratch2,
4251  Register scratch3) {
4252  ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3));
4253  Register result = x0;
4254  Register left_length = scratch1;
4255  Register right_length = scratch2;
4256 
4257  // Compare lengths. If lengths differ, strings can't be equal. Lengths are
4258  // smis, and don't need to be untagged.
4259  Label strings_not_equal, check_zero_length;
4260  __ Ldr(left_length, FieldMemOperand(left, String::kLengthOffset));
4261  __ Ldr(right_length, FieldMemOperand(right, String::kLengthOffset));
4262  __ Cmp(left_length, right_length);
4263  __ B(eq, &check_zero_length);
4264 
4265  __ Bind(&strings_not_equal);
4266  __ Mov(result, Smi::FromInt(NOT_EQUAL));
4267  __ Ret();
4268 
4269  // Check if the length is zero. If so, the strings must be equal (and empty.)
4270  Label compare_chars;
4271  __ Bind(&check_zero_length);
4272  STATIC_ASSERT(kSmiTag == 0);
4273  __ Cbnz(left_length, &compare_chars);
4274  __ Mov(result, Smi::FromInt(EQUAL));
4275  __ Ret();
4276 
4277  // Compare characters. Falls through if all characters are equal.
4278  __ Bind(&compare_chars);
4279  GenerateAsciiCharsCompareLoop(masm, left, right, left_length, scratch2,
4280  scratch3, &strings_not_equal);
4281 
4282  // Characters in strings are equal.
4283  __ Mov(result, Smi::FromInt(EQUAL));
4284  __ Ret();
4285 }
4286 
4287 
4288 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
4289  Register left,
4290  Register right,
4291  Register scratch1,
4292  Register scratch2,
4293  Register scratch3,
4294  Register scratch4) {
4295  ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
4296  Label result_not_equal, compare_lengths;
4297 
4298  // Find minimum length and length difference.
4299  Register length_delta = scratch3;
4300  __ Ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
4301  __ Ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
4302  __ Subs(length_delta, scratch1, scratch2);
4303 
4304  Register min_length = scratch1;
4305  __ Csel(min_length, scratch2, scratch1, gt);
4306  __ Cbz(min_length, &compare_lengths);
4307 
4308  // Compare loop.
4309  GenerateAsciiCharsCompareLoop(masm,
4310  left, right, min_length, scratch2, scratch4,
4311  &result_not_equal);
4312 
4313  // Compare lengths - strings up to min-length are equal.
4314  __ Bind(&compare_lengths);
4315 
4316  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
4317 
4318  // Use length_delta as result if it's zero.
4319  Register result = x0;
4320  __ Subs(result, length_delta, 0);
4321 
4322  __ Bind(&result_not_equal);
4323  Register greater = x10;
4324  Register less = x11;
4325  __ Mov(greater, Smi::FromInt(GREATER));
4326  __ Mov(less, Smi::FromInt(LESS));
4327  __ CmovX(result, greater, gt);
4328  __ CmovX(result, less, lt);
4329  __ Ret();
4330 }
4331 
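// Editorial sketch (not part of code-stubs-arm64.cc): the strategy of
// GenerateCompareFlatAsciiStrings in plain C++ — compare the common prefix,
// then fall back to the length difference, exactly as the Subs/Csel/CmovX
// sequence above does.
#include <cstring>
#include <cstddef>
#include <algorithm>

int CompareFlatAsciiStrings(const char* left, size_t left_length,
                            const char* right, size_t right_length) {
  size_t min_length = std::min(left_length, right_length);
  int prefix = std::memcmp(left, right, min_length);
  if (prefix != 0) return prefix < 0 ? -1 : 1;   // result_not_equal
  if (left_length == right_length) return 0;     // EQUAL
  return left_length < right_length ? -1 : 1;    // LESS / GREATER
}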
4332 
4333 void StringCompareStub::GenerateAsciiCharsCompareLoop(
4334  MacroAssembler* masm,
4335  Register left,
4336  Register right,
4337  Register length,
4338  Register scratch1,
4339  Register scratch2,
4340  Label* chars_not_equal) {
4341  ASSERT(!AreAliased(left, right, length, scratch1, scratch2));
4342 
4343  // Change index to run from -length to -1 by adding length to string
4344  // start. This means that loop ends when index reaches zero, which
4345  // doesn't need an additional compare.
4346  __ SmiUntag(length);
4347  __ Add(scratch1, length, SeqOneByteString::kHeaderSize - kHeapObjectTag);
4348  __ Add(left, left, scratch1);
4349  __ Add(right, right, scratch1);
4350 
4351  Register index = length;
4352  __ Neg(index, length); // index = -length;
4353 
4354  // Compare loop
4355  Label loop;
4356  __ Bind(&loop);
4357  __ Ldrb(scratch1, MemOperand(left, index));
4358  __ Ldrb(scratch2, MemOperand(right, index));
4359  __ Cmp(scratch1, scratch2);
4360  __ B(ne, chars_not_equal);
4361  __ Add(index, index, 1);
4362  __ Cbnz(index, &loop);
4363 }
4364 
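// Editorial sketch (not part of code-stubs-arm64.cc): the negative-index trick
// used by GenerateAsciiCharsCompareLoop. Both base pointers are advanced past
// the last character and indexed with a negative offset that counts up towards
// zero, so the loop-exit test doubles as the end-of-string test.
#include <cstdint>
#include <cstddef>

bool AsciiCharsEqual(const uint8_t* left, const uint8_t* right, size_t length) {
  left += length;                                    // one past the last character
  right += length;
  for (ptrdiff_t index = -static_cast<ptrdiff_t>(length); index != 0; ++index) {
    if (left[index] != right[index]) return false;   // chars_not_equal
  }
  return true;
}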
4365 
4366 void StringCompareStub::Generate(MacroAssembler* masm) {
4367  Label runtime;
4368 
4369  Counters* counters = masm->isolate()->counters();
4370 
4371  // Stack frame on entry.
4372  // sp[0]: right string
4373  // sp[8]: left string
4374  Register right = x10;
4375  Register left = x11;
4376  Register result = x0;
4377  __ Pop(right, left);
4378 
4379  Label not_same;
4380  __ Subs(result, right, left);
4381  __ B(ne, &not_same);
4382  STATIC_ASSERT(EQUAL == 0);
4383  __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
4384  __ Ret();
4385 
4386  __ Bind(&not_same);
4387 
4388  // Check that both objects are sequential ASCII strings.
4389  __ JumpIfEitherIsNotSequentialAsciiStrings(left, right, x12, x13, &runtime);
4390 
4391  // Compare flat ASCII strings natively. Remove arguments from stack first,
4392  // as this function will generate a return.
4393  __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
4394  GenerateCompareFlatAsciiStrings(masm, left, right, x12, x13, x14, x15);
4395 
4396  __ Bind(&runtime);
4397 
4398  // Push arguments back on to the stack.
4399  // sp[0] = right string
4400  // sp[8] = left string.
4401  __ Push(left, right);
4402 
4403  // Call the runtime.
4404  // Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
4405  __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
4406 }
4407 
4408 
4409 void ArrayPushStub::Generate(MacroAssembler* masm) {
4410  Register receiver = x0;
4411 
4412  int argc = arguments_count();
4413 
4414  if (argc == 0) {
4415  // Nothing to do, just return the length.
4416  __ Ldr(x0, FieldMemOperand(receiver, JSArray::kLengthOffset));
4417  __ Drop(argc + 1);
4418  __ Ret();
4419  return;
4420  }
4421 
4422  Isolate* isolate = masm->isolate();
4423 
4424  if (argc != 1) {
4425  __ TailCallExternalReference(
4426  ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
4427  return;
4428  }
4429 
4430  Label call_builtin, attempt_to_grow_elements, with_write_barrier;
4431 
4432  Register elements_length = x8;
4433  Register length = x7;
4434  Register elements = x6;
4435  Register end_elements = x5;
4436  Register value = x4;
4437  // Get the elements array of the object.
4438  __ Ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
4439 
4440  if (IsFastSmiOrObjectElementsKind(elements_kind())) {
4441  // Check that the elements are in fast mode and writable.
4442  __ CheckMap(elements,
4443  x10,
4444  Heap::kFixedArrayMapRootIndex,
4445  &call_builtin,
4446  DONT_DO_SMI_CHECK);
4447  }
4448 
4449  // Get the array's length and calculate new length.
4450  __ Ldr(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
4451  STATIC_ASSERT(kSmiTag == 0);
4452  __ Add(length, length, Smi::FromInt(argc));
4453 
4454  // Check if we could survive without allocation.
4455  __ Ldr(elements_length,
4456  FieldMemOperand(elements, FixedArray::kLengthOffset));
4457  __ Cmp(length, elements_length);
4458 
4459  const int kEndElementsOffset =
4460  FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
4461 
4462  if (IsFastSmiOrObjectElementsKind(elements_kind())) {
4463  __ B(gt, &attempt_to_grow_elements);
4464 
4465  // Check if value is a smi.
4466  __ Peek(value, (argc - 1) * kPointerSize);
4467  __ JumpIfNotSmi(value, &with_write_barrier);
4468 
4469  // Store the value.
4470  // We may need a register containing the address end_elements below,
4471  // so write back the value in end_elements.
4472  __ Add(end_elements, elements,
4473  Operand::UntagSmiAndScale(length, kPointerSizeLog2));
4474  __ Str(value, MemOperand(end_elements, kEndElementsOffset, PreIndex));
4475  } else {
4476  __ B(gt, &call_builtin);
4477 
4478  __ Peek(value, (argc - 1) * kPointerSize);
4479  __ StoreNumberToDoubleElements(value, length, elements, x10, d0, d1,
4480  &call_builtin, argc * kDoubleSize);
4481  }
4482 
4483  // Save new length.
4484  __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
4485 
4486  // Return length.
4487  __ Drop(argc + 1);
4488  __ Mov(x0, length);
4489  __ Ret();
4490 
4491  if (IsFastDoubleElementsKind(elements_kind())) {
4492  __ Bind(&call_builtin);
4493  __ TailCallExternalReference(
4494  ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
4495  return;
4496  }
4497 
4498  __ Bind(&with_write_barrier);
4499 
4500  if (IsFastSmiElementsKind(elements_kind())) {
4501  if (FLAG_trace_elements_transitions) {
4502  __ B(&call_builtin);
4503  }
4504 
4505  __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
4506  __ JumpIfHeapNumber(x10, &call_builtin);
4507 
4508  ElementsKind target_kind = IsHoleyElementsKind(elements_kind())
4509  ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
4510  __ Ldr(x10, GlobalObjectMemOperand());
4511  __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kNativeContextOffset));
4512  __ Ldr(x10, ContextMemOperand(x10, Context::JS_ARRAY_MAPS_INDEX));
4513  const int header_size = FixedArrayBase::kHeaderSize;
4514  // Verify that the object can be transitioned in place.
4515  const int origin_offset = header_size + elements_kind() * kPointerSize;
4516  __ ldr(x11, FieldMemOperand(receiver, origin_offset));
4517  __ ldr(x12, FieldMemOperand(x10, HeapObject::kMapOffset));
4518  __ cmp(x11, x12);
4519  __ B(ne, &call_builtin);
4520 
4521  const int target_offset = header_size + target_kind * kPointerSize;
4522  __ Ldr(x10, FieldMemOperand(x10, target_offset));
4523  __ Mov(x11, receiver);
4526  }
4527 
4528  // Save new length.
4529  __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
4530 
4531  // Store the value.
4532  // We may need a register containing the address end_elements below,
4533  // so write back the value in end_elements.
4534  __ Add(end_elements, elements,
4535  Operand::UntagSmiAndScale(length, kPointerSizeLog2));
4536  __ Str(value, MemOperand(end_elements, kEndElementsOffset, PreIndex));
4537 
4538  __ RecordWrite(elements,
4539  end_elements,
4540  value,
4541  kLRHasNotBeenSaved,
4542  kDontSaveFPRegs,
4543  EMIT_REMEMBERED_SET,
4544  OMIT_SMI_CHECK);
4545  __ Drop(argc + 1);
4546  __ Mov(x0, length);
4547  __ Ret();
4548 
4549  __ Bind(&attempt_to_grow_elements);
4550 
4551  if (!FLAG_inline_new) {
4552  __ B(&call_builtin);
4553  }
4554 
4555  Register argument = x2;
4556  __ Peek(argument, (argc - 1) * kPointerSize);
4557  // Growing elements that are SMI-only requires special handling in case
4558  // the new element is non-Smi. For now, delegate to the builtin.
4559  if (IsFastSmiElementsKind(elements_kind())) {
4560  __ JumpIfNotSmi(argument, &call_builtin);
4561  }
4562 
4563  // We could be lucky and the elements array could be at the top of new-space.
4564  // In this case we can just grow it in place by moving the allocation pointer
4565  // up.
4566  ExternalReference new_space_allocation_top =
4567  ExternalReference::new_space_allocation_top_address(isolate);
4568  ExternalReference new_space_allocation_limit =
4569  ExternalReference::new_space_allocation_limit_address(isolate);
4570 
4571  const int kAllocationDelta = 4;
4572  ASSERT(kAllocationDelta >= argc);
4573  Register allocation_top_addr = x5;
4574  Register allocation_top = x9;
4575  // Load top and check if it is the end of elements.
4576  __ Add(end_elements, elements,
4577  Operand::UntagSmiAndScale(length, kPointerSizeLog2));
4578  __ Add(end_elements, end_elements, kEndElementsOffset);
4579  __ Mov(allocation_top_addr, new_space_allocation_top);
4580  __ Ldr(allocation_top, MemOperand(allocation_top_addr));
4581  __ Cmp(end_elements, allocation_top);
4582  __ B(ne, &call_builtin);
4583 
4584  __ Mov(x10, new_space_allocation_limit);
4585  __ Ldr(x10, MemOperand(x10));
4586  __ Add(allocation_top, allocation_top, kAllocationDelta * kPointerSize);
4587  __ Cmp(allocation_top, x10);
4588  __ B(hi, &call_builtin);
4589 
4590  // We fit and could grow elements.
4591  // Update new_space_allocation_top.
4592  __ Str(allocation_top, MemOperand(allocation_top_addr));
4593  // Push the argument.
4594  __ Str(argument, MemOperand(end_elements));
4595  // Fill the rest with holes.
4596  __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
4597  ASSERT(kAllocationDelta == 4);
4598  __ Stp(x10, x10, MemOperand(end_elements, 1 * kPointerSize));
4599  __ Stp(x10, x10, MemOperand(end_elements, 3 * kPointerSize));
4600 
4601  // Update elements' and array's sizes.
4602  __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
4603  __ Add(elements_length, elements_length, Smi::FromInt(kAllocationDelta));
4604  __ Str(elements_length,
4605  FieldMemOperand(elements, FixedArray::kLengthOffset));
4606 
4607  // Elements are in new space, so write barrier is not required.
4608  __ Drop(argc + 1);
4609  __ Mov(x0, length);
4610  __ Ret();
4611 
4612  __ Bind(&call_builtin);
4613  __ TailCallExternalReference(
4614  ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
4615 }
4616 
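// Editorial sketch (not part of code-stubs-arm64.cc): the "grow elements in
// place" fast path of ArrayPushStub only applies when the elements backing
// store is the most recently allocated object in new space, so growing is just
// bumping the allocation top within the current limit.
#include <cstdint>
#include <cstddef>

bool TryGrowElementsInPlace(uintptr_t elements_end, uintptr_t* allocation_top,
                            uintptr_t allocation_limit, size_t delta_bytes) {
  if (*allocation_top != elements_end) return false;  // something else was allocated after us
  if (*allocation_top + delta_bytes > allocation_limit) return false;  // no room left
  *allocation_top += delta_bytes;                      // claim the extra space
  return true;                                         // caller fills the new slots with the hole
}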
4617 
4618 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
4619  // ----------- S t a t e -------------
4620  // -- x1 : left
4621  // -- x0 : right
4622  // -- lr : return address
4623  // -----------------------------------
4624  Isolate* isolate = masm->isolate();
4625 
4626  // Load x2 with the allocation site. We stick an undefined dummy value here
4627  // and replace it with the real allocation site later when we instantiate this
4628  // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
4629  __ LoadObject(x2, handle(isolate->heap()->undefined_value()));
4630 
4631  // Make sure that we actually patched the allocation site.
4632  if (FLAG_debug_code) {
4633  __ AssertNotSmi(x2, kExpectedAllocationSite);
4634  __ Ldr(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
4635  __ AssertRegisterIsRoot(x10, Heap::kAllocationSiteMapRootIndex,
4636  kExpectedAllocationSite);
4637  }
4638 
4639  // Tail call into the stub that handles binary operations with allocation
4640  // sites.
4641  BinaryOpWithAllocationSiteStub stub(state_);
4642  __ TailCallStub(&stub);
4643 }
4644 
4645 
4646 bool CodeStub::CanUseFPRegisters() {
4647  // FP registers always available on ARM64.
4648  return true;
4649 }
4650 
4651 
4652 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
4653  // We need some extra registers for this stub, they have been allocated
4654  // but we need to save them before using them.
4655  regs_.Save(masm);
4656 
4657  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
4658  Label dont_need_remembered_set;
4659 
4660  Register value = regs_.scratch0();
4661  __ Ldr(value, MemOperand(regs_.address()));
4662  __ JumpIfNotInNewSpace(value, &dont_need_remembered_set);
4663 
4664  __ CheckPageFlagSet(regs_.object(),
4665  value,
4666  1 << MemoryChunk::SCAN_ON_SCAVENGE,
4667  &dont_need_remembered_set);
4668 
4669  // First notify the incremental marker if necessary, then update the
4670  // remembered set.
4671  CheckNeedsToInformIncrementalMarker(
4672  masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
4673  InformIncrementalMarker(masm);
4674  regs_.Restore(masm); // Restore the extra scratch registers we used.
4675 
4676  __ RememberedSetHelper(object_,
4677  address_,
4678  value_, // scratch1
4679  save_fp_regs_mode_,
4680  MacroAssembler::kReturnAtEnd);
4681 
4682  __ Bind(&dont_need_remembered_set);
4683  }
4684 
4685  CheckNeedsToInformIncrementalMarker(
4686  masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
4687  InformIncrementalMarker(masm);
4688  regs_.Restore(masm); // Restore the extra scratch registers we used.
4689  __ Ret();
4690 }
4691 
4692 
4693 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
4694  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
4695  Register address =
4696  x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address();
4697  ASSERT(!address.Is(regs_.object()));
4698  ASSERT(!address.Is(x0));
4699  __ Mov(address, regs_.address());
4700  __ Mov(x0, regs_.object());
4701  __ Mov(x1, address);
4702  __ Mov(x2, ExternalReference::isolate_address(masm->isolate()));
4703 
4704  AllowExternalCallThatCantCauseGC scope(masm);
4705  ExternalReference function =
4706  ExternalReference::incremental_marking_record_write_function(
4707  masm->isolate());
4708  __ CallCFunction(function, 3, 0);
4709 
4710  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
4711 }
4712 
4713 
4714 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
4715  MacroAssembler* masm,
4716  OnNoNeedToInformIncrementalMarker on_no_need,
4717  Mode mode) {
4718  Label on_black;
4719  Label need_incremental;
4720  Label need_incremental_pop_scratch;
4721 
4722  Register mem_chunk = regs_.scratch0();
4723  Register counter = regs_.scratch1();
4724  __ Bic(mem_chunk, regs_.object(), Page::kPageAlignmentMask);
4725  __ Ldr(counter,
4726  MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
4727  __ Subs(counter, counter, 1);
4728  __ Str(counter,
4729  MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
4730  __ B(mi, &need_incremental);
4731 
4732  // If the object is not black we don't have to inform the incremental marker.
4733  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
4734 
4735  regs_.Restore(masm); // Restore the extra scratch registers we used.
4736  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4737  __ RememberedSetHelper(object_,
4738  address_,
4739  value_, // scratch1
4740  save_fp_regs_mode_,
4741  MacroAssembler::kReturnAtEnd);
4742  } else {
4743  __ Ret();
4744  }
4745 
4746  __ Bind(&on_black);
4747  // Get the value from the slot.
4748  Register value = regs_.scratch0();
4749  __ Ldr(value, MemOperand(regs_.address()));
4750 
4751  if (mode == INCREMENTAL_COMPACTION) {
4752  Label ensure_not_white;
4753 
4754  __ CheckPageFlagClear(value,
4755  regs_.scratch1(),
4756  MemoryChunk::kEvacuationCandidateMask,
4757  &ensure_not_white);
4758 
4759  __ CheckPageFlagClear(regs_.object(),
4760  regs_.scratch1(),
4761  MemoryChunk::kSkipEvacuationSlotsRecordingMask,
4762  &need_incremental);
4763 
4764  __ Bind(&ensure_not_white);
4765  }
4766 
4767  // We need extra registers for this, so we push the object and the address
4768  // register temporarily.
4769  __ Push(regs_.address(), regs_.object());
4770  __ EnsureNotWhite(value,
4771  regs_.scratch1(), // Scratch.
4772  regs_.object(), // Scratch.
4773  regs_.address(), // Scratch.
4774  regs_.scratch2(), // Scratch.
4775  &need_incremental_pop_scratch);
4776  __ Pop(regs_.object(), regs_.address());
4777 
4778  regs_.Restore(masm); // Restore the extra scratch registers we used.
4779  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4780  __ RememberedSetHelper(object_,
4781  address_,
4782  value_, // scratch1
4783  save_fp_regs_mode_,
4784  MacroAssembler::kReturnAtEnd);
4785  } else {
4786  __ Ret();
4787  }
4788 
4789  __ Bind(&need_incremental_pop_scratch);
4790  __ Pop(regs_.object(), regs_.address());
4791 
4792  __ Bind(&need_incremental);
4793  // Fall through when we need to inform the incremental marker.
4794 }
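// Annotation (not part of the original file): the Bic above clears the page
// offset bits of the object's address, yielding the MemoryChunk header of its
// page; the chunk's write-barrier counter is then decremented and, once it
// goes negative (B(mi, ...)), the stub branches to need_incremental so the
// caller informs the incremental marker. Otherwise the object's mark bits
// (JumpIfBlack / EnsureNotWhite) decide whether the stored value still needs
// to be marked.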
4795 
4796 
4797 void RecordWriteStub::Generate(MacroAssembler* masm) {
4798  Label skip_to_incremental_noncompacting;
4799  Label skip_to_incremental_compacting;
4800 
4801  // We patch these two first instructions back and forth between a nop and
4802  // real branch when we start and stop incremental heap marking.
4803  // Initially the stub is expected to be in STORE_BUFFER_ONLY mode, so 2 nops
4804  // are generated.
4805  // See RecordWriteStub::Patch for details.
4806  {
4807  InstructionAccurateScope scope(masm, 2);
4808  __ adr(xzr, &skip_to_incremental_noncompacting);
4809  __ adr(xzr, &skip_to_incremental_compacting);
4810  }
4811 
4812  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
4813  __ RememberedSetHelper(object_,
4814  address_,
4815  value_, // scratch1
4816  save_fp_regs_mode_,
4817  MacroAssembler::kFallThroughAtEnd);
4818  }
4819  __ Ret();
4820 
4821  __ Bind(&skip_to_incremental_noncompacting);
4822  GenerateIncremental(masm, INCREMENTAL);
4823 
4824  __ Bind(&skip_to_incremental_compacting);
4825  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
4826 }
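// Annotation (not part of the original file): "adr xzr, label" computes a
// PC-relative address into the zero register, so it behaves as a nop while
// still encoding the label's offset in the instruction. RecordWriteStub::Patch
// can therefore rewrite either of the two instructions into a real branch to
// skip_to_incremental_noncompacting or skip_to_incremental_compacting when
// incremental (or compacting) marking starts, and back to the nop form when
// the stub returns to STORE_BUFFER_ONLY mode.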
4827 
4828 
4829 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
4830  // x0 value element value to store
4831  // x3 index_smi element index as smi
4832  // sp[0] array_index_smi array literal index in function as smi
4833  // sp[1] array array literal
4834 
4835  Register value = x0;
4836  Register index_smi = x3;
4837 
4838  Register array = x1;
4839  Register array_map = x2;
4840  Register array_index_smi = x4;
4841  __ PeekPair(array_index_smi, array, 0);
4842  __ Ldr(array_map, FieldMemOperand(array, JSObject::kMapOffset));
4843 
4844  Label double_elements, smi_element, fast_elements, slow_elements;
4845  Register bitfield2 = x10;
4846  __ Ldrb(bitfield2, FieldMemOperand(array_map, Map::kBitField2Offset));
4847 
4848  // Jump if array's ElementsKind is not FAST*_SMI_ELEMENTS, FAST_ELEMENTS or
4849  // FAST_HOLEY_ELEMENTS.
4854  __ Cmp(bitfield2, Map::kMaximumBitField2FastHoleyElementValue);
4855  __ B(hi, &double_elements);
4856 
4857  __ JumpIfSmi(value, &smi_element);
4858 
4859  // Jump if array's ElementsKind is not FAST_ELEMENTS or FAST_HOLEY_ELEMENTS.
4860  __ Tbnz(bitfield2, MaskToBit(FAST_ELEMENTS << Map::kElementsKindShift),
4861  &fast_elements);
4862 
4863  // Store into the array literal requires an elements transition. Call into
4864  // the runtime.
4865  __ Bind(&slow_elements);
4866  __ Push(array, index_smi, value);
4867  __ Ldr(x10, MemOperand(fp, StandardFrameConstants::kContextOffset));
4868  __ Ldr(x11, FieldMemOperand(x10, JSFunction::kLiteralsOffset));
4869  __ Push(x11, array_index_smi);
4870  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
4871 
4872  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
4873  __ Bind(&fast_elements);
4874  __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
4875  __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
4876  __ Add(x11, x11, FixedArray::kHeaderSize - kHeapObjectTag);
4877  __ Str(value, MemOperand(x11));
4878  // Update the write barrier for the array store.
4879  __ RecordWrite(x10, x11, value, kLRHasNotBeenSaved, kDontSaveFPRegs,
4880  EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
4881  __ Ret();
4882 
4883  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
4884  // and value is Smi.
4885  __ Bind(&smi_element);
4886  __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
4887  __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
4888  __ Str(value, FieldMemOperand(x11, FixedArray::kHeaderSize));
4889  __ Ret();
4890 
4891  __ Bind(&double_elements);
4892  __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
4893  __ StoreNumberToDoubleElements(value, index_smi, x10, x11, d0, d1,
4894  &slow_elements);
4895  __ Ret();
4896 }
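// Annotation (not part of the original file): the dispatch above reads the
// elements kind out of the map's bit field 2. Kinds above the fast holey
// object range are treated as double elements; for the remaining fast kinds,
// the bit tested by Tbnz (FAST_ELEMENTS << kElementsKindShift) distinguishes
// object-element arrays from Smi-only arrays, so storing a non-Smi value into
// a Smi-only array falls through to the slow, transitioning runtime path.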
4897 
4898 
4899 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
4900  CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
4901  __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
4902  int parameter_count_offset =
4903  StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
4904  __ Ldr(x1, MemOperand(fp, parameter_count_offset));
4905  if (function_mode_ == JS_FUNCTION_STUB_MODE) {
4906  __ Add(x1, x1, 1);
4907  }
4908  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
4909  __ Drop(x1);
4910  // Return to IC Miss stub, continuation still on stack.
4911  __ Ret();
4912 }
4913 
4914 
4915 // The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
4916 // a "Push lr" instruction, followed by a call.
4917 static const unsigned int kProfileEntryHookCallSize =
4918  Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
4919 
4920 
4921 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
4922  if (masm->isolate()->function_entry_hook() != NULL) {
4923  ProfileEntryHookStub stub;
4924  Assembler::BlockConstPoolScope no_const_pools(masm);
4925  Label entry_hook_call_start;
4926  __ Bind(&entry_hook_call_start);
4927  __ Push(lr);
4928  __ CallStub(&stub);
4929  ASSERT(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
4930  kProfileEntryHookCallSize);
4931 
4932  __ Pop(lr);
4933  }
4934 }
4935 
4936 
4937 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
4938  MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
4939 
4940  // Save all kCallerSaved registers (including lr), since this can be called
4941  // from anywhere.
4942  // TODO(jbramley): What about FP registers?
4943  __ PushCPURegList(kCallerSaved);
4944  ASSERT(kCallerSaved.IncludesAliasOf(lr));
4945  const int kNumSavedRegs = kCallerSaved.Count();
4946 
4947  // Compute the function's address as the first argument.
4948  __ Sub(x0, lr, kProfileEntryHookCallSize);
4949 
4950 #if V8_HOST_ARCH_ARM64
4951  uintptr_t entry_hook =
4952  reinterpret_cast<uintptr_t>(masm->isolate()->function_entry_hook());
4953  __ Mov(x10, entry_hook);
4954 #else
4955  // Under the simulator we need to indirect the entry hook through a trampoline
4956  // function at a known address.
4957  ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
4958  __ Mov(x10, Operand(ExternalReference(&dispatcher,
4959  ExternalReference::BUILTIN_CALL,
4960  masm->isolate())));
4961  // It additionally takes an isolate as a third parameter
4962  __ Mov(x2, ExternalReference::isolate_address(masm->isolate()));
4963 #endif
4964 
4965  // The caller's return address is above the saved temporaries.
4966  // Grab its location for the second argument to the hook.
4967  __ Add(x1, __ StackPointer(), kNumSavedRegs * kPointerSize);
4968 
4969  {
4970  // Create a dummy frame, as CallCFunction requires this.
4971  FrameScope frame(masm, StackFrame::MANUAL);
4972  __ CallCFunction(x10, 2, 0);
4973  }
4974 
4975  __ PopCPURegList(kCallerSaved);
4976  __ Ret();
4977 }
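// Annotation (not part of the original file): the entry hook is invoked as an
// ordinary C function with x0 = the address of the instrumented function
// (reconstructed as lr minus the fixed call-sequence size) and x1 = the stack
// slot that holds the caller's return address. Under the simulator the call is
// routed through a BUILTIN_CALL trampoline and x2 additionally carries the
// isolate, as noted in the #else branch above.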
4978 
4979 
4980 void DirectCEntryStub::Generate(MacroAssembler* masm) {
4981  // When calling into C++ code the stack pointer must be csp.
4982  // Therefore this code must use csp for peek/poke operations when the
4983  // stub is generated. When the stub is called
4984  // (via DirectCEntryStub::GenerateCall), the caller must setup an ExitFrame
4985  // and configure the stack pointer *before* doing the call.
4986  const Register old_stack_pointer = __ StackPointer();
4987  __ SetStackPointer(csp);
4988 
4989  // Put return address on the stack (accessible to GC through exit frame pc).
4990  __ Poke(lr, 0);
4991  // Call the C++ function.
4992  __ Blr(x10);
4993  // Return to calling code.
4994  __ Peek(lr, 0);
4995  __ Ret();
4996 
4997  __ SetStackPointer(old_stack_pointer);
4998 }
4999 
5000 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
5001  Register target) {
5002  // Make sure the caller configured the stack pointer (see comment in
5003  // DirectCEntryStub::Generate).
5004  ASSERT(csp.Is(__ StackPointer()));
5005 
5006  intptr_t code =
5007  reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
5008  __ Mov(lr, Operand(code, RelocInfo::CODE_TARGET));
5009  __ Mov(x10, target);
5010  // Branch to the stub.
5011  __ Blr(lr);
5012 }
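// Annotation (not part of the original file): GenerateCall loads lr with the
// stub's own entry point and uses Blr(lr), which both branches to the stub and
// replaces lr with the caller's continuation. Inside the stub that lr is
// spilled to the exit frame (Poke) so the GC can see the return address while
// the C++ target in x10 runs, then reloaded (Peek) so Ret goes straight back
// to the code that emitted the call.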
5013 
5014 
5015 // Probe the name dictionary in the 'elements' register.
5016 // Jump to the 'done' label if a property with the given name is found.
5017 // Jump to the 'miss' label otherwise.
5018 //
5019 // If lookup was successful 'scratch2' will be equal to elements + 4 * index.
5020 // 'elements' and 'name' registers are preserved on miss.
5021 void NameDictionaryLookupStub::GeneratePositiveLookup(
5022  MacroAssembler* masm,
5023  Label* miss,
5024  Label* done,
5025  Register elements,
5026  Register name,
5027  Register scratch1,
5028  Register scratch2) {
5029  ASSERT(!AreAliased(elements, name, scratch1, scratch2));
5030 
5031  // Assert that name contains a string.
5032  __ AssertName(name);
5033 
5034  // Compute the capacity mask.
5035  __ Ldrsw(scratch1, UntagSmiFieldMemOperand(elements, kCapacityOffset));
5036  __ Sub(scratch1, scratch1, 1);
5037 
5038  // Generate an unrolled loop that performs a few probes before giving up.
5039  for (int i = 0; i < kInlinedProbes; i++) {
5040  // Compute the masked index: (hash + i + i * i) & mask.
5041  __ Ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
5042  if (i > 0) {
5043  // Add the probe offset (i + i * i) left shifted to avoid right shifting
5044  // the hash in a separate instruction. The value hash + i + i * i is right
5045  // shifted in the following And instruction.
5046  ASSERT(NameDictionary::GetProbeOffset(i) <
5047  1 << (32 - Name::kHashFieldOffset));
5048  __ Add(scratch2, scratch2, Operand(
5049  NameDictionary::GetProbeOffset(i) << Name::kHashShift));
5050  }
5051  __ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
5052 
5053  // Scale the index by multiplying by the element size.
5054  ASSERT(NameDictionary::kEntrySize == 3);
5055  __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
5056 
5057  // Check if the key is identical to the name.
5058  UseScratchRegisterScope temps(masm);
5059  Register scratch3 = temps.AcquireX();
5060  __ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
5061  __ Ldr(scratch3, FieldMemOperand(scratch2, kElementsStartOffset));
5062  __ Cmp(name, scratch3);
5063  __ B(eq, done);
5064  }
5065 
5066  // The inlined probes didn't find the entry.
5067  // Call the complete stub to scan the whole dictionary.
5068 
5069  CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
5070  spill_list.Combine(lr);
5071  spill_list.Remove(scratch1);
5072  spill_list.Remove(scratch2);
5073 
5074  __ PushCPURegList(spill_list);
5075 
5076  if (name.is(x0)) {
5077  ASSERT(!elements.is(x1));
5078  __ Mov(x1, name);
5079  __ Mov(x0, elements);
5080  } else {
5081  __ Mov(x0, elements);
5082  __ Mov(x1, name);
5083  }
5084 
5085  Label not_found;
5086  NameDictionaryLookupStub stub(POSITIVE_LOOKUP);
5087  __ CallStub(&stub);
5088  __ Cbz(x0, &not_found);
5089  __ Mov(scratch2, x2); // Move entry index into scratch2.
5090  __ PopCPURegList(spill_list);
5091  __ B(done);
5092 
5093  __ Bind(&not_found);
5094  __ PopCPURegList(spill_list);
5095  __ B(miss);
5096 }
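// Annotation (not part of the original file): each NameDictionary entry spans
// three consecutive pointers (key, value, property details), which is why the
// probe index is first tripled (Add ... LSL 1, i.e. index += index * 2) and
// then shifted by kPointerSizeLog2 before being added to 'elements'. For
// example, probe index 5 compares the key stored at
// elements + kElementsStartOffset + 5 * 3 * kPointerSize.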
5097 
5098 
5099 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
5100  Label* miss,
5101  Label* done,
5102  Register receiver,
5103  Register properties,
5104  Handle<Name> name,
5105  Register scratch0) {
5106  ASSERT(!AreAliased(receiver, properties, scratch0));
5107  ASSERT(name->IsUniqueName());
5108  // If names of slots in range from 1 to kProbes - 1 for the hash value are
5109  // not equal to the name and kProbes-th slot is not used (its name is the
5110  // undefined value), it guarantees the hash table doesn't contain the
5111  // property. It's true even if some slots represent deleted properties
5112  // (their names are the hole value).
5113  for (int i = 0; i < kInlinedProbes; i++) {
5114  // scratch0 points to properties hash.
5115  // Compute the masked index: (hash + i + i * i) & mask.
5116  Register index = scratch0;
5117  // Capacity is smi 2^n.
5118  __ Ldrsw(index, UntagSmiFieldMemOperand(properties, kCapacityOffset));
5119  __ Sub(index, index, 1);
5120  __ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i));
5121 
5122  // Scale the index by multiplying by the entry size.
5123  ASSERT(NameDictionary::kEntrySize == 3);
5124  __ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
5125 
5126  Register entity_name = scratch0;
5127  // Having undefined at this place means the name is not contained.
5128  Register tmp = index;
5129  __ Add(tmp, properties, Operand(index, LSL, kPointerSizeLog2));
5130  __ Ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
5131 
5132  __ JumpIfRoot(entity_name, Heap::kUndefinedValueRootIndex, done);
5133 
5134  // Stop if found the property.
5135  __ Cmp(entity_name, Operand(name));
5136  __ B(eq, miss);
5137 
5138  Label good;
5139  __ JumpIfRoot(entity_name, Heap::kTheHoleValueRootIndex, &good);
5140 
5141  // Check if the entry name is not a unique name.
5142  __ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
5143  __ Ldrb(entity_name,
5144  FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
5145  __ JumpIfNotUniqueName(entity_name, miss);
5146  __ Bind(&good);
5147  }
5148 
5149  CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
5150  spill_list.Combine(lr);
5151  spill_list.Remove(scratch0); // Scratch registers don't need to be preserved.
5152 
5153  __ PushCPURegList(spill_list);
5154 
5155  __ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
5156  __ Mov(x1, Operand(name));
5157  NameDictionaryLookupStub stub(NEGATIVE_LOOKUP);
5158  __ CallStub(&stub);
5159  // Move stub return value to scratch0. Note that scratch0 is not included in
5160  // spill_list and won't be clobbered by PopCPURegList.
5161  __ Mov(scratch0, x0);
5162  __ PopCPURegList(spill_list);
5163 
5164  __ Cbz(scratch0, done);
5165  __ B(miss);
5166 }
5167 
5168 
5169 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
5170  // This stub overrides SometimesSetsUpAFrame() to return false. That means
5171  // we cannot call anything that could cause a GC from this stub.
5172  //
5173  // Arguments are in x0 and x1:
5174  // x0: property dictionary.
5175  // x1: the name of the property we are looking for.
5176  //
5177  // Return value is in x0 and is zero if lookup failed, non zero otherwise.
5178  // If the lookup is successful, x2 will contain the index of the entry.
5179 
5180  Register result = x0;
5181  Register dictionary = x0;
5182  Register key = x1;
5183  Register index = x2;
5184  Register mask = x3;
5185  Register hash = x4;
5186  Register undefined = x5;
5187  Register entry_key = x6;
5188 
5189  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
5190 
5191  __ Ldrsw(mask, UntagSmiFieldMemOperand(dictionary, kCapacityOffset));
5192  __ Sub(mask, mask, 1);
5193 
5194  __ Ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
5195  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
5196 
5197  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
5198  // Compute the masked index: (hash + i + i * i) & mask.
5199  // Capacity is smi 2^n.
5200  if (i > 0) {
5201  // Add the probe offset (i + i * i) left shifted to avoid right shifting
5202  // the hash in a separate instruction. The value hash + i + i * i is right
5203  // shifted in the following And instruction.
5204  ASSERT(NameDictionary::GetProbeOffset(i) <
5205  1 << (32 - Name::kHashFieldOffset));
5206  __ Add(index, hash,
5207  NameDictionary::GetProbeOffset(i) << Name::kHashShift);
5208  } else {
5209  __ Mov(index, hash);
5210  }
5211  __ And(index, mask, Operand(index, LSR, Name::kHashShift));
5212 
5213  // Scale the index by multiplying by the entry size.
5214  ASSERT(NameDictionary::kEntrySize == 3);
5215  __ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
5216 
5217  __ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2));
5218  __ Ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
5219 
5220  // Having undefined at this place means the name is not contained.
5221  __ Cmp(entry_key, undefined);
5222  __ B(eq, &not_in_dictionary);
5223 
5224  // Stop if found the property.
5225  __ Cmp(entry_key, key);
5226  __ B(eq, &in_dictionary);
5227 
5228  if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
5229  // Check if the entry name is not a unique name.
5230  __ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
5231  __ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
5232  __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
5233  }
5234  }
5235 
5236  __ Bind(&maybe_in_dictionary);
5237  // If we are doing negative lookup then probing failure should be
5238  // treated as a lookup success. For positive lookup, probing failure
5239  // should be treated as lookup failure.
5240  if (mode_ == POSITIVE_LOOKUP) {
5241  __ Mov(result, 0);
5242  __ Ret();
5243  }
5244 
5245  __ Bind(&in_dictionary);
5246  __ Mov(result, 1);
5247  __ Ret();
5248 
5249  __ Bind(&not_in_dictionary);
5250  __ Mov(result, 0);
5251  __ Ret();
5252 }
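// Annotation (not part of the original file): note the asymmetry at
// maybe_in_dictionary above. A positive lookup that exhausts all kTotalProbes
// probes reports failure (x0 = 0), whereas a negative lookup falls through to
// in_dictionary and reports x0 = 1, i.e. "possibly present", because running
// out of probes is not proof of absence and the caller must then take its
// miss path.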
5253 
5254 
5255 template<class T>
5256 static void CreateArrayDispatch(MacroAssembler* masm,
5257  AllocationSiteOverrideMode mode) {
5258  ASM_LOCATION("CreateArrayDispatch");
5259  if (mode == DISABLE_ALLOCATION_SITES) {
5260  T stub(GetInitialFastElementsKind(), mode);
5261  __ TailCallStub(&stub);
5262 
5263  } else if (mode == DONT_OVERRIDE) {
5264  Register kind = x3;
5265  int last_index =
5266  GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
5267  for (int i = 0; i <= last_index; ++i) {
5268  Label next;
5269  ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
5270  // TODO(jbramley): Is this the best way to handle this? Can we make the
5271  // tail calls conditional, rather than hopping over each one?
5272  __ CompareAndBranch(kind, candidate_kind, ne, &next);
5273  T stub(candidate_kind);
5274  __ TailCallStub(&stub);
5275  __ Bind(&next);
5276  }
5277 
5278  // If we reached this point there is a problem.
5279  __ Abort(kUnexpectedElementsKindInArrayConstructor);
5280 
5281  } else {
5282  UNREACHABLE();
5283  }
5284 }
5285 
5286 
5287 // TODO(jbramley): If this needs to be a special case, make it a proper template
5288 // specialization, and not a separate function.
5289 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
5290  AllocationSiteOverrideMode mode) {
5291  ASM_LOCATION("CreateArrayDispatchOneArgument");
5292  // x0 - argc
5293  // x1 - constructor?
5294  // x2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
5295  // x3 - kind (if mode != DISABLE_ALLOCATION_SITES)
5296  // sp[0] - last argument
5297 
5298  Register allocation_site = x2;
5299  Register kind = x3;
5300 
5301  Label normal_sequence;
5302  if (mode == DONT_OVERRIDE) {
5303  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
5304  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
5305  STATIC_ASSERT(FAST_ELEMENTS == 2);
5306  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
5307  STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
5308  STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
5309 
5310  // Is the low bit set? If so, the array is holey.
5311  __ Tbnz(kind, 0, &normal_sequence);
5312  }
5313 
5314  // Look at the last argument.
5315  // TODO(jbramley): What does a 0 argument represent?
5316  __ Peek(x10, 0);
5317  __ Cbz(x10, &normal_sequence);
5318 
5319  if (mode == DISABLE_ALLOCATION_SITES) {
5320  ElementsKind initial = GetInitialFastElementsKind();
5321  ElementsKind holey_initial = GetHoleyElementsKind(initial);
5322 
5323  ArraySingleArgumentConstructorStub stub_holey(holey_initial,
5324  DISABLE_ALLOCATION_SITES);
5325  __ TailCallStub(&stub_holey);
5326 
5327  __ Bind(&normal_sequence);
5328  ArraySingleArgumentConstructorStub stub(initial,
5329  DISABLE_ALLOCATION_SITES);
5330  __ TailCallStub(&stub);
5331  } else if (mode == DONT_OVERRIDE) {
5332  // We are going to create a holey array, but our kind is non-holey.
5333  // Fix kind and retry (only if we have an allocation site in the slot).
5334  __ Orr(kind, kind, 1);
5335 
5336  if (FLAG_debug_code) {
5337  __ Ldr(x10, FieldMemOperand(allocation_site, 0));
5338  __ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex,
5339  &normal_sequence);
5340  __ Assert(eq, kExpectedAllocationSite);
5341  }
5342 
5343  // Save the resulting elements kind in type info. We can't just store 'kind'
5344  // in the AllocationSite::transition_info field because elements kind is
5345  // restricted to a portion of the field; upper bits need to be left alone.
5347  __ Ldr(x11, FieldMemOperand(allocation_site,
5348  AllocationSite::kTransitionInfoOffset));
5349  __ Add(x11, x11, Smi::FromInt(kFastElementsKindPackedToHoley));
5350  __ Str(x11, FieldMemOperand(allocation_site,
5351  AllocationSite::kTransitionInfoOffset));
5352 
5353  __ Bind(&normal_sequence);
5354  int last_index =
5355  GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
5356  for (int i = 0; i <= last_index; ++i) {
5357  Label next;
5358  ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
5359  __ CompareAndBranch(kind, candidate_kind, ne, &next);
5360  ArraySingleArgumentConstructorStub stub(candidate_kind);
5361  __ TailCallStub(&stub);
5362  __ Bind(&next);
5363  }
5364 
5365  // If we reached this point there is a problem.
5366  __ Abort(kUnexpectedElementsKindInArrayConstructor);
5367  } else {
5368  UNREACHABLE();
5369  }
5370 }
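// Annotation (not part of the original file): in this elements-kind ordering
// each packed fast kind sits directly below its holey counterpart, so
// "Orr(kind, kind, 1)" turns, for instance, FAST_ELEMENTS into
// FAST_HOLEY_ELEMENTS. The AllocationSite's transition_info is bumped by
// kFastElementsKindPackedToHoley at the same time so that future arrays
// allocated from this site start out holey.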
5371 
5372 
5373 template<class T>
5374 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
5375  int to_index = GetSequenceIndexFromFastElementsKind(
5376  TERMINAL_FAST_ELEMENTS_KIND);
5377  for (int i = 0; i <= to_index; ++i) {
5378  ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
5379  T stub(kind);
5380  stub.GetCode(isolate);
5381  if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
5382  T stub1(kind, DISABLE_ALLOCATION_SITES);
5383  stub1.GetCode(isolate);
5384  }
5385  }
5386 }
5387 
5388 
5389 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
5390  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
5391  isolate);
5392  ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
5393  isolate);
5394  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
5395  isolate);
5396 }
5397 
5398 
5399 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
5400  Isolate* isolate) {
5401  ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
5402  for (int i = 0; i < 2; i++) {
5403  // For internal arrays we only need a few things
5404  InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
5405  stubh1.GetCode(isolate);
5406  InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
5407  stubh2.GetCode(isolate);
5408  InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
5409  stubh3.GetCode(isolate);
5410  }
5411 }
5412 
5413 
5414 void ArrayConstructorStub::GenerateDispatchToArrayStub(
5415  MacroAssembler* masm,
5416  AllocationSiteOverrideMode mode) {
5417  Register argc = x0;
5418  if (argument_count_ == ANY) {
5419  Label zero_case, n_case;
5420  __ Cbz(argc, &zero_case);
5421  __ Cmp(argc, 1);
5422  __ B(ne, &n_case);
5423 
5424  // One argument.
5425  CreateArrayDispatchOneArgument(masm, mode);
5426 
5427  __ Bind(&zero_case);
5428  // No arguments.
5429  CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
5430 
5431  __ Bind(&n_case);
5432  // N arguments.
5433  CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
5434 
5435  } else if (argument_count_ == NONE) {
5436  CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
5437  } else if (argument_count_ == ONE) {
5438  CreateArrayDispatchOneArgument(masm, mode);
5439  } else if (argument_count_ == MORE_THAN_ONE) {
5440  CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
5441  } else {
5442  UNREACHABLE();
5443  }
5444 }
5445 
5446 
5447 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
5448  ASM_LOCATION("ArrayConstructorStub::Generate");
5449  // ----------- S t a t e -------------
5450  // -- x0 : argc (only if argument_count_ == ANY)
5451  // -- x1 : constructor
5452  // -- x2 : AllocationSite or undefined
5453  // -- sp[0] : return address
5454  // -- sp[4] : last argument
5455  // -----------------------------------
5456  Register constructor = x1;
5457  Register allocation_site = x2;
5458 
5459  if (FLAG_debug_code) {
5460  // The array construct code is only set for the global and natives
5461  // builtin Array functions which always have maps.
5462 
5463  Label unexpected_map, map_ok;
5464  // Initial map for the builtin Array function should be a map.
5465  __ Ldr(x10, FieldMemOperand(constructor,
5466  JSFunction::kPrototypeOrInitialMapOffset));
5467  // Will both indicate a NULL and a Smi.
5468  __ JumpIfSmi(x10, &unexpected_map);
5469  __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
5470  __ Bind(&unexpected_map);
5471  __ Abort(kUnexpectedInitialMapForArrayFunction);
5472  __ Bind(&map_ok);
5473 
5474  // We should either have undefined in the allocation_site register or a
5475  // valid AllocationSite.
5476  __ AssertUndefinedOrAllocationSite(allocation_site, x10);
5477  }
5478 
5479  Register kind = x3;
5480  Label no_info;
5481  // Get the elements kind and case on that.
5482  __ JumpIfRoot(allocation_site, Heap::kUndefinedValueRootIndex, &no_info);
5483 
5484  __ Ldrsw(kind,
5485  UntagSmiFieldMemOperand(allocation_site,
5486  AllocationSite::kTransitionInfoOffset));
5487  __ And(kind, kind, AllocationSite::ElementsKindBits::kMask);
5488  GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
5489 
5490  __ Bind(&no_info);
5491  GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
5492 }
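// Annotation (not part of the original file): undefined in the allocation-site
// register means no feedback was collected, so the dispatch falls back to the
// generic DISABLE_ALLOCATION_SITES stubs; otherwise the elements kind recorded
// in the site's transition_info field selects a kind-specialised constructor
// stub via GenerateDispatchToArrayStub(masm, DONT_OVERRIDE).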
5493 
5494 
5495 void InternalArrayConstructorStub::GenerateCase(
5496  MacroAssembler* masm, ElementsKind kind) {
5497  Label zero_case, n_case;
5498  Register argc = x0;
5499 
5500  __ Cbz(argc, &zero_case);
5501  __ CompareAndBranch(argc, 1, ne, &n_case);
5502 
5503  // One argument.
5504  if (IsFastPackedElementsKind(kind)) {
5505  Label packed_case;
5506 
5507  // We might need to create a holey array; look at the first argument.
5508  __ Peek(x10, 0);
5509  __ Cbz(x10, &packed_case);
5510 
5511  InternalArraySingleArgumentConstructorStub
5512  stub1_holey(GetHoleyElementsKind(kind));
5513  __ TailCallStub(&stub1_holey);
5514 
5515  __ Bind(&packed_case);
5516  }
5517  InternalArraySingleArgumentConstructorStub stub1(kind);
5518  __ TailCallStub(&stub1);
5519 
5520  __ Bind(&zero_case);
5521  // No arguments.
5522  InternalArrayNoArgumentConstructorStub stub0(kind);
5523  __ TailCallStub(&stub0);
5524 
5525  __ Bind(&n_case);
5526  // N arguments.
5527  InternalArrayNArgumentsConstructorStub stubN(kind);
5528  __ TailCallStub(&stubN);
5529 }
5530 
5531 
5532 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
5533  // ----------- S t a t e -------------
5534  // -- x0 : argc
5535  // -- x1 : constructor
5536  // -- sp[0] : return address
5537  // -- sp[4] : last argument
5538  // -----------------------------------
5539  Handle<Object> undefined_sentinel(
5540  masm->isolate()->heap()->undefined_value(), masm->isolate());
5541 
5542  Register constructor = x1;
5543 
5544  if (FLAG_debug_code) {
5545  // The array construct code is only set for the global and natives
5546  // builtin Array functions which always have maps.
5547 
5548  Label unexpected_map, map_ok;
5549  // Initial map for the builtin Array function should be a map.
5550  __ Ldr(x10, FieldMemOperand(constructor,
5551  JSFunction::kPrototypeOrInitialMapOffset));
5552  // Will both indicate a NULL and a Smi.
5553  __ JumpIfSmi(x10, &unexpected_map);
5554  __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
5555  __ Bind(&unexpected_map);
5556  __ Abort(kUnexpectedInitialMapForArrayFunction);
5557  __ Bind(&map_ok);
5558  }
5559 
5560  Register kind = w3;
5561  // Figure out the right elements kind
5562  __ Ldr(x10, FieldMemOperand(constructor,
5563  JSFunction::kPrototypeOrInitialMapOffset));
5564 
5565  // Retrieve elements_kind from map.
5566  __ LoadElementsKindFromMap(kind, x10);
5567 
5568  if (FLAG_debug_code) {
5569  Label done;
5570  __ Cmp(x3, FAST_ELEMENTS);
5571  __ Ccmp(x3, FAST_HOLEY_ELEMENTS, ZFlag, ne);
5572  __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
5573  }
5574 
5575  Label fast_elements_case;
5576  __ CompareAndBranch(kind, FAST_ELEMENTS, eq, &fast_elements_case);
5577  GenerateCase(masm, FAST_HOLEY_ELEMENTS);
5578 
5579  __ Bind(&fast_elements_case);
5580  GenerateCase(masm, FAST_ELEMENTS);
5581 }
5582 
5583 
5584 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
5585  // ----------- S t a t e -------------
5586  // -- x0 : callee
5587  // -- x4 : call_data
5588  // -- x2 : holder
5589  // -- x1 : api_function_address
5590  // -- cp : context
5591  // --
5592  // -- sp[0] : last argument
5593  // -- ...
5594  // -- sp[(argc - 1) * 8] : first argument
5595  // -- sp[argc * 8] : receiver
5596  // -----------------------------------
5597 
5598  Register callee = x0;
5599  Register call_data = x4;
5600  Register holder = x2;
5601  Register api_function_address = x1;
5602  Register context = cp;
5603 
5604  int argc = ArgumentBits::decode(bit_field_);
5605  bool is_store = IsStoreBits::decode(bit_field_);
5606  bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
5607 
5608  typedef FunctionCallbackArguments FCA;
5609 
5610  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
5611  STATIC_ASSERT(FCA::kCalleeIndex == 5);
5612  STATIC_ASSERT(FCA::kDataIndex == 4);
5613  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
5614  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
5615  STATIC_ASSERT(FCA::kIsolateIndex == 1);
5616  STATIC_ASSERT(FCA::kHolderIndex == 0);
5617  STATIC_ASSERT(FCA::kArgsLength == 7);
5618 
5619  Isolate* isolate = masm->isolate();
5620 
5621  // FunctionCallbackArguments: context, callee and call data.
5622  __ Push(context, callee, call_data);
5623 
5624  // Load context from callee
5625  __ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
5626 
5627  if (!call_data_undefined) {
5628  __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
5629  }
5630  Register isolate_reg = x5;
5631  __ Mov(isolate_reg, ExternalReference::isolate_address(isolate));
5632 
5633  // FunctionCallbackArguments:
5634  // return value, return value default, isolate, holder.
5635  __ Push(call_data, call_data, isolate_reg, holder);
5636 
5637  // Prepare arguments.
5638  Register args = x6;
5639  __ Mov(args, masm->StackPointer());
5640 
5641  // Allocate the v8::Arguments structure in the arguments' space, since it's
5642  // not controlled by GC.
5643  const int kApiStackSpace = 4;
5644 
5645  // Allocate space so that CallApiFunctionAndReturn can store some scratch
5646  // registers on the stack.
5647  const int kCallApiFunctionSpillSpace = 4;
5648 
5649  FrameScope frame_scope(masm, StackFrame::MANUAL);
5650  __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
5651 
5652  ASSERT(!AreAliased(x0, api_function_address));
5653  // x0 = FunctionCallbackInfo&
5654  // Arguments is after the return address.
5655  __ Add(x0, masm->StackPointer(), 1 * kPointerSize);
5656  // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
5657  __ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
5658  __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
5659  // FunctionCallbackInfo::length_ = argc and
5660  // FunctionCallbackInfo::is_construct_call = 0
5661  __ Mov(x10, argc);
5662  __ Stp(x10, xzr, MemOperand(x0, 2 * kPointerSize));
5663 
5664  const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
5665  Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
5666  ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
5667  ApiFunction thunk_fun(thunk_address);
5668  ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
5669  masm->isolate());
5670 
5671  AllowExternalCallThatCantCauseGC scope(masm);
5672  MemOperand context_restore_operand(
5673  fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
5674  // Stores return the first js argument
5675  int return_value_offset = 0;
5676  if (is_store) {
5677  return_value_offset = 2 + FCA::kArgsLength;
5678  } else {
5679  return_value_offset = 2 + FCA::kReturnValueOffset;
5680  }
5681  MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
5682 
5683  const int spill_offset = 1 + kApiStackSpace;
5684  __ CallApiFunctionAndReturn(api_function_address,
5685  thunk_ref,
5686  kStackUnwindSpace,
5687  spill_offset,
5688  return_value_operand,
5689  &context_restore_operand);
5690 }
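// Annotation (not part of the original file): the FunctionCallbackInfo handed
// to the API callback is assembled entirely on the stack. The seven implicit
// FCA slots just pushed (holder, isolate, return-value default, return value,
// call data, callee, context) sit at the top of the stack with the JS
// arguments and receiver above them; implicit_args_ points at those slots,
// values_ points at the first JS argument
// (args + (kArgsLength - 1 + argc) * kPointerSize), length_ is argc and
// is_construct_call is cleared. On return the result is read back from the
// frame slot selected by return_value_offset.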
5691 
5692 
5693 void CallApiGetterStub::Generate(MacroAssembler* masm) {
5694  // ----------- S t a t e -------------
5695  // -- sp[0] : name
5696  // -- sp[8 - kArgsLength*8] : PropertyCallbackArguments object
5697  // -- ...
5698  // -- x2 : api_function_address
5699  // -----------------------------------
5700 
5701  Register api_function_address = x2;
5702 
5703  __ Mov(x0, masm->StackPointer()); // x0 = Handle<Name>
5704  __ Add(x1, x0, 1 * kPointerSize); // x1 = PCA
5705 
5706  const int kApiStackSpace = 1;
5707 
5708  // Allocate space so that CallApiFunctionAndReturn can store some scratch
5709  // registers on the stack.
5710  const int kCallApiFunctionSpillSpace = 4;
5711 
5712  FrameScope frame_scope(masm, StackFrame::MANUAL);
5713  __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
5714 
5715  // Create PropertyAccessorInfo instance on the stack above the exit frame with
5716  // x1 (internal::Object** args_) as the data.
5717  __ Poke(x1, 1 * kPointerSize);
5718  __ Add(x1, masm->StackPointer(), 1 * kPointerSize); // x1 = AccessorInfo&
5719 
5720  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
5721 
5722  Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
5723  ExternalReference::Type thunk_type =
5724  ExternalReference::PROFILING_GETTER_CALL;
5725  ApiFunction thunk_fun(thunk_address);
5726  ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
5727  masm->isolate());
5728 
5729  const int spill_offset = 1 + kApiStackSpace;
5730  __ CallApiFunctionAndReturn(api_function_address,
5731  thunk_ref,
5732  kStackUnwindSpace,
5733  spill_offset,
5734  MemOperand(fp, 6 * kPointerSize),
5735  NULL);
5736 }
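// Annotation (not part of the original file): for a getter interception the
// name handle is already at sp[0] and the property callback arguments block
// starts one slot above it, so x0 can serve as the Handle<Name> argument
// directly while x1 is pointed at an AccessorInfo built in the exit frame's
// extra slot. The callback is then dispatched through the profiling getter
// thunk in the same way CallApiFunctionStub uses its API-call thunk above.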
5737 
5738 
5739 #undef __
5740 
5741 } } // namespace v8::internal
5742 
5743 #endif // V8_TARGET_ARCH_ARM64