v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
code-stubs-x64.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if defined(V8_TARGET_ARCH_X64)
31 
32 #include "bootstrapper.h"
33 #include "code-stubs.h"
34 #include "regexp-macro-assembler.h"
35 
36 namespace v8 {
37 namespace internal {
38 
39 #define __ ACCESS_MASM(masm)
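// The '__' shorthand expands to ACCESS_MASM(masm), so every '__ <op>(...)'
// line below emits one x64 instruction (or macro-assembler sequence) into the
// stub's code buffer.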
40 
41 void ToNumberStub::Generate(MacroAssembler* masm) {
42  // The ToNumber stub takes one argument in rax.
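 // On x64, a smi has a zero tag bit and carries its 32-bit payload in the
 // upper half of the word, so a smi argument is already a number and is
 // returned unchanged; only heap objects need the map check below.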
43  Label check_heap_number, call_builtin;
44  __ SmiTest(rax);
45  __ j(not_zero, &check_heap_number, Label::kNear);
46  __ Ret();
47 
48  __ bind(&check_heap_number);
49  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
50  Heap::kHeapNumberMapRootIndex);
51  __ j(not_equal, &call_builtin, Label::kNear);
52  __ Ret();
53 
54  __ bind(&call_builtin);
55  __ pop(rcx); // Pop return address.
56  __ push(rax);
57  __ push(rcx); // Push return address.
58  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
59 }
60 
61 
62 void FastNewClosureStub::Generate(MacroAssembler* masm) {
63  // Create a new closure from the given function info in new
64  // space. Set the context to the current context in rsi.
65  Counters* counters = masm->isolate()->counters();
66 
67  Label gc;
68  __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
69 
70  __ IncrementCounter(counters->fast_new_closure_total(), 1);
71 
72  // Get the function info from the stack.
73  __ movq(rdx, Operand(rsp, 1 * kPointerSize));
74 
75  int map_index = (language_mode_ == CLASSIC_MODE)
76  ? Context::FUNCTION_MAP_INDEX
77  : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
78 
79  // Compute the function map in the current native context and set that
80  // as the map of the allocated object.
81  __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
82  __ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
83  __ movq(rbx, Operand(rcx, Context::SlotOffset(map_index)));
84  __ movq(FieldOperand(rax, JSObject::kMapOffset), rbx);
85
86  // Initialize the rest of the function. We don't have to update the
87  // write barrier because the allocated object is in new space.
88  __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
89  __ LoadRoot(r8, Heap::kTheHoleValueRootIndex);
90  __ LoadRoot(rdi, Heap::kUndefinedValueRootIndex);
91  __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
92  __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
93  __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), r8);
94  __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
95  __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
96  __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
97
98  // Initialize the code pointer in the function to be the one
99  // found in the shared function info object.
100  // But first check if there is an optimized version for our context.
101  Label check_optimized;
102  Label install_unoptimized;
103  if (FLAG_cache_optimized_code) {
104  __ movq(rbx,
105  FieldOperand(rdx, SharedFunctionInfo::kOptimizedCodeMapOffset));
106  __ testq(rbx, rbx);
107  __ j(not_zero, &check_optimized, Label::kNear);
108  }
109  __ bind(&install_unoptimized);
110  __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset),
111  rdi); // Initialize with undefined.
112  __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
113  __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
114  __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
115
116  // Return and remove the on-stack parameter.
117  __ ret(1 * kPointerSize);
118 
119  __ bind(&check_optimized);
120 
121  __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1);
122 
123  // rcx holds native context, rbx points to fixed array of 3-element entries
124  // (native context, optimized code, literals).
125  // The optimized code map must never be empty, so check the first elements.
126  Label install_optimized;
127  // Speculatively move code object into rdx.
128  __ movq(rdx, FieldOperand(rbx, FixedArray::kHeaderSize + kPointerSize));
129  __ cmpq(rcx, FieldOperand(rbx, FixedArray::kHeaderSize));
130  __ j(equal, &install_optimized);
131 
132  // Iterate through the rest of map backwards. rdx holds an index.
133  Label loop;
134  Label restore;
135  __ movq(rdx, FieldOperand(rbx, FixedArray::kLengthOffset));
136  __ SmiToInteger32(rdx, rdx);
137  __ bind(&loop);
138  // Do not double check first entry.
139  __ cmpq(rdx, Immediate(SharedFunctionInfo::kEntryLength));
140  __ j(equal, &restore);
141  __ subq(rdx, Immediate(SharedFunctionInfo::kEntryLength)); // Skip an entry.
142  __ cmpq(rcx, FieldOperand(rbx,
143  rdx,
144  times_pointer_size,
145  FixedArray::kHeaderSize));
146  __ j(not_equal, &loop, Label::kNear);
147  // Hit: fetch the optimized code.
148  __ movq(rdx, FieldOperand(rbx,
149  rdx,
150  times_pointer_size,
151  FixedArray::kHeaderSize + kPointerSize));
152 
153  __ bind(&install_optimized);
154  __ IncrementCounter(counters->fast_new_closure_install_optimized(), 1);
155 
156  // TODO(fschneider): Idea: store proper code pointers in the map and either
157  // unmangle them on marking or do nothing as the whole map is discarded on
158  // major GC anyway.
159  __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
160  __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
161
162  // Now link a function into a list of optimized functions.
163  __ movq(rdx, ContextOperand(rcx, Context::OPTIMIZED_FUNCTIONS_LIST));
164
165  __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset), rdx);
166  // No need for write barrier as JSFunction (rax) is in the new space.
167
168  __ movq(ContextOperand(rcx, Context::OPTIMIZED_FUNCTIONS_LIST), rax);
169  // Store JSFunction (rax) into rdx before issuing write barrier as
170  // it clobbers all the registers passed.
171  __ movq(rdx, rax);
172  __ RecordWriteContextSlot(
173  rcx,
174  Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
175  rdx,
176  rbx,
177  kDontSaveFPRegs);
178
179  // Return and remove the on-stack parameter.
180  __ ret(1 * kPointerSize);
181 
182  __ bind(&restore);
183  __ movq(rdx, Operand(rsp, 1 * kPointerSize));
184  __ jmp(&install_unoptimized);
185 
186  // Create a new closure through the slower runtime call.
187  __ bind(&gc);
188  __ pop(rcx); // Temporarily remove return address.
189  __ pop(rdx);
190  __ push(rsi);
191  __ push(rdx);
192  __ PushRoot(Heap::kFalseValueRootIndex);
193  __ push(rcx); // Restore return address.
194  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
195 }
196 
197 
198 void FastNewContextStub::Generate(MacroAssembler* masm) {
199  // Try to allocate the context in new space.
200  Label gc;
201  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
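 // A function context is laid out like a FixedArray: a fixed header followed
 // by 'length' pointer-sized slots, which is exactly the size requested below.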
202  __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
203  rax, rbx, rcx, &gc, TAG_OBJECT);
204 
205  // Get the function from the stack.
206  __ movq(rcx, Operand(rsp, 1 * kPointerSize));
207 
208  // Set up the object header.
209  __ LoadRoot(kScratchRegister, Heap::kFunctionContextMapRootIndex);
210  __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
211  __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
212
213  // Set up the fixed slots.
214  __ Set(rbx, 0); // Set to NULL.
215  __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
216  __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rsi);
217  __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
218
219  // Copy the global object from the previous context.
220  __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
221  __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)), rbx);
222
223  // Initialize the rest of the slots to undefined.
224  __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
225  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
226  __ movq(Operand(rax, Context::SlotOffset(i)), rbx);
227  }
228 
229  // Return and remove the on-stack parameter.
230  __ movq(rsi, rax);
231  __ ret(1 * kPointerSize);
232 
233  // Need to collect. Call into runtime system.
234  __ bind(&gc);
235  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
236 }
237 
238 
239 void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
240  // Stack layout on entry:
241  //
242  // [rsp + (1 * kPointerSize)]: function
243  // [rsp + (2 * kPointerSize)]: serialized scope info
244 
245  // Try to allocate the context in new space.
246  Label gc;
247  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
248  __ AllocateInNewSpace(FixedArray::SizeFor(length),
249  rax, rbx, rcx, &gc, TAG_OBJECT);
250 
251  // Get the function from the stack.
252  __ movq(rcx, Operand(rsp, 1 * kPointerSize));
253 
254  // Get the serialized scope info from the stack.
255  __ movq(rbx, Operand(rsp, 2 * kPointerSize));
256 
257  // Set up the object header.
258  __ LoadRoot(kScratchRegister, Heap::kBlockContextMapRootIndex);
259  __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
260  __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
261
262  // If this block context is nested in the native context we get a smi
263  // sentinel instead of a function. The block context should get the
264  // canonical empty function of the native context as its closure which
265  // we still have to look up.
266  Label after_sentinel;
267  __ JumpIfNotSmi(rcx, &after_sentinel, Label::kNear);
268  if (FLAG_debug_code) {
269  const char* message = "Expected 0 as a Smi sentinel";
270  __ cmpq(rcx, Immediate(0));
271  __ Assert(equal, message);
272  }
273  __ movq(rcx, GlobalObjectOperand());
274  __ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
275  __ movq(rcx, ContextOperand(rcx, Context::CLOSURE_INDEX));
276  __ bind(&after_sentinel);
277 
278  // Set up the fixed slots.
279  __ movq(ContextOperand(rax, Context::CLOSURE_INDEX), rcx);
280  __ movq(ContextOperand(rax, Context::PREVIOUS_INDEX), rsi);
281  __ movq(ContextOperand(rax, Context::EXTENSION_INDEX), rbx);
282
283  // Copy the global object from the previous context.
284  __ movq(rbx, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
285  __ movq(ContextOperand(rax, Context::GLOBAL_OBJECT_INDEX), rbx);
286
287  // Initialize the rest of the slots to the hole value.
288  __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
289  for (int i = 0; i < slots_; i++) {
290  __ movq(ContextOperand(rax, i + Context::MIN_CONTEXT_SLOTS), rbx);
291  }
292 
293  // Return and remove the on-stack parameter.
294  __ movq(rsi, rax);
295  __ ret(2 * kPointerSize);
296 
297  // Need to collect. Call into runtime system.
298  __ bind(&gc);
299  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
300 }
301 
302 
303 static void GenerateFastCloneShallowArrayCommon(
304  MacroAssembler* masm,
305  int length,
306  FastCloneShallowArrayStub::Mode mode,
307  Label* fail) {
308  // Registers on entry:
309  //
310  // rcx: boilerplate literal array.
312 
313  // All sizes here are multiples of kPointerSize.
314  int elements_size = 0;
315  if (length > 0) {
316  elements_size = (mode == CLONE_DOUBLE_ELEMENTS)
317  ? FixedDoubleArray::SizeFor(length)
318  : FixedArray::SizeFor(length);
319  }
320  int size = JSArray::kSize + elements_size;
321 
322  // Allocate both the JS array and the elements array in one big
323  // allocation. This avoids multiple limit checks.
324  __ AllocateInNewSpace(size, rax, rbx, rdx, fail, TAG_OBJECT);
325 
326  // Copy the JS array part.
327  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
328  if ((i != JSArray::kElementsOffset) || (length == 0)) {
329  __ movq(rbx, FieldOperand(rcx, i));
330  __ movq(FieldOperand(rax, i), rbx);
331  }
332  }
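 // When length > 0 the elements pointer was deliberately skipped above; it is
 // set below to point at the freshly allocated copy of the elements array.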
333 
334  if (length > 0) {
335  // Get hold of the elements array of the boilerplate and setup the
336  // elements pointer in the resulting object.
337  __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
338  __ lea(rdx, Operand(rax, JSArray::kSize));
339  __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
340
341  // Copy the elements array.
342  if (mode == CLONE_ELEMENTS) {
343  for (int i = 0; i < elements_size; i += kPointerSize) {
344  __ movq(rbx, FieldOperand(rcx, i));
345  __ movq(FieldOperand(rdx, i), rbx);
346  }
347  } else {
348  ASSERT(mode == CLONE_DOUBLE_ELEMENTS);
349  int i;
350  for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
351  __ movq(rbx, FieldOperand(rcx, i));
352  __ movq(FieldOperand(rdx, i), rbx);
353  }
354  while (i < elements_size) {
355  __ movsd(xmm0, FieldOperand(rcx, i));
356  __ movsd(FieldOperand(rdx, i), xmm0);
357  i += kDoubleSize;
358  }
359  ASSERT(i == elements_size);
360  }
361  }
362 }
363 
364 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
365  // Stack layout on entry:
366  //
367  // [rsp + kPointerSize]: constant elements.
368  // [rsp + (2 * kPointerSize)]: literal index.
369  // [rsp + (3 * kPointerSize)]: literals array.
370 
371  // Load boilerplate object into rcx and check if we need to create a
372  // boilerplate.
373  __ movq(rcx, Operand(rsp, 3 * kPointerSize));
374  __ movq(rax, Operand(rsp, 2 * kPointerSize));
375  SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
376  __ movq(rcx,
377  FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
378  __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
379  Label slow_case;
380  __ j(equal, &slow_case);
381 
382  FastCloneShallowArrayStub::Mode mode = mode_;
383  // rcx is boilerplate object.
384  Factory* factory = masm->isolate()->factory();
385  if (mode == CLONE_ANY_ELEMENTS) {
386  Label double_elements, check_fast_elements;
387  __ movq(rbx, FieldOperand(rcx, JSArray::kElementsOffset));
388  __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
389  factory->fixed_cow_array_map());
390  __ j(not_equal, &check_fast_elements);
391  GenerateFastCloneShallowArrayCommon(masm, 0,
392  COPY_ON_WRITE_ELEMENTS, &slow_case);
393  __ ret(3 * kPointerSize);
394 
395  __ bind(&check_fast_elements);
396  __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
397  factory->fixed_array_map());
398  __ j(not_equal, &double_elements);
399  GenerateFastCloneShallowArrayCommon(masm, length_,
400  CLONE_ELEMENTS, &slow_case);
401  __ ret(3 * kPointerSize);
402 
403  __ bind(&double_elements);
404  mode = CLONE_DOUBLE_ELEMENTS;
405  // Fall through to generate the code to handle double elements.
406  }
407 
408  if (FLAG_debug_code) {
409  const char* message;
410  Heap::RootListIndex expected_map_index;
411  if (mode == CLONE_ELEMENTS) {
412  message = "Expected (writable) fixed array";
413  expected_map_index = Heap::kFixedArrayMapRootIndex;
414  } else if (mode == CLONE_DOUBLE_ELEMENTS) {
415  message = "Expected (writable) fixed double array";
416  expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
417  } else {
418  ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
419  message = "Expected copy-on-write fixed array";
420  expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
421  }
422  __ push(rcx);
423  __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
424  __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
425  expected_map_index);
426  __ Assert(equal, message);
427  __ pop(rcx);
428  }
429 
430  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
431  __ ret(3 * kPointerSize);
432 
433  __ bind(&slow_case);
434  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
435 }
436 
437 
438 void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
439  // Stack layout on entry:
440  //
441  // [rsp + kPointerSize]: object literal flags.
442  // [rsp + (2 * kPointerSize)]: constant properties.
443  // [rsp + (3 * kPointerSize)]: literal index.
444  // [rsp + (4 * kPointerSize)]: literals array.
445 
446  // Load boilerplate object into rcx and check if we need to create a
447  // boilerplate.
448  Label slow_case;
449  __ movq(rcx, Operand(rsp, 4 * kPointerSize));
450  __ movq(rax, Operand(rsp, 3 * kPointerSize));
451  SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
452  __ movq(rcx,
453  FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
454  __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
455  __ j(equal, &slow_case);
456 
457  // Check that the boilerplate contains only fast properties and we can
458  // statically determine the instance size.
459  int size = JSObject::kHeaderSize + length_ * kPointerSize;
460  __ movq(rax, FieldOperand(rcx, HeapObject::kMapOffset));
461  __ movzxbq(rax, FieldOperand(rax, Map::kInstanceSizeOffset));
462  __ cmpq(rax, Immediate(size >> kPointerSizeLog2));
463  __ j(not_equal, &slow_case);
464 
465  // Allocate the JS object and copy header together with all in-object
466  // properties from the boilerplate.
467  __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
468  for (int i = 0; i < size; i += kPointerSize) {
469  __ movq(rbx, FieldOperand(rcx, i));
470  __ movq(FieldOperand(rax, i), rbx);
471  }
472 
473  // Return and remove the on-stack parameters.
474  __ ret(4 * kPointerSize);
475 
476  __ bind(&slow_case);
477  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
478 }
479 
480 
481 // The stub expects its argument on the stack and returns its result in tos_:
482 // zero for false, and a non-zero value for true.
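// The stub is specialized on the set of types it has observed so far (types_).
// A value of a type it does not yet handle reaches the 'patch' label, where
// GenerateTypeTransition installs a more general version of the stub.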
483 void ToBooleanStub::Generate(MacroAssembler* masm) {
484  // This stub overrides SometimesSetsUpAFrame() to return false. That means
485  // we cannot call anything that could cause a GC from this stub.
486  Label patch;
487  const Register argument = rax;
488  const Register map = rdx;
489 
490  if (!types_.IsEmpty()) {
491  __ movq(argument, Operand(rsp, 1 * kPointerSize));
492  }
493 
494  // undefined -> false
495  CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
496 
497  // Boolean -> its value
498  CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
499  CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
500 
501  // 'null' -> false.
502  CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
503 
504  if (types_.Contains(SMI)) {
505  // Smis: 0 -> false, all other -> true
506  Label not_smi;
507  __ JumpIfNotSmi(argument, &not_smi, Label::kNear);
508  // argument contains the correct return value already
509  if (!tos_.is(argument)) {
510  __ movq(tos_, argument);
511  }
512  __ ret(1 * kPointerSize);
513  __ bind(&not_smi);
514  } else if (types_.NeedsMap()) {
515  // If we need a map later and have a Smi -> patch.
516  __ JumpIfSmi(argument, &patch, Label::kNear);
517  }
518 
519  if (types_.NeedsMap()) {
520  __ movq(map, FieldOperand(argument, HeapObject::kMapOffset));
521 
522  if (types_.CanBeUndetectable()) {
523  __ testb(FieldOperand(map, Map::kBitFieldOffset),
524  Immediate(1 << Map::kIsUndetectable));
525  // Undetectable -> false.
526  Label not_undetectable;
527  __ j(zero, &not_undetectable, Label::kNear);
528  __ Set(tos_, 0);
529  __ ret(1 * kPointerSize);
530  __ bind(&not_undetectable);
531  }
532  }
533 
534  if (types_.Contains(SPEC_OBJECT)) {
535  // spec object -> true.
536  Label not_js_object;
537  __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
538  __ j(below, &not_js_object, Label::kNear);
539  // argument contains the correct return value already.
540  if (!tos_.is(argument)) {
541  __ Set(tos_, 1);
542  }
543  __ ret(1 * kPointerSize);
544  __ bind(&not_js_object);
545  }
546 
547  if (types_.Contains(STRING)) {
548  // String value -> false iff empty.
549  Label not_string;
550  __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
551  __ j(above_equal, &not_string, Label::kNear);
552  __ movq(tos_, FieldOperand(argument, String::kLengthOffset));
553  __ ret(1 * kPointerSize); // the string length is OK as the return value
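 // A string's length is a smi that is zero only for the empty string, so it
 // already encodes the ToBoolean result (non-zero iff true).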
554  __ bind(&not_string);
555  }
556 
557  if (types_.Contains(HEAP_NUMBER)) {
558  // heap number -> false iff +0, -0, or NaN.
559  Label not_heap_number, false_result;
560  __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
561  __ j(not_equal, &not_heap_number, Label::kNear);
562  __ xorps(xmm0, xmm0);
563  __ ucomisd(xmm0, FieldOperand(argument, HeapNumber::kValueOffset));
564  __ j(zero, &false_result, Label::kNear);
565  // argument contains the correct return value already.
566  if (!tos_.is(argument)) {
567  __ Set(tos_, 1);
568  }
569  __ ret(1 * kPointerSize);
570  __ bind(&false_result);
571  __ Set(tos_, 0);
572  __ ret(1 * kPointerSize);
573  __ bind(&not_heap_number);
574  }
575 
576  __ bind(&patch);
577  GenerateTypeTransition(masm);
578 }
579 
580 
581 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
582  __ PushCallerSaved(save_doubles_);
583  const int argument_count = 1;
584  __ PrepareCallCFunction(argument_count);
585 #ifdef _WIN64
586  __ LoadAddress(rcx, ExternalReference::isolate_address());
587 #else
588  __ LoadAddress(rdi, ExternalReference::isolate_address());
589 #endif
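 // The #ifdef above selects the first C argument register of the host ABI:
 // rcx on Win64, rdi in the System V AMD64 calling convention.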
590 
591  AllowExternalCallThatCantCauseGC scope(masm);
592  __ CallCFunction(
593  ExternalReference::store_buffer_overflow_function(masm->isolate()),
594  argument_count);
595  __ PopCallerSaved(save_doubles_);
596  __ ret(0);
597 }
598 
599 
600 void ToBooleanStub::CheckOddball(MacroAssembler* masm,
601  Type type,
602  Heap::RootListIndex value,
603  bool result) {
604  const Register argument = rax;
605  if (types_.Contains(type)) {
606  // If we see an expected oddball, return its ToBoolean value tos_.
607  Label different_value;
608  __ CompareRoot(argument, value);
609  __ j(not_equal, &different_value, Label::kNear);
610  if (!result) {
611  // If we have to return zero, there is no way around clearing tos_.
612  __ Set(tos_, 0);
613  } else if (!tos_.is(argument)) {
614  // If we have to return non-zero, we can re-use the argument if it is the
615  // same register as the result, because we never see Smi-zero here.
616  __ Set(tos_, 1);
617  }
618  __ ret(1 * kPointerSize);
619  __ bind(&different_value);
620  }
621 }
622 
623 
624 void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
625  __ pop(rcx); // Get return address, operand is now on top of stack.
626  __ Push(Smi::FromInt(tos_.code()));
627  __ Push(Smi::FromInt(types_.ToByte()));
628  __ push(rcx); // Push return address.
629  // Patch the caller to an appropriate specialized stub and return the
630  // operation result to the caller of the stub.
631  __ TailCallExternalReference(
632  ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
633  3,
634  1);
635 }
636 
637 
638 class FloatingPointHelper : public AllStatic {
639  public:
640  // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
641  // If the operands are not both numbers, jump to not_numbers.
642  // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
643  // NumberOperands assumes both are smis or heap numbers.
644  static void LoadSSE2SmiOperands(MacroAssembler* masm);
645  static void LoadSSE2NumberOperands(MacroAssembler* masm);
646  static void LoadSSE2UnknownOperands(MacroAssembler* masm,
647  Label* not_numbers);
648 
649  // Takes the operands in rdx and rax and loads them as integers in rax
650  // and rcx.
651  static void LoadAsIntegers(MacroAssembler* masm,
652  Label* operand_conversion_failure,
653  Register heap_number_map);
654  // As above, but we know the operands to be numbers. In that case,
655  // conversion can't fail.
656  static void LoadNumbersAsIntegers(MacroAssembler* masm);
657 
658  // Tries to convert two values to smis losslessly.
659  // This fails if either argument is not a Smi nor a HeapNumber,
660  // or if it's a HeapNumber with a value that can't be converted
661  // losslessly to a Smi. In that case, control transitions to the
662  // on_not_smis label.
663  // On success, either control goes to the on_success label (if one is
664  // provided), or it falls through at the end of the code (if on_success
665  // is NULL).
666  // On success, both first and second holds Smi tagged values.
667  // One of first or second must be non-Smi when entering.
668  static void NumbersToSmis(MacroAssembler* masm,
669  Register first,
670  Register second,
671  Register scratch1,
672  Register scratch2,
673  Register scratch3,
674  Label* on_success,
675  Label* on_not_smis);
676 };
677 
678 
679 // Get the integer part of a heap number.
680 // Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
681 void IntegerConvert(MacroAssembler* masm,
682  Register result,
683  Register source) {
684  // Result may be rcx. If result and source are the same register, source will
685  // be overwritten.
686  ASSERT(!result.is(rdi) && !result.is(rbx));
687  // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
688  // cvttsd2si (32-bit version) directly.
689  Register double_exponent = rbx;
690  Register double_value = rdi;
691  Label done, exponent_63_plus;
692  // Get double and extract exponent.
693  __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
694  // Clear result preemptively, in case we need to return zero.
695  __ xorl(result, result);
696  __ movq(xmm0, double_value); // Save copy in xmm0 in case we need it there.
697  // Double the value to remove the sign bit, shift the exponent down to the least significant bits,
698  // and subtract bias to get the unshifted, unbiased exponent.
699  __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
700  __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
701  __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
702  // Check whether the exponent is too big for a 63 bit unsigned integer.
703  __ cmpl(double_exponent, Immediate(63));
704  __ j(above_equal, &exponent_63_plus, Label::kNear);
705  // Handle exponent range 0..62.
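 // For exponents below 63 the integer part fits in a signed 64-bit value, so
 // cvttsd2siq converts it exactly; callers then use only the low 32 bits,
 // matching ECMAScript ToInt32 truncation.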
706  __ cvttsd2siq(result, xmm0);
707  __ jmp(&done, Label::kNear);
708 
709  __ bind(&exponent_63_plus);
710  // Exponent negative or 63+.
711  __ cmpl(double_exponent, Immediate(83));
712  // If exponent negative or above 83, number contains no significant bits in
713  // the range 0..2^31, so result is zero, and rcx already holds zero.
714  __ j(above, &done, Label::kNear);
715 
716  // Exponent in range 63..83.
717  // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
718  // the least significant exponent-52 bits.
719 
720  // Negate low bits of mantissa if value is negative.
721  __ addq(double_value, double_value); // Move sign bit to carry.
722  __ sbbl(result, result); // And convert carry to -1 in result register.
723  // If double_value is negative, compute (double_value - 1) ^ -1; otherwise (double_value - 0) ^ 0.
724  __ addl(double_value, result);
725  // Do xor in opposite directions depending on where we want the result
726  // (depending on whether result is rcx or not).
727 
728  if (result.is(rcx)) {
729  __ xorl(double_value, result);
730  // Left shift mantissa by (exponent - mantissabits - 1) to save the
731  // bits that have positional values below 2^32 (the extra -1 comes from the
732  // doubling done above to move the sign bit into the carry flag).
733  __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
734  __ shll_cl(double_value);
735  __ movl(result, double_value);
736  } else {
737  // As the then-branch, but move double-value to result before shifting.
738  __ xorl(result, double_value);
739  __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
740  __ shll_cl(result);
741  }
742 
743  __ bind(&done);
744 }
745 
746 
747 void UnaryOpStub::Generate(MacroAssembler* masm) {
748  switch (operand_type_) {
749  case UnaryOpIC::UNINITIALIZED:
750  GenerateTypeTransition(masm);
751  break;
752  case UnaryOpIC::SMI:
753  GenerateSmiStub(masm);
754  break;
755  case UnaryOpIC::HEAP_NUMBER:
756  GenerateHeapNumberStub(masm);
757  break;
758  case UnaryOpIC::GENERIC:
759  GenerateGenericStub(masm);
760  break;
761  }
762 }
763 
764 
765 void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
766  __ pop(rcx); // Save return address.
767 
768  __ push(rax); // the operand
769  __ Push(Smi::FromInt(op_));
770  __ Push(Smi::FromInt(mode_));
771  __ Push(Smi::FromInt(operand_type_));
772 
773  __ push(rcx); // Push return address.
774 
775  // Patch the caller to an appropriate specialized stub and return the
776  // operation result to the caller of the stub.
777  __ TailCallExternalReference(
778  ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
779 }
780 
781 
782 // TODO(svenpanne): Use virtual functions instead of switch.
783 void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
784  switch (op_) {
785  case Token::SUB:
786  GenerateSmiStubSub(masm);
787  break;
788  case Token::BIT_NOT:
789  GenerateSmiStubBitNot(masm);
790  break;
791  default:
792  UNREACHABLE();
793  }
794 }
795 
796 
797 void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
798  Label slow;
799  GenerateSmiCodeSub(masm, &slow, &slow, Label::kNear, Label::kNear);
800  __ bind(&slow);
801  GenerateTypeTransition(masm);
802 }
803 
804 
805 void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
806  Label non_smi;
807  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
808  __ bind(&non_smi);
809  GenerateTypeTransition(masm);
810 }
811 
812 
813 void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
814  Label* non_smi,
815  Label* slow,
816  Label::Distance non_smi_near,
817  Label::Distance slow_near) {
818  Label done;
819  __ JumpIfNotSmi(rax, non_smi, non_smi_near);
820  __ SmiNeg(rax, rax, &done, Label::kNear);
821  __ jmp(slow, slow_near);
822  __ bind(&done);
823  __ ret(0);
824 }
825 
826 
827 void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
828  Label* non_smi,
829  Label::Distance non_smi_near) {
830  __ JumpIfNotSmi(rax, non_smi, non_smi_near);
831  __ SmiNot(rax, rax);
832  __ ret(0);
833 }
834 
835 
836 // TODO(svenpanne): Use virtual functions instead of switch.
837 void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
838  switch (op_) {
839  case Token::SUB:
840  GenerateHeapNumberStubSub(masm);
841  break;
842  case Token::BIT_NOT:
843  GenerateHeapNumberStubBitNot(masm);
844  break;
845  default:
846  UNREACHABLE();
847  }
848 }
849 
850 
851 void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
852  Label non_smi, slow, call_builtin;
853  GenerateSmiCodeSub(masm, &non_smi, &call_builtin, Label::kNear);
854  __ bind(&non_smi);
855  GenerateHeapNumberCodeSub(masm, &slow);
856  __ bind(&slow);
857  GenerateTypeTransition(masm);
858  __ bind(&call_builtin);
859  GenerateGenericCodeFallback(masm);
860 }
861 
862 
863 void UnaryOpStub::GenerateHeapNumberStubBitNot(
864  MacroAssembler* masm) {
865  Label non_smi, slow;
866  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
867  __ bind(&non_smi);
868  GenerateHeapNumberCodeBitNot(masm, &slow);
869  __ bind(&slow);
870  GenerateTypeTransition(masm);
871 }
872 
873 
874 void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
875  Label* slow) {
876  // Check if the operand is a heap number.
877  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
878  Heap::kHeapNumberMapRootIndex);
879  __ j(not_equal, slow);
880 
881  // Operand is a float, negate its value by flipping the sign bit.
882  if (mode_ == UNARY_OVERWRITE) {
883  __ Set(kScratchRegister, 0x01);
884  __ shl(kScratchRegister, Immediate(63));
885  __ xor_(FieldOperand(rax, HeapNumber::kValueOffset), kScratchRegister);
886  } else {
887  // Allocate a heap number before calculating the answer,
888  // so we don't have an untagged double around during GC.
889  Label slow_allocate_heapnumber, heapnumber_allocated;
890  __ AllocateHeapNumber(rcx, rbx, &slow_allocate_heapnumber);
891  __ jmp(&heapnumber_allocated);
892 
893  __ bind(&slow_allocate_heapnumber);
894  {
895  FrameScope scope(masm, StackFrame::INTERNAL);
896  __ push(rax);
897  __ CallRuntime(Runtime::kNumberAlloc, 0);
898  __ movq(rcx, rax);
899  __ pop(rax);
900  }
901  __ bind(&heapnumber_allocated);
902  // rcx: allocated 'empty' number
903 
904  // Copy the double value to the new heap number, flipping the sign.
905  __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
906  __ Set(kScratchRegister, 0x01);
907  __ shl(kScratchRegister, Immediate(63));
908  __ xor_(rdx, kScratchRegister); // Flip sign.
909  __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
910  __ movq(rax, rcx);
911  }
912  __ ret(0);
913 }
914 
915 
916 void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
917  Label* slow) {
918  // Check if the operand is a heap number.
919  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
920  Heap::kHeapNumberMapRootIndex);
921  __ j(not_equal, slow);
922 
923  // Convert the heap number in rax to an untagged integer in rcx.
924  IntegerConvert(masm, rax, rax);
925 
926  // Do the bitwise operation and smi tag the result.
927  __ notl(rax);
928  __ Integer32ToSmi(rax, rax);
929  __ ret(0);
930 }
931 
932 
933 // TODO(svenpanne): Use virtual functions instead of switch.
934 void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
935  switch (op_) {
936  case Token::SUB:
937  GenerateGenericStubSub(masm);
938  break;
939  case Token::BIT_NOT:
940  GenerateGenericStubBitNot(masm);
941  break;
942  default:
943  UNREACHABLE();
944  }
945 }
946 
947 
948 void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
949  Label non_smi, slow;
950  GenerateSmiCodeSub(masm, &non_smi, &slow, Label::kNear);
951  __ bind(&non_smi);
952  GenerateHeapNumberCodeSub(masm, &slow);
953  __ bind(&slow);
954  GenerateGenericCodeFallback(masm);
955 }
956 
957 
958 void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
959  Label non_smi, slow;
960  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
961  __ bind(&non_smi);
962  GenerateHeapNumberCodeBitNot(masm, &slow);
963  __ bind(&slow);
964  GenerateGenericCodeFallback(masm);
965 }
966 
967 
968 void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
969  // Handle the slow case by jumping to the JavaScript builtin.
970  __ pop(rcx); // pop return address
971  __ push(rax);
972  __ push(rcx); // push return address
973  switch (op_) {
974  case Token::SUB:
975  __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
976  break;
977  case Token::BIT_NOT:
978  __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
979  break;
980  default:
981  UNREACHABLE();
982  }
983 }
984 
985 
986 void UnaryOpStub::PrintName(StringStream* stream) {
987  const char* op_name = Token::Name(op_);
988  const char* overwrite_name = NULL; // Make g++ happy.
989  switch (mode_) {
990  case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
991  case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
992  }
993  stream->Add("UnaryOpStub_%s_%s_%s",
994  op_name,
995  overwrite_name,
996  UnaryOpIC::GetName(operand_type_));
997 }
998 
999 
1000 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
1001  __ pop(rcx); // Save return address.
1002  __ push(rdx);
1003  __ push(rax);
1004  // Left and right arguments are now on top.
1005  // Push this stub's key. Although the operation and the type info are
1006  // encoded into the key, the encoding is opaque, so push them too.
1007  __ Push(Smi::FromInt(MinorKey()));
1008  __ Push(Smi::FromInt(op_));
1009  __ Push(Smi::FromInt(operands_type_));
1010 
1011  __ push(rcx); // Push return address.
1012 
1013  // Patch the caller to an appropriate specialized stub and return the
1014  // operation result to the caller of the stub.
1015  __ TailCallExternalReference(
1016  ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
1017  masm->isolate()),
1018  5,
1019  1);
1020 }
1021 
1022 
1023 void BinaryOpStub::Generate(MacroAssembler* masm) {
1024  // Explicitly allow generation of nested stubs. It is safe here because
1025  // generation code does not use any raw pointers.
1026  AllowStubCallsScope allow_stub_calls(masm, true);
1027 
1028  switch (operands_type_) {
1029  case BinaryOpIC::UNINITIALIZED:
1030  GenerateTypeTransition(masm);
1031  break;
1032  case BinaryOpIC::SMI:
1033  GenerateSmiStub(masm);
1034  break;
1035  case BinaryOpIC::INT32:
1036  UNREACHABLE();
1037  // The int32 case is identical to the Smi case. We avoid creating this
1038  // ic state on x64.
1039  break;
1040  case BinaryOpIC::HEAP_NUMBER:
1041  GenerateHeapNumberStub(masm);
1042  break;
1043  case BinaryOpIC::ODDBALL:
1044  GenerateOddballStub(masm);
1045  break;
1046  case BinaryOpIC::BOTH_STRING:
1047  GenerateBothStringStub(masm);
1048  break;
1049  case BinaryOpIC::STRING:
1050  GenerateStringStub(masm);
1051  break;
1052  case BinaryOpIC::GENERIC:
1053  GenerateGeneric(masm);
1054  break;
1055  default:
1056  UNREACHABLE();
1057  }
1058 }
1059 
1060 
1061 void BinaryOpStub::PrintName(StringStream* stream) {
1062  const char* op_name = Token::Name(op_);
1063  const char* overwrite_name;
1064  switch (mode_) {
1065  case NO_OVERWRITE: overwrite_name = "Alloc"; break;
1066  case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
1067  case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
1068  default: overwrite_name = "UnknownOverwrite"; break;
1069  }
1070  stream->Add("BinaryOpStub_%s_%s_%s",
1071  op_name,
1072  overwrite_name,
1073  BinaryOpIC::GetName(operands_type_));
1074 }
1075 
1076 
1077 void BinaryOpStub::GenerateSmiCode(
1078  MacroAssembler* masm,
1079  Label* slow,
1080  SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
1081 
1082  // Arguments to BinaryOpStub are in rdx and rax.
1083  const Register left = rdx;
1084  const Register right = rax;
1085 
1086  // We only generate heapnumber answers for overflowing calculations
1087  // for the four basic arithmetic operations and logical right shift by 0.
1088  bool generate_inline_heapnumber_results =
1089  (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) &&
1090  (op_ == Token::ADD || op_ == Token::SUB ||
1091  op_ == Token::MUL || op_ == Token::DIV || op_ == Token::SHR);
1092 
1093  // Smi check of both operands. If op is BIT_OR, the check is delayed
1094  // until after the OR operation.
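 // SmiOrIfSmis below ORs the operands and bails out if the result is not a
 // smi; since the tag bits are ORed as well, this single check covers both
 // inputs, and the OR result is already the answer when both were smis.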
1095  Label not_smis;
1096  Label use_fp_on_smis;
1097  Label fail;
1098 
1099  if (op_ != Token::BIT_OR) {
1100  Comment smi_check_comment(masm, "-- Smi check arguments");
1101  __ JumpIfNotBothSmi(left, right, &not_smis);
1102  }
1103 
1104  Label smi_values;
1105  __ bind(&smi_values);
1106  // Perform the operation.
1107  Comment perform_smi(masm, "-- Perform smi operation");
1108  switch (op_) {
1109  case Token::ADD:
1110  ASSERT(right.is(rax));
1111  __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
1112  break;
1113 
1114  case Token::SUB:
1115  __ SmiSub(left, left, right, &use_fp_on_smis);
1116  __ movq(rax, left);
1117  break;
1118 
1119  case Token::MUL:
1120  ASSERT(right.is(rax));
1121  __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
1122  break;
1123 
1124  case Token::DIV:
1125  // SmiDiv will not accept left in rdx or right in rax.
1126  __ movq(rbx, rax);
1127  __ movq(rcx, rdx);
1128  __ SmiDiv(rax, rcx, rbx, &use_fp_on_smis);
1129  break;
1130 
1131  case Token::MOD:
1132  // SmiMod will not accept left in rdx or right in rax.
1133  __ movq(rbx, rax);
1134  __ movq(rcx, rdx);
1135  __ SmiMod(rax, rcx, rbx, &use_fp_on_smis);
1136  break;
1137 
1138  case Token::BIT_OR: {
1139  ASSERT(right.is(rax));
1140  __ SmiOrIfSmis(right, right, left, &not_smis); // BIT_OR is commutative.
1141  break;
1142  }
1143  case Token::BIT_XOR:
1144  ASSERT(right.is(rax));
1145  __ SmiXor(right, right, left); // BIT_XOR is commutative.
1146  break;
1147 
1148  case Token::BIT_AND:
1149  ASSERT(right.is(rax));
1150  __ SmiAnd(right, right, left); // BIT_AND is commutative.
1151  break;
1152 
1153  case Token::SHL:
1154  __ SmiShiftLeft(left, left, right);
1155  __ movq(rax, left);
1156  break;
1157 
1158  case Token::SAR:
1159  __ SmiShiftArithmeticRight(left, left, right);
1160  __ movq(rax, left);
1161  break;
1162 
1163  case Token::SHR:
1164  __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis);
1165  __ movq(rax, left);
1166  break;
1167 
1168  default:
1169  UNREACHABLE();
1170  }
1171 
1172  // 5. Emit return of result in rax. Some operations have registers pushed.
1173  __ ret(0);
1174 
1175  if (use_fp_on_smis.is_linked()) {
1176  // 6. For some operations emit inline code to perform floating point
1177  // operations on known smis (e.g., if the result of the operation
1178  // overflowed the smi range).
1179  __ bind(&use_fp_on_smis);
1180  if (op_ == Token::DIV || op_ == Token::MOD) {
1181  // Restore left and right to rdx and rax.
1182  __ movq(rdx, rcx);
1183  __ movq(rax, rbx);
1184  }
1185 
1186  if (generate_inline_heapnumber_results) {
1187  __ AllocateHeapNumber(rcx, rbx, slow);
1188  Comment perform_float(masm, "-- Perform float operation on smis");
1189  if (op_ == Token::SHR) {
1190  __ SmiToInteger32(left, left);
1191  __ cvtqsi2sd(xmm0, left);
1192  } else {
1193  FloatingPointHelper::LoadSSE2SmiOperands(masm);
1194  switch (op_) {
1195  case Token::ADD: __ addsd(xmm0, xmm1); break;
1196  case Token::SUB: __ subsd(xmm0, xmm1); break;
1197  case Token::MUL: __ mulsd(xmm0, xmm1); break;
1198  case Token::DIV: __ divsd(xmm0, xmm1); break;
1199  default: UNREACHABLE();
1200  }
1201  }
1202  __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
1203  __ movq(rax, rcx);
1204  __ ret(0);
1205  } else {
1206  __ jmp(&fail);
1207  }
1208  }
1209 
1210  // 7. Non-smi operands reach the end of the code generated by
1211  // GenerateSmiCode, and fall through to subsequent code,
1212  // with the operands in rdx and rax.
1213  // But first we check if non-smi values are HeapNumbers holding
1214  // values that could be smi.
1215  __ bind(&not_smis);
1216  Comment done_comment(masm, "-- Enter non-smi code");
1217  FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx,
1218  &smi_values, &fail);
1219  __ jmp(&smi_values);
1220  __ bind(&fail);
1221 }
1222 
1223 
1224 void BinaryOpStub::GenerateFloatingPointCode(MacroAssembler* masm,
1225  Label* allocation_failure,
1226  Label* non_numeric_failure) {
1227  switch (op_) {
1228  case Token::ADD:
1229  case Token::SUB:
1230  case Token::MUL:
1231  case Token::DIV: {
1232  FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure);
1233 
1234  switch (op_) {
1235  case Token::ADD: __ addsd(xmm0, xmm1); break;
1236  case Token::SUB: __ subsd(xmm0, xmm1); break;
1237  case Token::MUL: __ mulsd(xmm0, xmm1); break;
1238  case Token::DIV: __ divsd(xmm0, xmm1); break;
1239  default: UNREACHABLE();
1240  }
1241  GenerateHeapResultAllocation(masm, allocation_failure);
1242  __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
1243  __ ret(0);
1244  break;
1245  }
1246  case Token::MOD: {
1247  // For MOD we jump to the allocation_failure label, to call runtime.
1248  __ jmp(allocation_failure);
1249  break;
1250  }
1251  case Token::BIT_OR:
1252  case Token::BIT_AND:
1253  case Token::BIT_XOR:
1254  case Token::SAR:
1255  case Token::SHL:
1256  case Token::SHR: {
1257  Label non_smi_shr_result;
1258  Register heap_number_map = r9;
1259  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
1260  FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
1261  heap_number_map);
1262  switch (op_) {
1263  case Token::BIT_OR: __ orl(rax, rcx); break;
1264  case Token::BIT_AND: __ andl(rax, rcx); break;
1265  case Token::BIT_XOR: __ xorl(rax, rcx); break;
1266  case Token::SAR: __ sarl_cl(rax); break;
1267  case Token::SHL: __ shll_cl(rax); break;
1268  case Token::SHR: {
1269  __ shrl_cl(rax);
1270  // Check if result is negative. This can only happen for a shift
1271  // by zero.
1272  __ testl(rax, rax);
1273  __ j(negative, &non_smi_shr_result);
1274  break;
1275  }
1276  default: UNREACHABLE();
1277  }
1278  STATIC_ASSERT(kSmiValueSize == 32);
1279  // Tag smi result and return.
1280  __ Integer32ToSmi(rax, rax);
1281  __ Ret();
1282 
1283  // Logical shift right can produce an unsigned int32 that is not
1284  // an int32, and so is not in the smi range. Allocate a heap number
1285  // in that case.
1286  if (op_ == Token::SHR) {
1287  __ bind(&non_smi_shr_result);
1288  Label allocation_failed;
1289  __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
1290  // Allocate heap number in new space.
1291  // Not using AllocateHeapNumber macro in order to reuse
1292  // already loaded heap_number_map.
1293  __ AllocateInNewSpace(HeapNumber::kSize,
1294  rax,
1295  rdx,
1296  no_reg,
1297  &allocation_failed,
1298  TAG_OBJECT);
1299  // Set the map.
1300  __ AssertRootValue(heap_number_map,
1301  Heap::kHeapNumberMapRootIndex,
1302  "HeapNumberMap register clobbered.");
1303  __ movq(FieldOperand(rax, HeapObject::kMapOffset),
1304  heap_number_map);
1305  __ cvtqsi2sd(xmm0, rbx);
1306  __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
1307  __ Ret();
1308 
1309  __ bind(&allocation_failed);
1310  // We need tagged values in rdx and rax for the following code,
1311  // not int32 in rax and rcx.
1312  __ Integer32ToSmi(rax, rcx);
1313  __ Integer32ToSmi(rdx, rbx);
1314  __ jmp(allocation_failure);
1315  }
1316  break;
1317  }
1318  default: UNREACHABLE(); break;
1319  }
1320  // No fall-through from this generated code.
1321  if (FLAG_debug_code) {
1322  __ Abort("Unexpected fall-through in "
1323  "BinaryOpStub::GenerateFloatingPointCode.");
1324  }
1325 }
1326 
1327 
1328 void BinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
1329  ASSERT(op_ == Token::ADD);
1330  Label left_not_string, call_runtime;
1331 
1332  // Registers containing left and right operands respectively.
1333  Register left = rdx;
1334  Register right = rax;
1335 
1336  // Test if left operand is a string.
1337  __ JumpIfSmi(left, &left_not_string, Label::kNear);
1338  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
1339  __ j(above_equal, &left_not_string, Label::kNear);
1340  StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
1341  GenerateRegisterArgsPush(masm);
1342  __ TailCallStub(&string_add_left_stub);
1343 
1344  // Left operand is not a string, test right.
1345  __ bind(&left_not_string);
1346  __ JumpIfSmi(right, &call_runtime, Label::kNear);
1347  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
1348  __ j(above_equal, &call_runtime, Label::kNear);
1349 
1350  StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
1351  GenerateRegisterArgsPush(masm);
1352  __ TailCallStub(&string_add_right_stub);
1353 
1354  // Neither argument is a string.
1355  __ bind(&call_runtime);
1356 }
1357 
1358 
1359 void BinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) {
1360  GenerateRegisterArgsPush(masm);
1361  switch (op_) {
1362  case Token::ADD:
1363  __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
1364  break;
1365  case Token::SUB:
1366  __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
1367  break;
1368  case Token::MUL:
1369  __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
1370  break;
1371  case Token::DIV:
1372  __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
1373  break;
1374  case Token::MOD:
1375  __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
1376  break;
1377  case Token::BIT_OR:
1378  __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
1379  break;
1380  case Token::BIT_AND:
1381  __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
1382  break;
1383  case Token::BIT_XOR:
1384  __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
1385  break;
1386  case Token::SAR:
1387  __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
1388  break;
1389  case Token::SHL:
1390  __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
1391  break;
1392  case Token::SHR:
1393  __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
1394  break;
1395  default:
1396  UNREACHABLE();
1397  }
1398 }
1399 
1400 
1401 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
1402  Label call_runtime;
1403  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
1404  result_type_ == BinaryOpIC::SMI) {
1405  // Only allow smi results.
1406  GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
1407  } else {
1408  // Allow heap number result and don't make a transition if a heap number
1409  // cannot be allocated.
1410  GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
1411  }
1412 
1413  // Code falls through if the result is not returned as either a smi or heap
1414  // number.
1415  GenerateTypeTransition(masm);
1416 
1417  if (call_runtime.is_linked()) {
1418  __ bind(&call_runtime);
1419  GenerateCallRuntimeCode(masm);
1420  }
1421 }
1422 
1423 
1424 void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
1425  ASSERT(operands_type_ == BinaryOpIC::STRING);
1426  ASSERT(op_ == Token::ADD);
1427  GenerateStringAddCode(masm);
1428  // Try to add arguments as strings, otherwise, transition to the generic
1429  // BinaryOpIC type.
1430  GenerateTypeTransition(masm);
1431 }
1432 
1433 
1434 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
1435  Label call_runtime;
1436  ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
1437  ASSERT(op_ == Token::ADD);
1438  // If both arguments are strings, call the string add stub.
1439  // Otherwise, do a transition.
1440 
1441  // Registers containing left and right operands respectively.
1442  Register left = rdx;
1443  Register right = rax;
1444 
1445  // Test if left operand is a string.
1446  __ JumpIfSmi(left, &call_runtime);
1447  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
1448  __ j(above_equal, &call_runtime);
1449 
1450  // Test if right operand is a string.
1451  __ JumpIfSmi(right, &call_runtime);
1452  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
1453  __ j(above_equal, &call_runtime);
1454 
1455  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
1456  GenerateRegisterArgsPush(masm);
1457  __ TailCallStub(&string_add_stub);
1458 
1459  __ bind(&call_runtime);
1460  GenerateTypeTransition(masm);
1461 }
1462 
1463 
1464 void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
1465  Label call_runtime;
1466 
1467  if (op_ == Token::ADD) {
1468  // Handle string addition here, because it is the only operation
1469  // that does not do a ToNumber conversion on the operands.
1470  GenerateStringAddCode(masm);
1471  }
1472 
1473  // Convert oddball arguments to numbers.
1474  Label check, done;
1475  __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
1476  __ j(not_equal, &check, Label::kNear);
1477  if (Token::IsBitOp(op_)) {
1478  __ xor_(rdx, rdx);
1479  } else {
1480  __ LoadRoot(rdx, Heap::kNanValueRootIndex);
1481  }
1482  __ jmp(&done, Label::kNear);
1483  __ bind(&check);
1484  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
1485  __ j(not_equal, &done, Label::kNear);
1486  if (Token::IsBitOp(op_)) {
1487  __ xor_(rax, rax);
1488  } else {
1489  __ LoadRoot(rax, Heap::kNanValueRootIndex);
1490  }
1491  __ bind(&done);
1492 
1493  GenerateHeapNumberStub(masm);
1494 }
1495 
1496 
1497 void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
1498  Label gc_required, not_number;
1499  GenerateFloatingPointCode(masm, &gc_required, &not_number);
1500 
1501  __ bind(&not_number);
1502  GenerateTypeTransition(masm);
1503 
1504  __ bind(&gc_required);
1505  GenerateCallRuntimeCode(masm);
1506 }
1507 
1508 
1509 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
1510  Label call_runtime, call_string_add_or_runtime;
1511 
1512  GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
1513 
1514  GenerateFloatingPointCode(masm, &call_runtime, &call_string_add_or_runtime);
1515 
1516  __ bind(&call_string_add_or_runtime);
1517  if (op_ == Token::ADD) {
1518  GenerateStringAddCode(masm);
1519  }
1520 
1521  __ bind(&call_runtime);
1522  GenerateCallRuntimeCode(masm);
1523 }
1524 
1525 
1526 void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
1527  Label* alloc_failure) {
1528  Label skip_allocation;
1529  OverwriteMode mode = mode_;
1530  switch (mode) {
1531  case OVERWRITE_LEFT: {
1532  // If the argument in rdx is already an object, we skip the
1533  // allocation of a heap number.
1534  __ JumpIfNotSmi(rdx, &skip_allocation);
1535  // Allocate a heap number for the result. Keep rax and rdx intact
1536  // for the possible runtime call.
1537  __ AllocateHeapNumber(rbx, rcx, alloc_failure);
1538  // Now rdx can be overwritten losing one of the arguments as we are
1539  // now done and will not need it any more.
1540  __ movq(rdx, rbx);
1541  __ bind(&skip_allocation);
1542  // Use object in rdx as a result holder
1543  __ movq(rax, rdx);
1544  break;
1545  }
1546  case OVERWRITE_RIGHT:
1547  // If the argument in rax is already an object, we skip the
1548  // allocation of a heap number.
1549  __ JumpIfNotSmi(rax, &skip_allocation);
1550  // Fall through!
1551  case NO_OVERWRITE:
1552  // Allocate a heap number for the result. Keep rax and rdx intact
1553  // for the possible runtime call.
1554  __ AllocateHeapNumber(rbx, rcx, alloc_failure);
1555  // Now rax can be overwritten losing one of the arguments as we are
1556  // now done and will not need it any more.
1557  __ movq(rax, rbx);
1558  __ bind(&skip_allocation);
1559  break;
1560  default: UNREACHABLE();
1561  }
1562 }
1563 
1564 
1565 void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
1566  __ pop(rcx);
1567  __ push(rdx);
1568  __ push(rax);
1569  __ push(rcx);
1570 }
1571 
1572 
1573 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
1574  // TAGGED case:
1575  // Input:
1576  // rsp[8]: argument (should be number).
1577  // rsp[0]: return address.
1578  // Output:
1579  // rax: tagged double result.
1580  // UNTAGGED case:
1581  // Input:
1582  // rsp[0]: return address.
1583  // xmm1: untagged double input argument
1584  // Output:
1585  // xmm1: untagged double result.
1586 
1587  Label runtime_call;
1588  Label runtime_call_clear_stack;
1589  Label skip_cache;
1590  const bool tagged = (argument_type_ == TAGGED);
1591  if (tagged) {
1592  Label input_not_smi, loaded;
1593  // Test that rax is a number.
1594  __ movq(rax, Operand(rsp, kPointerSize));
1595  __ JumpIfNotSmi(rax, &input_not_smi, Label::kNear);
1596  // Input is a smi. Untag and load it onto the FPU stack.
1597  // Then load the bits of the double into rbx.
1598  __ SmiToInteger32(rax, rax);
1599  __ subq(rsp, Immediate(kDoubleSize));
1600  __ cvtlsi2sd(xmm1, rax);
1601  __ movsd(Operand(rsp, 0), xmm1);
1602  __ movq(rbx, xmm1);
1603  __ movq(rdx, xmm1);
1604  __ fld_d(Operand(rsp, 0));
1605  __ addq(rsp, Immediate(kDoubleSize));
1606  __ jmp(&loaded, Label::kNear);
1607 
1608  __ bind(&input_not_smi);
1609  // Check if input is a HeapNumber.
1610  __ LoadRoot(rbx, Heap::kHeapNumberMapRootIndex);
1611  __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
1612  __ j(not_equal, &runtime_call);
1613  // Input is a HeapNumber. Push it on the FPU stack and load its
1614  // bits into rbx.
1615  __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
1616  __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
1617  __ movq(rdx, rbx);
1618 
1619  __ bind(&loaded);
1620  } else { // UNTAGGED.
1621  __ movq(rbx, xmm1);
1622  __ movq(rdx, xmm1);
1623  }
1624 
1625  // ST[0] == double value, if TAGGED.
1626  // rbx = bits of double value.
1627  // rdx = also bits of double value.
1628  // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
1629  // h = h0 = bits ^ (bits >> 32);
1630  // h ^= h >> 16;
1631  // h ^= h >> 8;
1632  // h = h & (cacheSize - 1);
1633  // or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1)
1634  __ sar(rdx, Immediate(32));
1635  __ xorl(rdx, rbx);
1636  __ movl(rcx, rdx);
1637  __ movl(rax, rdx);
1638  __ movl(rdi, rdx);
1639  __ sarl(rdx, Immediate(8));
1640  __ sarl(rcx, Immediate(16));
1641  __ sarl(rax, Immediate(24));
1642  __ xorl(rcx, rdx);
1643  __ xorl(rax, rdi);
1644  __ xorl(rcx, rax);
1645  ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
1646  __ andl(rcx, Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
1647 
1648  // ST[0] == double value.
1649  // rbx = bits of double value.
1650  // rcx = TranscendentalCache::hash(double value).
1651  ExternalReference cache_array =
1652  ExternalReference::transcendental_cache_array_address(masm->isolate());
1653  __ movq(rax, cache_array);
1654  int cache_array_index =
1655  type_ * sizeof(Isolate::Current()->transcendental_cache()->caches_[0]);
1656  __ movq(rax, Operand(rax, cache_array_index));
1657  // rax points to the cache for the type type_.
1658  // If NULL, the cache hasn't been initialized yet, so go through runtime.
1659  __ testq(rax, rax);
1660  __ j(zero, &runtime_call_clear_stack); // Only clears stack if TAGGED.
1661 #ifdef DEBUG
1662  // Check that the layout of cache elements match expectations.
1663  { // NOLINT - doesn't like a single brace on a line.
1664  TranscendentalCache::SubCache::Element test_elem[2];
1665  char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
1666  char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
1667  char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
1668  char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
1669  char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
1670  // Two uint32_t's and a pointer per element.
1671  CHECK_EQ(16, static_cast<int>(elem2_start - elem_start));
1672  CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start));
1673  CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start));
1674  CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start));
1675  }
1676 #endif
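 // Each cache element is therefore 16 bytes: two 32-bit words holding the
 // input double's bits plus a tagged pointer to the cached result. The index
 // is scaled accordingly below (addl doubles it, then times_8 gives *16).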
1677  // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16].
1678  __ addl(rcx, rcx);
1679  __ lea(rcx, Operand(rax, rcx, times_8, 0));
1680  // Check if cache matches: Double value is stored in uint32_t[2] array.
1681  Label cache_miss;
1682  __ cmpq(rbx, Operand(rcx, 0));
1683  __ j(not_equal, &cache_miss, Label::kNear);
1684  // Cache hit!
1685  Counters* counters = masm->isolate()->counters();
1686  __ IncrementCounter(counters->transcendental_cache_hit(), 1);
1687  __ movq(rax, Operand(rcx, 2 * kIntSize));
1688  if (tagged) {
1689  __ fstp(0); // Clear FPU stack.
1690  __ ret(kPointerSize);
1691  } else { // UNTAGGED.
1692  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
1693  __ Ret();
1694  }
1695 
1696  __ bind(&cache_miss);
1697  __ IncrementCounter(counters->transcendental_cache_miss(), 1);
1698  // Update cache with new value.
1699  if (tagged) {
1700  __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
1701  } else { // UNTAGGED.
1702  __ AllocateHeapNumber(rax, rdi, &skip_cache);
1703  __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
1704  __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
1705  }
1706  GenerateOperation(masm, type_);
1707  __ movq(Operand(rcx, 0), rbx);
1708  __ movq(Operand(rcx, 2 * kIntSize), rax);
1709  __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
1710  if (tagged) {
1711  __ ret(kPointerSize);
1712  } else { // UNTAGGED.
1713  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
1714  __ Ret();
1715 
1716  // Skip cache and return answer directly, only in untagged case.
1717  __ bind(&skip_cache);
1718  __ subq(rsp, Immediate(kDoubleSize));
1719  __ movsd(Operand(rsp, 0), xmm1);
1720  __ fld_d(Operand(rsp, 0));
1721  GenerateOperation(masm, type_);
1722  __ fstp_d(Operand(rsp, 0));
1723  __ movsd(xmm1, Operand(rsp, 0));
1724  __ addq(rsp, Immediate(kDoubleSize));
1725  // We return the value in xmm1 without adding it to the cache, but
1726  // we cause a scavenging GC so that future allocations will succeed.
1727  {
1728  FrameScope scope(masm, StackFrame::INTERNAL);
1729  // Allocate an unused object bigger than a HeapNumber.
1730  __ Push(Smi::FromInt(2 * kDoubleSize));
1731  __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
1732  }
1733  __ Ret();
1734  }
1735 
1736  // Call runtime, doing whatever allocation and cleanup is necessary.
1737  if (tagged) {
1738  __ bind(&runtime_call_clear_stack);
1739  __ fstp(0);
1740  __ bind(&runtime_call);
1741  __ TailCallExternalReference(
1742  ExternalReference(RuntimeFunction(), masm->isolate()), 1, 1);
1743  } else { // UNTAGGED.
1744  __ bind(&runtime_call_clear_stack);
1745  __ bind(&runtime_call);
1746  __ AllocateHeapNumber(rax, rdi, &skip_cache);
1748  {
1749  FrameScope scope(masm, StackFrame::INTERNAL);
1750  __ push(rax);
1751  __ CallRuntime(RuntimeFunction(), 1);
1752  }
1754  __ Ret();
1755  }
1756 }
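// --- Illustration only (not part of the original stub) --------------------
// A minimal C++ sketch of the cache element layout that the DEBUG CHECKs in
// Generate() above rely on: two uint32_t halves of the input double followed
// by a pointer to the cached result, i.e. 16 bytes per element on x64. The
// struct name is hypothetical and exists only for this sketch.
struct TranscendentalCacheElementSketch {
  uint32_t in[2];  // Low and high 32 bits of the input double (2 * kIntSize).
  void* output;    // Cached HeapNumber holding the result.
};
// The stub addresses &cache[hash] as cache + hash * 16: the index is doubled
// (addl rcx, rcx) and then scaled by 8 in the lea, which is the same as
// multiplying by sizeof(TranscendentalCacheElementSketch).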
1757 
1758 
1759 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
1760  switch (type_) {
1761  // Add more cases when necessary.
1762  case TranscendentalCache::SIN: return Runtime::kMath_sin;
1763  case TranscendentalCache::COS: return Runtime::kMath_cos;
1764  case TranscendentalCache::TAN: return Runtime::kMath_tan;
1765  case TranscendentalCache::LOG: return Runtime::kMath_log;
1766  default:
1767  UNIMPLEMENTED();
1768  return Runtime::kAbort;
1769  }
1770 }
1771 
1772 
1773 void TranscendentalCacheStub::GenerateOperation(
1774  MacroAssembler* masm, TranscendentalCache::Type type) {
1775  // Registers:
1776  // rax: Newly allocated HeapNumber, which must be preserved.
1777  // rbx: Bits of input double. Must be preserved.
1778  // rcx: Pointer to cache entry. Must be preserved.
1779  // st(0): Input double
1780  Label done;
1781  if (type == TranscendentalCache::SIN ||
1782  type == TranscendentalCache::COS ||
1783  type == TranscendentalCache::TAN) {
1784  // Both fsin and fcos require arguments in the range +/-2^63 and
1785  // return NaN for infinities and NaN. They can share all code except
1786  // the actual fsin/fcos operation.
1787  Label in_range;
1788  // If argument is outside the range -2^63..2^63, fsin/cos doesn't
1789  // work. We must reduce it to the appropriate range.
1790  __ movq(rdi, rbx);
1791  // Move exponent and sign bits to low bits.
1792  __ shr(rdi, Immediate(HeapNumber::kMantissaBits));
1793  // Remove sign bit.
1794  __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
1795  int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
1796  __ cmpl(rdi, Immediate(supported_exponent_limit));
1797  __ j(below, &in_range);
1798  // Check for infinity and NaN. Both return NaN for sin.
1799  __ cmpl(rdi, Immediate(0x7ff));
1800  Label non_nan_result;
1801  __ j(not_equal, &non_nan_result, Label::kNear);
1802  // Input is +/-Infinity or NaN. Result is NaN.
1803  __ fstp(0);
1804  // NaN is represented by 0x7ff8000000000000.
1805  __ subq(rsp, Immediate(kPointerSize));
1806  __ movl(Operand(rsp, 4), Immediate(0x7ff80000));
1807  __ movl(Operand(rsp, 0), Immediate(0x00000000));
1808  __ fld_d(Operand(rsp, 0));
1809  __ addq(rsp, Immediate(kPointerSize));
1810  __ jmp(&done);
1811 
1812  __ bind(&non_nan_result);
1813 
1814  // Use fprem1 to restrict the argument to the range +/-2*PI.
1815  __ movq(rdi, rax); // Save rax before using fnstsw_ax.
1816  __ fldpi();
1817  __ fadd(0);
1818  __ fld(1);
1819  // FPU Stack: input, 2*pi, input.
1820  {
1821  Label no_exceptions;
1822  __ fwait();
1823  __ fnstsw_ax();
1824  // Clear if Illegal Operand or Zero Division exceptions are set.
1825  __ testl(rax, Immediate(5)); // #IO and #ZD flags of FPU status word.
1826  __ j(zero, &no_exceptions);
1827  __ fnclex();
1828  __ bind(&no_exceptions);
1829  }
1830 
1831  // Compute st(0) % st(1)
1832  {
1833  Label partial_remainder_loop;
1834  __ bind(&partial_remainder_loop);
1835  __ fprem1();
1836  __ fwait();
1837  __ fnstsw_ax();
1838  __ testl(rax, Immediate(0x400)); // Check C2 bit of FPU status word.
1839  // If C2 is set, computation only has partial result. Loop to
1840  // continue computation.
1841  __ j(not_zero, &partial_remainder_loop);
1842  }
1843  // FPU Stack: input, 2*pi, input % 2*pi
1844  __ fstp(2);
1845  // FPU Stack: input % 2*pi, 2*pi,
1846  __ fstp(0);
1847  // FPU Stack: input % 2*pi
1848  __ movq(rax, rdi); // Restore rax, pointer to the new HeapNumber.
1849  __ bind(&in_range);
1850  switch (type) {
1851  case TranscendentalCache::SIN:
1852  __ fsin();
1853  break;
1854  case TranscendentalCache::COS:
1855  __ fcos();
1856  break;
1857  case TranscendentalCache::TAN:
1858  // FPTAN calculates tangent onto st(0) and pushes 1.0 onto the
1859  // FP register stack.
1860  __ fptan();
1861  __ fstp(0); // Pop FP register stack.
1862  break;
1863  default:
1864  UNREACHABLE();
1865  }
1866  __ bind(&done);
1867  } else {
1868  ASSERT(type == TranscendentalCache::LOG);
1869  __ fldln2();
1870  __ fxch();
1871  __ fyl2x();
1872  }
1873 }
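// --- Illustration only ------------------------------------------------------
// A minimal C++ sketch of the range check performed above for SIN/COS/TAN:
// fsin/fcos/fptan only work for |x| < 2^63, so the stub inspects the biased
// exponent taken straight from the double's bit pattern. The helper name is
// hypothetical; the constants mirror HeapNumber::kMantissaBits (52),
// kExponentBits (11) and kExponentBias (1023).
static inline bool FitsInFsinRangeSketch(uint64_t double_bits) {
  // Shift out the 52 mantissa bits, then mask away the sign bit, leaving the
  // 11-bit biased exponent. A value of 0x7ff means infinity or NaN.
  int biased_exponent = static_cast<int>(double_bits >> 52) & ((1 << 11) - 1);
  // 63 + 1023 is the first biased exponent for which |x| can reach 2^63.
  return biased_exponent < 63 + 1023;
}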
1874 
1875 
1876 // Input: rdx, rax are the left and right objects of a bit op.
1877 // Output: rax, rcx are left and right integers for a bit op.
1878 void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
1879  // Check float operands.
1880  Label done;
1881  Label rax_is_smi;
1882  Label rax_is_object;
1883  Label rdx_is_object;
1884 
1885  __ JumpIfNotSmi(rdx, &rdx_is_object);
1886  __ SmiToInteger32(rdx, rdx);
1887  __ JumpIfSmi(rax, &rax_is_smi);
1888 
1889  __ bind(&rax_is_object);
1890  IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx.
1891  __ jmp(&done);
1892 
1893  __ bind(&rdx_is_object);
1894  IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx.
1895  __ JumpIfNotSmi(rax, &rax_is_object);
1896  __ bind(&rax_is_smi);
1897  __ SmiToInteger32(rcx, rax);
1898 
1899  __ bind(&done);
1900  __ movl(rax, rdx);
1901 }
1902 
1903 
1904 // Input: rdx, rax are the left and right objects of a bit op.
1905 // Output: rax, rcx are left and right integers for a bit op.
1906 // Jump to conversion_failure: rdx and rax are unchanged.
1907 void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
1908  Label* conversion_failure,
1909  Register heap_number_map) {
1910  // Check float operands.
1911  Label arg1_is_object, check_undefined_arg1;
1912  Label arg2_is_object, check_undefined_arg2;
1913  Label load_arg2, done;
1914 
1915  __ JumpIfNotSmi(rdx, &arg1_is_object);
1916  __ SmiToInteger32(r8, rdx);
1917  __ jmp(&load_arg2);
1918 
1919  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
1920  __ bind(&check_undefined_arg1);
1921  __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
1922  __ j(not_equal, conversion_failure);
1923  __ Set(r8, 0);
1924  __ jmp(&load_arg2);
1925 
1926  __ bind(&arg1_is_object);
1927  __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
1928  __ j(not_equal, &check_undefined_arg1);
1929  // Get the untagged integer version of the rdx heap number in rcx.
1930  IntegerConvert(masm, r8, rdx);
1931 
1932  // Here r8 has the untagged integer, rax has a Smi or a heap number.
1933  __ bind(&load_arg2);
1934  // Test if arg2 is a Smi.
1935  __ JumpIfNotSmi(rax, &arg2_is_object);
1936  __ SmiToInteger32(rcx, rax);
1937  __ jmp(&done);
1938 
1939  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
1940  __ bind(&check_undefined_arg2);
1941  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
1942  __ j(not_equal, conversion_failure);
1943  __ Set(rcx, 0);
1944  __ jmp(&done);
1945 
1946  __ bind(&arg2_is_object);
1947  __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
1948  __ j(not_equal, &check_undefined_arg2);
1949  // Get the untagged integer version of the rax heap number in rcx.
1950  IntegerConvert(masm, rcx, rax);
1951  __ bind(&done);
1952  __ movl(rax, r8);
1953 }
1954 
1955 
1956 void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
1957  __ SmiToInteger32(kScratchRegister, rdx);
1958  __ cvtlsi2sd(xmm0, kScratchRegister);
1959  __ SmiToInteger32(kScratchRegister, rax);
1960  __ cvtlsi2sd(xmm1, kScratchRegister);
1961 }
1962 
1963 
1964 void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
1965  Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
1966  // Load operand in rdx into xmm0.
1967  __ JumpIfSmi(rdx, &load_smi_rdx);
1969  // Load operand in rax into xmm1.
1970  __ JumpIfSmi(rax, &load_smi_rax);
1971  __ bind(&load_nonsmi_rax);
1973  __ jmp(&done);
1974 
1975  __ bind(&load_smi_rdx);
1976  __ SmiToInteger32(kScratchRegister, rdx);
1977  __ cvtlsi2sd(xmm0, kScratchRegister);
1978  __ JumpIfNotSmi(rax, &load_nonsmi_rax);
1979 
1980  __ bind(&load_smi_rax);
1981  __ SmiToInteger32(kScratchRegister, rax);
1982  __ cvtlsi2sd(xmm1, kScratchRegister);
1983 
1984  __ bind(&done);
1985 }
1986 
1987 
1988 void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
1989  Label* not_numbers) {
1990  Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
1991  // Load operand in rdx into xmm0, or branch to not_numbers.
1992  __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
1993  __ JumpIfSmi(rdx, &load_smi_rdx);
1995  __ j(not_equal, not_numbers); // Argument in rdx is not a number.
1997  // Load operand in rax into xmm1, or branch to not_numbers.
1998  __ JumpIfSmi(rax, &load_smi_rax);
1999 
2000  __ bind(&load_nonsmi_rax);
2002  __ j(not_equal, not_numbers);
2004  __ jmp(&done);
2005 
2006  __ bind(&load_smi_rdx);
2007  __ SmiToInteger32(kScratchRegister, rdx);
2008  __ cvtlsi2sd(xmm0, kScratchRegister);
2009  __ JumpIfNotSmi(rax, &load_nonsmi_rax);
2010 
2011  __ bind(&load_smi_rax);
2012  __ SmiToInteger32(kScratchRegister, rax);
2013  __ cvtlsi2sd(xmm1, kScratchRegister);
2014  __ bind(&done);
2015 }
2016 
2017 
2018 void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
2019  Register first,
2020  Register second,
2021  Register scratch1,
2022  Register scratch2,
2023  Register scratch3,
2024  Label* on_success,
2025  Label* on_not_smis) {
2026  Register heap_number_map = scratch3;
2027  Register smi_result = scratch1;
2028  Label done;
2029 
2030  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2031 
2032  Label first_smi;
2033  __ JumpIfSmi(first, &first_smi, Label::kNear);
2034  __ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
2035  __ j(not_equal, on_not_smis);
2036  // Convert HeapNumber to smi if possible.
2037  __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
2038  __ movq(scratch2, xmm0);
2039  __ cvttsd2siq(smi_result, xmm0);
2040  // Check if conversion was successful by converting back and
2041  // comparing to the original double's bits.
2042  __ cvtlsi2sd(xmm1, smi_result);
2043  __ movq(kScratchRegister, xmm1);
2044  __ cmpq(scratch2, kScratchRegister);
2045  __ j(not_equal, on_not_smis);
2046  __ Integer32ToSmi(first, smi_result);
2047 
2048  __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
2049  __ bind(&first_smi);
2050  __ AssertNotSmi(second);
2051  __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
2052  __ j(not_equal, on_not_smis);
2053  // Convert second to smi, if possible.
2054  __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
2055  __ movq(scratch2, xmm0);
2056  __ cvttsd2siq(smi_result, xmm0);
2057  __ cvtlsi2sd(xmm1, smi_result);
2058  __ movq(kScratchRegister, xmm1);
2059  __ cmpq(scratch2, kScratchRegister);
2060  __ j(not_equal, on_not_smis);
2061  __ Integer32ToSmi(second, smi_result);
2062  if (on_success != NULL) {
2063  __ jmp(on_success);
2064  } else {
2065  __ bind(&done);
2066  }
2067 }
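// --- Illustration only ------------------------------------------------------
// A minimal C++ sketch of the "convert, convert back, compare the bits" test
// that NumbersToSmis above uses to decide whether a HeapNumber holds an exact
// integer value. The helper name is hypothetical; the stub additionally
// requires the result to fit in a smi before tagging it.
static inline bool DoubleIsExactIntegerSketch(double value, int64_t* out) {
  union { double number; uint64_t bits; } original, round_trip;
  original.number = value;
  int64_t truncated = static_cast<int64_t>(value);     // roughly cvttsd2siq
  round_trip.number = static_cast<double>(truncated);  // roughly cvtlsi2sd
  if (round_trip.bits != original.bits) return false;  // movq + cmpq on bits
  *out = truncated;
  return true;
}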
2068 
2069 
2070 void MathPowStub::Generate(MacroAssembler* masm) {
2071  // Choose register conforming to calling convention (when bailing out).
2072 #ifdef _WIN64
2073  const Register exponent = rdx;
2074 #else
2075  const Register exponent = rdi;
2076 #endif
2077  const Register base = rax;
2078  const Register scratch = rcx;
2079  const XMMRegister double_result = xmm3;
2080  const XMMRegister double_base = xmm2;
2081  const XMMRegister double_exponent = xmm1;
2082  const XMMRegister double_scratch = xmm4;
2083 
2084  Label call_runtime, done, exponent_not_smi, int_exponent;
2085 
2086  // Save 1 in double_result - we need this several times later on.
2087  __ movq(scratch, Immediate(1));
2088  __ cvtlsi2sd(double_result, scratch);
2089 
2090  if (exponent_type_ == ON_STACK) {
2091  Label base_is_smi, unpack_exponent;
2092  // The exponent and base are supplied as arguments on the stack.
2093  // This can only happen if the stub is called from non-optimized code.
2094  // Load input parameters from stack.
2095  __ movq(base, Operand(rsp, 2 * kPointerSize));
2096  __ movq(exponent, Operand(rsp, 1 * kPointerSize));
2097  __ JumpIfSmi(base, &base_is_smi, Label::kNear);
2098  __ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
2099  Heap::kHeapNumberMapRootIndex);
2100  __ j(not_equal, &call_runtime);
2101 
2102  __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
2103  __ jmp(&unpack_exponent, Label::kNear);
2104 
2105  __ bind(&base_is_smi);
2106  __ SmiToInteger32(base, base);
2107  __ cvtlsi2sd(double_base, base);
2108  __ bind(&unpack_exponent);
2109 
2110  __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
2111  __ SmiToInteger32(exponent, exponent);
2112  __ jmp(&int_exponent);
2113 
2114  __ bind(&exponent_not_smi);
2115  __ CompareRoot(FieldOperand(exponent, HeapObject::kMapOffset),
2116  Heap::kHeapNumberMapRootIndex);
2117  __ j(not_equal, &call_runtime);
2118  __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
2119  } else if (exponent_type_ == TAGGED) {
2120  __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
2121  __ SmiToInteger32(exponent, exponent);
2122  __ jmp(&int_exponent);
2123 
2124  __ bind(&exponent_not_smi);
2125  __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
2126  }
2127 
2128  if (exponent_type_ != INTEGER) {
2129  Label fast_power;
2130  // Detect integer exponents stored as double.
2131  __ cvttsd2si(exponent, double_exponent);
2132  // Skip to runtime if possibly NaN (indicated by the indefinite integer).
2133  __ cmpl(exponent, Immediate(0x80000000u));
2134  __ j(equal, &call_runtime);
2135  __ cvtlsi2sd(double_scratch, exponent);
2136  // Already ruled out NaNs for exponent.
2137  __ ucomisd(double_exponent, double_scratch);
2138  __ j(equal, &int_exponent);
2139 
2140  if (exponent_type_ == ON_STACK) {
2141  // Detect square root case. Crankshaft detects constant +/-0.5 at
2142  // compile time and uses DoMathPowHalf instead. We then skip this check
2143  // for non-constant cases of +/-0.5 as these hardly occur.
2144  Label continue_sqrt, continue_rsqrt, not_plus_half;
2145  // Test for 0.5.
2146  // Load double_scratch with 0.5.
2147  __ movq(scratch, V8_UINT64_C(0x3FE0000000000000), RelocInfo::NONE);
2148  __ movq(double_scratch, scratch);
2149  // Already ruled out NaNs for exponent.
2150  __ ucomisd(double_scratch, double_exponent);
2151  __ j(not_equal, &not_plus_half, Label::kNear);
2152 
2153  // Calculates square root of base. Check for the special case of
2154  // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
2155  // According to IEEE-754, double-precision -Infinity has the highest
2156  // 12 bits set and the lowest 52 bits cleared.
2157  __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE);
2158  __ movq(double_scratch, scratch);
2159  __ ucomisd(double_scratch, double_base);
2160  // Comparing -Infinity with NaN results in "unordered", which sets the
2161  // zero flag as if both were equal. However, it also sets the carry flag.
2162  __ j(not_equal, &continue_sqrt, Label::kNear);
2163  __ j(carry, &continue_sqrt, Label::kNear);
2164 
2165  // Set result to Infinity in the special case.
2166  __ xorps(double_result, double_result);
2167  __ subsd(double_result, double_scratch);
2168  __ jmp(&done);
2169 
2170  __ bind(&continue_sqrt);
2171  // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
2172  __ xorps(double_scratch, double_scratch);
2173  __ addsd(double_scratch, double_base); // Convert -0 to 0.
2174  __ sqrtsd(double_result, double_scratch);
2175  __ jmp(&done);
2176 
2177  // Test for -0.5.
2178  __ bind(&not_plus_half);
2179  // Load double_scratch with -0.5 by subtracting 1.
2180  __ subsd(double_scratch, double_result);
2181  // Already ruled out NaNs for exponent.
2182  __ ucomisd(double_scratch, double_exponent);
2183  __ j(not_equal, &fast_power, Label::kNear);
2184 
2185  // Calculates reciprocal of square root of base. Check for the special
2186  // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
2187  // According to IEEE-754, double-precision -Infinity has the highest
2188  // 12 bits set and the lowest 52 bits cleared.
2189  __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE);
2190  __ movq(double_scratch, scratch);
2191  __ ucomisd(double_scratch, double_base);
2192  // Comparing -Infinity with NaN results in "unordered", which sets the
2193  // zero flag as if both were equal. However, it also sets the carry flag.
2194  __ j(not_equal, &continue_rsqrt, Label::kNear);
2195  __ j(carry, &continue_rsqrt, Label::kNear);
2196 
2197  // Set result to 0 in the special case.
2198  __ xorps(double_result, double_result);
2199  __ jmp(&done);
2200 
2201  __ bind(&continue_rsqrt);
2202  // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
2203  __ xorps(double_exponent, double_exponent);
2204  __ addsd(double_exponent, double_base); // Convert -0 to +0.
2205  __ sqrtsd(double_exponent, double_exponent);
2206  __ divsd(double_result, double_exponent);
2207  __ jmp(&done);
2208  }
2209 
2210  // Using FPU instructions to calculate power.
2211  Label fast_power_failed;
2212  __ bind(&fast_power);
2213  __ fnclex(); // Clear flags to catch exceptions later.
2214  // Transfer (B)ase and (E)xponent onto the FPU register stack.
2215  __ subq(rsp, Immediate(kDoubleSize));
2216  __ movsd(Operand(rsp, 0), double_exponent);
2217  __ fld_d(Operand(rsp, 0)); // E
2218  __ movsd(Operand(rsp, 0), double_base);
2219  __ fld_d(Operand(rsp, 0)); // B, E
2220 
2221  // Exponent is in st(1) and base is in st(0)
2222  // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
2223  // FYL2X calculates st(1) * log2(st(0))
2224  __ fyl2x(); // X
2225  __ fld(0); // X, X
2226  __ frndint(); // rnd(X), X
2227  __ fsub(1); // rnd(X), X-rnd(X)
2228  __ fxch(1); // X - rnd(X), rnd(X)
2229  // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
2230  __ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
2231  __ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
2232  __ faddp(1); // 1, 2^(X-rnd(X)), rnd(X)
2233  // FSCALE calculates st(0) * 2^st(1)
2234  __ fscale(); // 2^X, rnd(X)
2235  __ fstp(1);
2236  // Bail out to runtime in case of exceptions in the status word.
2237  __ fnstsw_ax();
2238  __ testb(rax, Immediate(0x5F)); // Check for all but precision exception.
2239  __ j(not_zero, &fast_power_failed, Label::kNear);
2240  __ fstp_d(Operand(rsp, 0));
2241  __ movsd(double_result, Operand(rsp, 0));
2242  __ addq(rsp, Immediate(kDoubleSize));
2243  __ jmp(&done);
2244 
2245  __ bind(&fast_power_failed);
2246  __ fninit();
2247  __ addq(rsp, Immediate(kDoubleSize));
2248  __ jmp(&call_runtime);
2249  }
2250 
2251  // Calculate power with integer exponent.
2252  __ bind(&int_exponent);
2253  const XMMRegister double_scratch2 = double_exponent;
2254  // Back up exponent as we need to check if exponent is negative later.
2255  __ movq(scratch, exponent); // Back up exponent.
2256  __ movsd(double_scratch, double_base); // Back up base.
2257  __ movsd(double_scratch2, double_result); // Load double_exponent with 1.
2258 
2259  // Get absolute value of exponent.
2260  Label no_neg, while_true, while_false;
2261  __ testl(scratch, scratch);
2262  __ j(positive, &no_neg, Label::kNear);
2263  __ negl(scratch);
2264  __ bind(&no_neg);
2265 
2266  __ j(zero, &while_false, Label::kNear);
2267  __ shrl(scratch, Immediate(1));
2268  // Above condition means CF==0 && ZF==0. This means that the
2269  // bit that has been shifted out is 0 and the result is not 0.
2270  __ j(above, &while_true, Label::kNear);
2271  __ movsd(double_result, double_scratch);
2272  __ j(zero, &while_false, Label::kNear);
2273 
2274  __ bind(&while_true);
2275  __ shrl(scratch, Immediate(1));
2276  __ mulsd(double_scratch, double_scratch);
2277  __ j(above, &while_true, Label::kNear);
2278  __ mulsd(double_result, double_scratch);
2279  __ j(not_zero, &while_true);
2280 
2281  __ bind(&while_false);
2282  // If the exponent is negative, return 1/result.
2283  __ testl(exponent, exponent);
2284  __ j(greater, &done);
2285  __ divsd(double_scratch2, double_result);
2286  __ movsd(double_result, double_scratch2);
2287  // Test whether result is zero. Bail out to check for subnormal result.
2288  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
2289  __ xorps(double_scratch2, double_scratch2);
2290  __ ucomisd(double_scratch2, double_result);
2291  // double_exponent aliased as double_scratch2 has already been overwritten
2292  // and may not have contained the exponent value in the first place when the
2293  // input was a smi. We reset it with exponent value before bailing out.
2294  __ j(not_equal, &done);
2295  __ cvtlsi2sd(double_exponent, exponent);
2296 
2297  // Returning or bailing out.
2298  Counters* counters = masm->isolate()->counters();
2299  if (exponent_type_ == ON_STACK) {
2300  // The arguments are still on the stack.
2301  __ bind(&call_runtime);
2302  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
2303 
2304  // The stub is called from non-optimized code, which expects the result
2305  // as heap number in rax.
2306  __ bind(&done);
2307  __ AllocateHeapNumber(rax, rcx, &call_runtime);
2308  __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result);
2309  __ IncrementCounter(counters->math_pow(), 1);
2310  __ ret(2 * kPointerSize);
2311  } else {
2312  __ bind(&call_runtime);
2313  // Move base to the correct argument register. Exponent is already in xmm1.
2314  __ movsd(xmm0, double_base);
2315  ASSERT(double_exponent.is(xmm1));
2316  {
2317  AllowExternalCallThatCantCauseGC scope(masm);
2318  __ PrepareCallCFunction(2);
2319  __ CallCFunction(
2320  ExternalReference::power_double_double_function(masm->isolate()), 2);
2321  }
2322  // Return value is in xmm0.
2323  __ movsd(double_result, xmm0);
2324  // Restore context register.
2326 
2327  __ bind(&done);
2328  __ IncrementCounter(counters->math_pow(), 1);
2329  __ ret(0);
2330  }
2331 }
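// --- Illustration only ------------------------------------------------------
// The FPU fast path above evaluates pow(B, E) as 2^(E * log2(B)), splitting
// X = E * log2(B) into rnd(X) + (X - rnd(X)) because F2XM1 only accepts
// arguments in (-1, 1); FSCALE then multiplies by 2^rnd(X). The integer
// exponent path is plain exponentiation by squaring, sketched below with a
// hypothetical helper name. (The stub also bails out when a negative exponent
// produces a possibly subnormal result, since x^-y == (1/x)^y does not hold
// exactly in that case.)
static inline double PowIntegerExponentSketch(double base, int exponent) {
  double result = 1.0;    // double_result starts out as 1.
  double scratch = base;  // double_scratch holds the repeated square.
  unsigned bits = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                               : static_cast<unsigned>(exponent);
  while (bits != 0) {
    if ((bits & 1) != 0) result *= scratch;  // mulsd double_result, scratch
    scratch *= scratch;                      // mulsd double_scratch, scratch
    bits >>= 1;                              // shrl scratch, 1
  }
  return exponent < 0 ? 1.0 / result : result;
}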
2332 
2333 
2334 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
2335  // The key is in rdx and the parameter count is in rax.
2336 
2337  // The displacement is used for skipping the frame pointer on the
2338  // stack. It is the offset of the last parameter (if any) relative
2339  // to the frame pointer.
2340  static const int kDisplacement = 1 * kPointerSize;
2341 
2342  // Check that the key is a smi.
2343  Label slow;
2344  __ JumpIfNotSmi(rdx, &slow);
2345 
2346  // Check if the calling frame is an arguments adaptor frame. We look at the
2347  // context offset, and if the frame is not a regular one, then we find a
2348  // Smi instead of the context. We can't use SmiCompare here, because that
2349  // only works for comparing two smis.
2350  Label adaptor;
2354  __ j(equal, &adaptor);
2355 
2356  // Check index against formal parameters count limit passed in
2357  // through register rax. Use unsigned comparison to get negative
2358  // check for free.
2359  __ cmpq(rdx, rax);
2360  __ j(above_equal, &slow);
2361 
2362  // Read the argument from the stack and return it.
2363  SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
2364  __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
2365  index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
2366  __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
2367  __ Ret();
2368 
2369  // Arguments adaptor case: Check index against actual arguments
2370  // limit found in the arguments adaptor frame. Use unsigned
2371  // comparison to get negative check for free.
2372  __ bind(&adaptor);
2374  __ cmpq(rdx, rcx);
2375  __ j(above_equal, &slow);
2376 
2377  // Read the argument from the stack and return it.
2378  index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
2379  __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
2380  index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
2381  __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
2382  __ Ret();
2383 
2384  // Slow-case: Handle non-smi or out-of-bounds access to arguments
2385  // by calling the runtime system.
2386  __ bind(&slow);
2387  __ pop(rbx); // Return address.
2388  __ push(rdx);
2389  __ push(rbx);
2390  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
2391 }
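// --- Illustration only ------------------------------------------------------
// GenerateReadElement above locates argument #key relative to the frame
// pointer: the arguments sit above the saved frame pointer and return address,
// so the wanted slot is at
//   rbp + kDisplacement + (parameter_count - key) * kPointerSize,
// which is what the two smi-scaled addressing modes compute. A scalar sketch
// of that offset computation (hypothetical helper name):
static inline int ArgumentOffsetFromFramePointerSketch(int parameter_count,
                                                       int key) {
  const int kDisplacementSketch = 1 * kPointerSize;  // mirrors kDisplacement
  return kDisplacementSketch + (parameter_count - key) * kPointerSize;
}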
2392 
2393 
2394 void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
2395  // Stack layout:
2396  // rsp[0] : return address
2397  // rsp[8] : number of parameters (tagged)
2398  // rsp[16] : receiver displacement
2399  // rsp[24] : function
2400  // Registers used over the whole function:
2401  // rbx: the mapped parameter count (untagged)
2402  // rax: the allocated object (tagged).
2403 
2404  Factory* factory = masm->isolate()->factory();
2405 
2406  __ SmiToInteger64(rbx, Operand(rsp, 1 * kPointerSize));
2407  // rbx = parameter count (untagged)
2408 
2409  // Check if the calling frame is an arguments adaptor frame.
2410  Label runtime;
2411  Label adaptor_frame, try_allocate;
2415  __ j(equal, &adaptor_frame);
2416 
2417  // No adaptor, parameter count = argument count.
2418  __ movq(rcx, rbx);
2419  __ jmp(&try_allocate, Label::kNear);
2420 
2421  // We have an adaptor frame. Patch the parameters pointer.
2422  __ bind(&adaptor_frame);
2423  __ SmiToInteger64(rcx,
2424  Operand(rdx,
2426  __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
2428  __ movq(Operand(rsp, 2 * kPointerSize), rdx);
2429 
2430  // rbx = parameter count (untagged)
2431  // rcx = argument count (untagged)
2432  // Compute the mapped parameter count = min(rbx, rcx) in rbx.
2433  __ cmpq(rbx, rcx);
2434  __ j(less_equal, &try_allocate, Label::kNear);
2435  __ movq(rbx, rcx);
2436 
2437  __ bind(&try_allocate);
2438 
2439  // Compute the sizes of backing store, parameter map, and arguments object.
2440  // 1. Parameter map, has 2 extra words containing context and backing store.
2441  const int kParameterMapHeaderSize =
2443  Label no_parameter_map;
2444  __ xor_(r8, r8);
2445  __ testq(rbx, rbx);
2446  __ j(zero, &no_parameter_map, Label::kNear);
2447  __ lea(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
2448  __ bind(&no_parameter_map);
2449 
2450  // 2. Backing store.
2452 
2453  // 3. Arguments object.
2454  __ addq(r8, Immediate(Heap::kArgumentsObjectSize));
2455 
2456  // Do the allocation of all three objects in one go.
2457  __ AllocateInNewSpace(r8, rax, rdx, rdi, &runtime, TAG_OBJECT);
2458 
2459  // rax = address of new object(s) (tagged)
2460  // rcx = argument count (untagged)
2461  // Get the arguments boilerplate from the current native context into rdi.
2462  Label has_mapped_parameters, copy;
2465  __ testq(rbx, rbx);
2466  __ j(not_zero, &has_mapped_parameters, Label::kNear);
2467 
2468  const int kIndex = Context::ARGUMENTS_BOILERPLATE_INDEX;
2469  __ movq(rdi, Operand(rdi, Context::SlotOffset(kIndex)));
2470  __ jmp(&copy, Label::kNear);
2471 
2472  const int kAliasedIndex = Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX;
2473  __ bind(&has_mapped_parameters);
2474  __ movq(rdi, Operand(rdi, Context::SlotOffset(kAliasedIndex)));
2475  __ bind(&copy);
2476 
2477  // rax = address of new object (tagged)
2478  // rbx = mapped parameter count (untagged)
2479  // rcx = argument count (untagged)
2480  // rdi = address of boilerplate object (tagged)
2481  // Copy the JS object part.
2482  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
2483  __ movq(rdx, FieldOperand(rdi, i));
2484  __ movq(FieldOperand(rax, i), rdx);
2485  }
2486 
2487  // Set up the callee in-object property.
2489  __ movq(rdx, Operand(rsp, 3 * kPointerSize));
2490  __ movq(FieldOperand(rax, JSObject::kHeaderSize +
2491  Heap::kArgumentsCalleeIndex * kPointerSize),
2492  rdx);
2493 
2494  // Use the length (smi tagged) and set that as an in-object property too.
2495  // Note: rcx is tagged from here on.
2497  __ Integer32ToSmi(rcx, rcx);
2498  __ movq(FieldOperand(rax, JSObject::kHeaderSize +
2499  Heap::kArgumentsLengthIndex * kPointerSize),
2500  rcx);
2501 
2502  // Set up the elements pointer in the allocated arguments object.
2503  // If we allocated a parameter map, edi will point there, otherwise to the
2504  // backing store.
2505  __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
2507 
2508  // rax = address of new object (tagged)
2509  // rbx = mapped parameter count (untagged)
2510  // rcx = argument count (tagged)
2511  // rdi = address of parameter map or backing store (tagged)
2512 
2513  // Initialize parameter map. If there are no mapped arguments, we're done.
2514  Label skip_parameter_map;
2515  __ testq(rbx, rbx);
2516  __ j(zero, &skip_parameter_map);
2517 
2518  __ LoadRoot(kScratchRegister, Heap::kNonStrictArgumentsElementsMapRootIndex);
2519  // rbx contains the untagged argument count. Add 2 and tag to write.
2521  __ Integer64PlusConstantToSmi(r9, rbx, 2);
2523  __ movq(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
2524  __ lea(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
2525  __ movq(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);
2526 
2527  // Copy the parameter slots and the holes in the arguments.
2528  // We need to fill in mapped_parameter_count slots. They index the context,
2529  // where parameters are stored in reverse order, at
2530  // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
2531  // The mapped parameters thus need to get indices
2532  // MIN_CONTEXT_SLOTS+parameter_count-1 ..
2533  // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
2534  // We loop from right to left.
2535  Label parameters_loop, parameters_test;
2536 
2537  // Load tagged parameter count into r9.
2538  __ Integer32ToSmi(r9, rbx);
2539  __ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
2540  __ addq(r8, Operand(rsp, 1 * kPointerSize));
2541  __ subq(r8, r9);
2542  __ Move(r11, factory->the_hole_value());
2543  __ movq(rdx, rdi);
2544  __ lea(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
2545  // r9 = loop variable (tagged)
2546  // r8 = mapping index (tagged)
2547  // r11 = the hole value
2548  // rdx = address of parameter map (tagged)
2549  // rdi = address of backing store (tagged)
2550  __ jmp(&parameters_test, Label::kNear);
2551 
2552  __ bind(&parameters_loop);
2553  __ SmiSubConstant(r9, r9, Smi::FromInt(1));
2554  __ SmiToInteger64(kScratchRegister, r9);
2557  kParameterMapHeaderSize),
2558  r8);
2562  r11);
2563  __ SmiAddConstant(r8, r8, Smi::FromInt(1));
2564  __ bind(&parameters_test);
2565  __ SmiTest(r9);
2566  __ j(not_zero, &parameters_loop, Label::kNear);
2567 
2568  __ bind(&skip_parameter_map);
2569 
2570  // rcx = argument count (tagged)
2571  // rdi = address of backing store (tagged)
2572  // Copy arguments header and remaining slots (if there are any).
2574  factory->fixed_array_map());
2576 
2577  Label arguments_loop, arguments_test;
2578  __ movq(r8, rbx);
2579  __ movq(rdx, Operand(rsp, 2 * kPointerSize));
2580  // Untag rcx for the loop below.
2581  __ SmiToInteger64(rcx, rcx);
2582  __ lea(kScratchRegister, Operand(r8, times_pointer_size, 0));
2583  __ subq(rdx, kScratchRegister);
2584  __ jmp(&arguments_test, Label::kNear);
2585 
2586  __ bind(&arguments_loop);
2587  __ subq(rdx, Immediate(kPointerSize));
2588  __ movq(r9, Operand(rdx, 0));
2589  __ movq(FieldOperand(rdi, r8,
2592  r9);
2593  __ addq(r8, Immediate(1));
2594 
2595  __ bind(&arguments_test);
2596  __ cmpq(r8, rcx);
2597  __ j(less, &arguments_loop, Label::kNear);
2598 
2599  // Return and remove the on-stack parameters.
2600  __ ret(3 * kPointerSize);
2601 
2602  // Do the runtime call to allocate the arguments object.
2603  // rcx = argument count (untagged)
2604  __ bind(&runtime);
2605  __ Integer32ToSmi(rcx, rcx);
2606  __ movq(Operand(rsp, 1 * kPointerSize), rcx); // Patch argument count.
2607  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
2608 }
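// --- Illustration only ------------------------------------------------------
// A minimal sketch of the allocation size computed in GenerateNewNonStrictFast
// above (hypothetical helper name): an optional parameter map with two extra
// header slots (context and backing store), a backing store with one slot per
// actual argument, and the arguments object itself, all allocated in one go.
static inline int NonStrictArgumentsSizeSketch(int mapped_count,
                                               int argument_count) {
  int size = 0;
  // 1. Parameter map, only present when there are mapped parameters.
  if (mapped_count > 0) {
    size += FixedArray::kHeaderSize + (2 + mapped_count) * kPointerSize;
  }
  // 2. Backing store for the actual arguments.
  size += FixedArray::kHeaderSize + argument_count * kPointerSize;
  // 3. The arguments object.
  size += Heap::kArgumentsObjectSize;
  return size;
}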
2609 
2610 
2611 void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
2612  // rsp[0] : return address
2613  // rsp[8] : number of parameters
2614  // rsp[16] : receiver displacement
2615  // rsp[24] : function
2616 
2617  // Check if the calling frame is an arguments adaptor frame.
2618  Label runtime;
2622  __ j(not_equal, &runtime);
2623 
2624  // Patch the arguments.length and the parameters pointer.
2626  __ movq(Operand(rsp, 1 * kPointerSize), rcx);
2627  __ SmiToInteger64(rcx, rcx);
2628  __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
2630  __ movq(Operand(rsp, 2 * kPointerSize), rdx);
2631 
2632  __ bind(&runtime);
2633  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
2634 }
2635 
2636 
2637 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
2638  // rsp[0] : return address
2639  // rsp[8] : number of parameters
2640  // rsp[16] : receiver displacement
2641  // rsp[24] : function
2642 
2643  // Check if the calling frame is an arguments adaptor frame.
2644  Label adaptor_frame, try_allocate, runtime;
2648  __ j(equal, &adaptor_frame);
2649 
2650  // Get the length from the frame.
2651  __ movq(rcx, Operand(rsp, 1 * kPointerSize));
2652  __ SmiToInteger64(rcx, rcx);
2653  __ jmp(&try_allocate);
2654 
2655  // Patch the arguments.length and the parameters pointer.
2656  __ bind(&adaptor_frame);
2658  __ movq(Operand(rsp, 1 * kPointerSize), rcx);
2659  __ SmiToInteger64(rcx, rcx);
2660  __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
2662  __ movq(Operand(rsp, 2 * kPointerSize), rdx);
2663 
2664  // Try the new space allocation. Start out with computing the size of
2665  // the arguments object and the elements array.
2666  Label add_arguments_object;
2667  __ bind(&try_allocate);
2668  __ testq(rcx, rcx);
2669  __ j(zero, &add_arguments_object, Label::kNear);
2671  __ bind(&add_arguments_object);
2672  __ addq(rcx, Immediate(Heap::kArgumentsObjectSizeStrict));
2673 
2674  // Do the allocation of both objects in one go.
2675  __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
2676 
2677  // Get the arguments boilerplate from the current native context.
2680  const int offset =
2682  __ movq(rdi, Operand(rdi, offset));
2683 
2684  // Copy the JS object part.
2685  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
2686  __ movq(rbx, FieldOperand(rdi, i));
2687  __ movq(FieldOperand(rax, i), rbx);
2688  }
2689 
2690  // Get the length (smi tagged) and set that as an in-object property too.
2692  __ movq(rcx, Operand(rsp, 1 * kPointerSize));
2693  __ movq(FieldOperand(rax, JSObject::kHeaderSize +
2694  Heap::kArgumentsLengthIndex * kPointerSize),
2695  rcx);
2696 
2697  // If there are no actual arguments, we're done.
2698  Label done;
2699  __ testq(rcx, rcx);
2700  __ j(zero, &done);
2701 
2702  // Get the parameters pointer from the stack.
2703  __ movq(rdx, Operand(rsp, 2 * kPointerSize));
2704 
2705  // Set up the elements pointer in the allocated arguments object and
2706  // initialize the header in the elements fixed array.
2707  __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSizeStrict));
2709  __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
2711 
2712 
2714  // Untag the length for the loop below.
2715  __ SmiToInteger64(rcx, rcx);
2716 
2717  // Copy the fixed array slots.
2718  Label loop;
2719  __ bind(&loop);
2720  __ movq(rbx, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
2722  __ addq(rdi, Immediate(kPointerSize));
2723  __ subq(rdx, Immediate(kPointerSize));
2724  __ decq(rcx);
2725  __ j(not_zero, &loop);
2726 
2727  // Return and remove the on-stack parameters.
2728  __ bind(&done);
2729  __ ret(3 * kPointerSize);
2730 
2731  // Do the runtime call to allocate the arguments object.
2732  __ bind(&runtime);
2733  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
2734 }
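// --- Illustration only ------------------------------------------------------
// The strict-mode variant above has no parameter map, so the size computed
// before AllocateInNewSpace reduces to the arguments object plus an elements
// array sized by the argument count (hypothetical helper name):
static inline int StrictArgumentsSizeSketch(int argument_count) {
  int size = Heap::kArgumentsObjectSizeStrict;
  if (argument_count > 0) {
    size += FixedArray::kHeaderSize + argument_count * kPointerSize;
  }
  return size;
}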
2735 
2736 
2737 void RegExpExecStub::Generate(MacroAssembler* masm) {
2738  // Just jump directly to runtime if native RegExp is not selected at compile
2739  // time or if the regexp entry in generated code is turned off by a runtime
2740  // switch or at compilation.
2741 #ifdef V8_INTERPRETED_REGEXP
2742  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
2743 #else // V8_INTERPRETED_REGEXP
2744 
2745  // Stack frame on entry.
2746  // rsp[0]: return address
2747  // rsp[8]: last_match_info (expected JSArray)
2748  // rsp[16]: previous index
2749  // rsp[24]: subject string
2750  // rsp[32]: JSRegExp object
2751 
2752  static const int kLastMatchInfoOffset = 1 * kPointerSize;
2753  static const int kPreviousIndexOffset = 2 * kPointerSize;
2754  static const int kSubjectOffset = 3 * kPointerSize;
2755  static const int kJSRegExpOffset = 4 * kPointerSize;
2756 
2757  Label runtime;
2758  // Ensure that a RegExp stack is allocated.
2759  Isolate* isolate = masm->isolate();
2760  ExternalReference address_of_regexp_stack_memory_address =
2761  ExternalReference::address_of_regexp_stack_memory_address(isolate);
2762  ExternalReference address_of_regexp_stack_memory_size =
2763  ExternalReference::address_of_regexp_stack_memory_size(isolate);
2764  __ Load(kScratchRegister, address_of_regexp_stack_memory_size);
2766  __ j(zero, &runtime);
2767 
2768  // Check that the first argument is a JSRegExp object.
2769  __ movq(rax, Operand(rsp, kJSRegExpOffset));
2770  __ JumpIfSmi(rax, &runtime);
2771  __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
2772  __ j(not_equal, &runtime);
2773  // Check that the RegExp has been compiled (data contains a fixed array).
2775  if (FLAG_debug_code) {
2776  Condition is_smi = masm->CheckSmi(rax);
2777  __ Check(NegateCondition(is_smi),
2778  "Unexpected type for RegExp data, FixedArray expected");
2779  __ CmpObjectType(rax, FIXED_ARRAY_TYPE, kScratchRegister);
2780  __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
2781  }
2782 
2783  // rax: RegExp data (FixedArray)
2784  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2785  __ SmiToInteger32(rbx, FieldOperand(rax, JSRegExp::kDataTagOffset));
2786  __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
2787  __ j(not_equal, &runtime);
2788 
2789  // rax: RegExp data (FixedArray)
2790  // Check that the number of captures fits in the static offsets vector buffer.
2791  __ SmiToInteger32(rdx,
2793  // Calculate number of capture registers (number_of_captures + 1) * 2.
2794  __ leal(rdx, Operand(rdx, rdx, times_1, 2));
2795  // Check that the static offsets vector buffer is large enough.
2797  __ j(above, &runtime);
2798 
2799  // rax: RegExp data (FixedArray)
2800  // rdx: Number of capture registers
2801  // Check that the second argument is a string.
2802  __ movq(rdi, Operand(rsp, kSubjectOffset));
2803  __ JumpIfSmi(rdi, &runtime);
2804  Condition is_string = masm->IsObjectStringType(rdi, rbx, rbx);
2805  __ j(NegateCondition(is_string), &runtime);
2806 
2807  // rdi: Subject string.
2808  // rax: RegExp data (FixedArray).
2809  // rdx: Number of capture registers.
2810  // Check that the third argument is a positive smi less than the string
2811  // length. A negative value will be greater (unsigned comparison).
2812  __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
2813  __ JumpIfNotSmi(rbx, &runtime);
2814  __ SmiCompare(rbx, FieldOperand(rdi, String::kLengthOffset));
2815  __ j(above_equal, &runtime);
2816 
2817  // rax: RegExp data (FixedArray)
2818  // rdx: Number of capture registers
2819  // Check that the fourth object is a JSArray object.
2820  __ movq(rdi, Operand(rsp, kLastMatchInfoOffset));
2821  __ JumpIfSmi(rdi, &runtime);
2822  __ CmpObjectType(rdi, JS_ARRAY_TYPE, kScratchRegister);
2823  __ j(not_equal, &runtime);
2824  // Check that the JSArray is in fast case.
2827  __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
2828  Heap::kFixedArrayMapRootIndex);
2829  __ j(not_equal, &runtime);
2830  // Check that the last match info has space for the capture registers and the
2831  // additional information. Ensure no overflow in add.
2833  __ SmiToInteger32(rdi, FieldOperand(rbx, FixedArray::kLengthOffset));
2834  __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
2835  __ cmpl(rdx, rdi);
2836  __ j(greater, &runtime);
2837 
2838  // Reset offset for possibly sliced string.
2839  __ Set(r14, 0);
2840  // rax: RegExp data (FixedArray)
2841  // Check the representation and encoding of the subject string.
2842  Label seq_ascii_string, seq_two_byte_string, check_code;
2843  __ movq(rdi, Operand(rsp, kSubjectOffset));
2844  // Make a copy of the original subject string.
2845  __ movq(r15, rdi);
2848  // First check for flat two byte string.
2849  __ andb(rbx, Immediate(kIsNotStringMask |
2854  __ j(zero, &seq_two_byte_string, Label::kNear);
2855  // Any other flat string must be a flat ASCII string. None of the following
2856  // string type tests will succeed if subject is not a string or a short
2857  // external string.
2858  __ andb(rbx, Immediate(kIsNotStringMask |
2861  __ j(zero, &seq_ascii_string, Label::kNear);
2862 
2863  // rbx: whether subject is a string and if yes, its string representation
2864  // Check for flat cons string or sliced string.
2865  // A flat cons string is a cons string where the second part is the empty
2866  // string. In that case the subject string is just the first part of the cons
2867  // string. Also in this case the first part of the cons string is known to be
2868  // a sequential string or an external string.
2869  // In the case of a sliced string its offset has to be taken into account.
2870  Label cons_string, external_string, check_encoding;
2875  __ cmpq(rbx, Immediate(kExternalStringTag));
2876  __ j(less, &cons_string, Label::kNear);
2877  __ j(equal, &external_string);
2878 
2879  // Catch non-string subject or short external string.
2881  __ testb(rbx, Immediate(kIsNotStringMask | kShortExternalStringMask));
2882  __ j(not_zero, &runtime);
2883 
2884  // String is sliced.
2885  __ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset));
2887  // r14: slice offset
2888  // r15: original subject string
2889  // rdi: parent string
2890  __ jmp(&check_encoding, Label::kNear);
2891  // String is a cons string, check whether it is flat.
2892  __ bind(&cons_string);
2894  Heap::kEmptyStringRootIndex);
2895  __ j(not_equal, &runtime);
2897  // rdi: first part of cons string or parent of sliced string.
2898  // rbx: map of first part of cons string or map of parent of sliced string.
2899  // Is first part of cons or parent of slice a flat two byte string?
2900  __ bind(&check_encoding);
2905  __ j(zero, &seq_two_byte_string, Label::kNear);
2906  // Any other flat string must be sequential ASCII or external.
2908  Immediate(kStringRepresentationMask));
2909  __ j(not_zero, &external_string);
2910 
2911  __ bind(&seq_ascii_string);
2912  // rdi: subject string (sequential ASCII)
2913  // rax: RegExp data (FixedArray)
2915  __ Set(rcx, 1); // Type is ASCII.
2916  __ jmp(&check_code, Label::kNear);
2917 
2918  __ bind(&seq_two_byte_string);
2919  // rdi: subject string (flat two-byte)
2920  // rax: RegExp data (FixedArray)
2922  __ Set(rcx, 0); // Type is two byte.
2923 
2924  __ bind(&check_code);
2925  // Check that the irregexp code has been generated for the actual string
2926  // encoding. If it has, the field contains a code object; otherwise it contains
2927  // a smi (code flushing support).
2928  __ JumpIfSmi(r11, &runtime);
2929 
2930  // rdi: subject string
2931  // rcx: encoding of subject string (1 if ASCII, 0 if two_byte);
2932  // r11: code
2933  // Load used arguments before starting to push arguments for call to native
2934  // RegExp code to avoid handling changing stack height.
2935  __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
2936 
2937  // rdi: subject string
2938  // rbx: previous index
2939  // rcx: encoding of subject string (1 if ASCII 0 if two_byte);
2940  // r11: code
2941  // All checks done. Now push arguments for native regexp code.
2942  Counters* counters = masm->isolate()->counters();
2943  __ IncrementCounter(counters->regexp_entry_native(), 1);
2944 
2945  // Isolates: note we add an additional parameter here (isolate pointer).
2946  static const int kRegExpExecuteArguments = 9;
2947  int argument_slots_on_stack =
2948  masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
2949  __ EnterApiExitFrame(argument_slots_on_stack);
2950 
2951  // Argument 9: Pass current isolate address.
2952  // __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
2953  // Immediate(ExternalReference::isolate_address()));
2954  __ LoadAddress(kScratchRegister, ExternalReference::isolate_address());
2955  __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
2957 
2958  // Argument 8: Indicate that this is a direct call from JavaScript.
2959  __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize),
2960  Immediate(1));
2961 
2962  // Argument 7: Start (high end) of backtracking stack memory area.
2963  __ movq(kScratchRegister, address_of_regexp_stack_memory_address);
2964  __ movq(r9, Operand(kScratchRegister, 0));
2965  __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
2966  __ addq(r9, Operand(kScratchRegister, 0));
2967  __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r9);
2968 
2969  // Argument 6: Set the number of capture registers to zero to force global
2970  // regexps to behave as non-global. This does not affect non-global regexps.
2971  // Argument 6 is passed in r9 on Linux and on the stack on Windows.
2972 #ifdef _WIN64
2973  __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kPointerSize),
2974  Immediate(0));
2975 #else
2976  __ Set(r9, 0);
2977 #endif
2978 
2979  // Argument 5: static offsets vector buffer.
2980  __ LoadAddress(r8,
2981  ExternalReference::address_of_static_offsets_vector(isolate));
2982  // Argument 5 passed in r8 on Linux and on the stack on Windows.
2983 #ifdef _WIN64
2984  __ movq(Operand(rsp, (argument_slots_on_stack - 5) * kPointerSize), r8);
2985 #endif
2986 
2987  // First four arguments are passed in registers on both Linux and Windows.
2988 #ifdef _WIN64
2989  Register arg4 = r9;
2990  Register arg3 = r8;
2991  Register arg2 = rdx;
2992  Register arg1 = rcx;
2993 #else
2994  Register arg4 = rcx;
2995  Register arg3 = rdx;
2996  Register arg2 = rsi;
2997  Register arg1 = rdi;
2998 #endif
2999 
3000  // Keep track on aliasing between argX defined above and the registers used.
3001  // rdi: subject string
3002  // rbx: previous index
3003  // rcx: encoding of subject string (1 if ASCII 0 if two_byte);
3004  // r11: code
3005  // r14: slice offset
3006  // r15: original subject string
3007 
3008  // Argument 2: Previous index.
3009  __ movq(arg2, rbx);
3010 
3011  // Argument 4: End of string data
3012  // Argument 3: Start of string data
3013  Label setup_two_byte, setup_rest, got_length, length_not_from_slice;
3014  // Prepare start and end index of the input.
3015  // Load the length from the original sliced string if that is the case.
3016  __ addq(rbx, r14);
3017  __ SmiToInteger32(arg3, FieldOperand(r15, String::kLengthOffset));
3018  __ addq(r14, arg3); // Using arg3 as scratch.
3019 
3020  // rbx: start index of the input
3021  // r14: end index of the input
3022  // r15: original subject string
3023  __ testb(rcx, rcx); // Last use of rcx as encoding of subject string.
3024  __ j(zero, &setup_two_byte, Label::kNear);
3027  __ jmp(&setup_rest, Label::kNear);
3028  __ bind(&setup_two_byte);
3031  __ bind(&setup_rest);
3032 
3033  // Argument 1: Original subject string.
3034  // The original subject is in the previous stack frame. Therefore we have to
3035  // use rbp, which points exactly to one pointer size below the previous rsp.
3036  // (Because creating a new stack frame pushes the previous rbp onto the stack
3037  // and thereby moves up rsp by one kPointerSize.)
3038  __ movq(arg1, r15);
3039 
3040  // Locate the code entry and call it.
3041  __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
3042  __ call(r11);
3043 
3044  __ LeaveApiExitFrame();
3045 
3046  // Check the result.
3047  Label success;
3048  Label exception;
3049  __ cmpl(rax, Immediate(1));
3050  // We expect exactly one result since we force the called regexp to behave
3051  // as non-global.
3052  __ j(equal, &success, Label::kNear);
3053  __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
3054  __ j(equal, &exception);
3055  __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
3056  // If none of the above, it can only be retry.
3057  // Handle that in the runtime system.
3058  __ j(not_equal, &runtime);
3059 
3060  // For failure return null.
3061  __ LoadRoot(rax, Heap::kNullValueRootIndex);
3062  __ ret(4 * kPointerSize);
3063 
3064  // Load RegExp data.
3065  __ bind(&success);
3066  __ movq(rax, Operand(rsp, kJSRegExpOffset));
3068  __ SmiToInteger32(rax,
3070  // Calculate number of capture registers (number_of_captures + 1) * 2.
3071  __ leal(rdx, Operand(rax, rax, times_1, 2));
3072 
3073  // rdx: Number of capture registers
3074  // Load last_match_info which is still known to be a fast case JSArray.
3075  __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
3077 
3078  // rbx: last_match_info backing store (FixedArray)
3079  // rdx: number of capture registers
3080  // Store the capture count.
3081  __ Integer32ToSmi(kScratchRegister, rdx);
3084  // Store last subject and last input.
3085  __ movq(rax, Operand(rsp, kSubjectOffset));
3087  __ RecordWriteField(rbx,
3089  rax,
3090  rdi,
3091  kDontSaveFPRegs);
3092  __ movq(rax, Operand(rsp, kSubjectOffset));
3094  __ RecordWriteField(rbx,
3096  rax,
3097  rdi,
3098  kDontSaveFPRegs);
3099 
3100  // Get the static offsets vector filled by the native regexp code.
3101  __ LoadAddress(rcx,
3102  ExternalReference::address_of_static_offsets_vector(isolate));
3103 
3104  // rbx: last_match_info backing store (FixedArray)
3105  // rcx: offsets vector
3106  // rdx: number of capture registers
3107  Label next_capture, done;
3108  // Capture register counter starts from number of capture registers and
3109  // counts down until wrapping after zero.
3110  __ bind(&next_capture);
3111  __ subq(rdx, Immediate(1));
3112  __ j(negative, &done, Label::kNear);
3113  // Read the value from the static offsets vector buffer and make it a smi.
3114  __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
3115  __ Integer32ToSmi(rdi, rdi);
3116  // Store the smi value in the last match info.
3117  __ movq(FieldOperand(rbx,
3118  rdx,
3121  rdi);
3122  __ jmp(&next_capture);
3123  __ bind(&done);
3124 
3125  // Return last match info.
3126  __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
3127  __ ret(4 * kPointerSize);
3128 
3129  __ bind(&exception);
3130  // Result must now be exception. If there is no pending exception already, a
3131  // stack overflow (on the backtrack stack) was detected in RegExp code, but
3132  // the exception has not been created yet. Handle that in the runtime system.
3133  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
3134  ExternalReference pending_exception_address(
3135  Isolate::kPendingExceptionAddress, isolate);
3136  Operand pending_exception_operand =
3137  masm->ExternalOperand(pending_exception_address, rbx);
3138  __ movq(rax, pending_exception_operand);
3139  __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
3140  __ cmpq(rax, rdx);
3141  __ j(equal, &runtime);
3142  __ movq(pending_exception_operand, rdx);
3143 
3144  __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
3145  Label termination_exception;
3146  __ j(equal, &termination_exception, Label::kNear);
3147  __ Throw(rax);
3148 
3149  __ bind(&termination_exception);
3150  __ ThrowUncatchable(rax);
3151 
3152  // External string. Short external strings have already been ruled out.
3153  // rdi: subject string (expected to be external)
3154  // rbx: scratch
3155  __ bind(&external_string);
3158  if (FLAG_debug_code) {
3159  // Assert that we do not have a cons or slice (indirect strings) here.
3160  // Sequential strings have already been ruled out.
3161  __ testb(rbx, Immediate(kIsIndirectStringMask));
3162  __ Assert(zero, "external string expected, but not found");
3163  }
3165  // Move the pointer so that offset-wise, it looks like a sequential string.
3167  __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3169  __ testb(rbx, Immediate(kStringEncodingMask));
3170  __ j(not_zero, &seq_ascii_string);
3171  __ jmp(&seq_two_byte_string);
3172 
3173  // Do the runtime call to execute the regexp.
3174  __ bind(&runtime);
3175  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
3176 #endif // V8_INTERPRETED_REGEXP
3177 }
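// --- Illustration only ------------------------------------------------------
// The capture register count used above is (number_of_captures + 1) * 2: one
// start offset and one end offset per capture group, plus the implicit capture
// for the whole match. The stub folds this into a single lea (rdx + rdx*1 + 2).
// Hypothetical helper name:
static inline int CaptureRegisterCountSketch(int number_of_captures) {
  return (number_of_captures + 1) * 2;
}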
3178 
3179 
3180 void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
3181  const int kMaxInlineLength = 100;
3182  Label slowcase;
3183  Label done;
3184  __ movq(r8, Operand(rsp, kPointerSize * 3));
3185  __ JumpIfNotSmi(r8, &slowcase);
3186  __ SmiToInteger32(rbx, r8);
3187  __ cmpl(rbx, Immediate(kMaxInlineLength));
3188  __ j(above, &slowcase);
3189  // Smi-tagging is equivalent to multiplying by 2.
3190  STATIC_ASSERT(kSmiTag == 0);
3191  STATIC_ASSERT(kSmiTagSize == 1);
3192  // Allocate RegExpResult followed by FixedArray with size in rbx.
3193  // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
3194  // Elements: [Map][Length][..elements..]
3195  __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
3197  rbx, // In: Number of elements.
3198  rax, // Out: Start of allocation (tagged).
3199  rcx, // Out: End of allocation.
3200  rdx, // Scratch register
3201  &slowcase,
3202  TAG_OBJECT);
3203  // rax: Start of allocated area, object-tagged.
3204  // rbx: Number of array elements as int32.
3205  // r8: Number of array elements as smi.
3206 
3207  // Set JSArray map to global.regexp_result_map().
3212 
3213  // Set empty properties FixedArray.
3214  __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
3216 
3217  // Set elements to point to FixedArray allocated right after the JSArray.
3218  __ lea(rcx, Operand(rax, JSRegExpResult::kSize));
3220 
3221  // Set input, index and length fields from arguments.
3222  __ movq(r8, Operand(rsp, kPointerSize * 1));
3224  __ movq(r8, Operand(rsp, kPointerSize * 2));
3226  __ movq(r8, Operand(rsp, kPointerSize * 3));
3228 
3229  // Fill out the elements FixedArray.
3230  // rax: JSArray.
3231  // rcx: FixedArray.
3232  // rbx: Number of elements in array as int32.
3233 
3234  // Set map.
3235  __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
3237  // Set length.
3238  __ Integer32ToSmi(rdx, rbx);
3240  // Fill contents of fixed-array with undefined.
3241  __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
3243  // Fill fixed array elements with undefined.
3244  // rax: JSArray.
3245  // rbx: Number of elements in array that remains to be filled, as int32.
3246  // rcx: Start of elements in FixedArray.
3247  // rdx: undefined.
3248  Label loop;
3249  __ testl(rbx, rbx);
3250  __ bind(&loop);
3251  __ j(less_equal, &done); // Jump if rbx is negative or zero.
3252  __ subl(rbx, Immediate(1));
3253  __ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx);
3254  __ jmp(&loop);
3255 
3256  __ bind(&done);
3257  __ ret(3 * kPointerSize);
3258 
3259  __ bind(&slowcase);
3260  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
3261 }
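// --- Illustration only ------------------------------------------------------
// RegExpConstructResultStub above allocates the JSRegExpResult object and its
// elements FixedArray in one contiguous chunk; a sketch of the total size for
// a result of the given length (hypothetical helper name):
static inline int RegExpResultSizeSketch(int num_elements) {
  return JSRegExpResult::kSize + FixedArray::kHeaderSize +
         num_elements * kPointerSize;
}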
3262 
3263 
3264 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
3265  Register object,
3266  Register result,
3267  Register scratch1,
3268  Register scratch2,
3269  bool object_is_smi,
3270  Label* not_found) {
3271  // Use of registers. Register result is used as a temporary.
3272  Register number_string_cache = result;
3273  Register mask = scratch1;
3274  Register scratch = scratch2;
3275 
3276  // Load the number string cache.
3277  __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
3278 
3279  // Make the hash mask from the length of the number string cache. It
3280  // contains two elements (number and string) for each cache entry.
3281  __ SmiToInteger32(
3282  mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
3283  __ shrl(mask, Immediate(1));
3284  __ subq(mask, Immediate(1)); // Make mask.
3285 
3286  // Calculate the entry in the number string cache. The hash value in the
3287  // number string cache for smis is just the smi value, and the hash for
3288  // doubles is the xor of the upper and lower words. See
3289  // Heap::GetNumberStringCache.
3290  Label is_smi;
3291  Label load_result_from_cache;
3292  Factory* factory = masm->isolate()->factory();
3293  if (!object_is_smi) {
3294  __ JumpIfSmi(object, &is_smi);
3295  __ CheckMap(object,
3296  factory->heap_number_map(),
3297  not_found,
3299 
3300  STATIC_ASSERT(8 == kDoubleSize);
3301  __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
3302  __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
3303  GenerateConvertHashCodeToIndex(masm, scratch, mask);
3304 
3305  Register index = scratch;
3306  Register probe = mask;
3307  __ movq(probe,
3308  FieldOperand(number_string_cache,
3309  index,
3310  times_1,
3312  __ JumpIfSmi(probe, not_found);
3313  __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
3314  __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
3315  __ ucomisd(xmm0, xmm1);
3316  __ j(parity_even, not_found); // Bail out if NaN is involved.
3317  __ j(not_equal, not_found); // The cache did not contain this value.
3318  __ jmp(&load_result_from_cache);
3319  }
3320 
3321  __ bind(&is_smi);
3322  __ SmiToInteger32(scratch, object);
3323  GenerateConvertHashCodeToIndex(masm, scratch, mask);
3324 
3325  Register index = scratch;
3326  // Check if the entry is the smi we are looking for.
3327  __ cmpq(object,
3328  FieldOperand(number_string_cache,
3329  index,
3330  times_1,
3332  __ j(not_equal, not_found);
3333 
3334  // Get the result from the cache.
3335  __ bind(&load_result_from_cache);
3336  __ movq(result,
3337  FieldOperand(number_string_cache,
3338  index,
3339  times_1,
3340  FixedArray::kHeaderSize + kPointerSize));
3341  Counters* counters = masm->isolate()->counters();
3342  __ IncrementCounter(counters->number_to_string_native(), 1);
3343 }
3344 
3345 
3346 void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
3347  Register hash,
3348  Register mask) {
3349  __ and_(hash, mask);
3350  // Each entry in the string cache consists of two pointer-sized fields,
3351  // but the times_twice_pointer_size (multiplication by 16) scale factor
3352  // is not supported by the addressing mode on x64.
3353  // So we have to premultiply the entry index before the lookup.
3354  __ shl(hash, Immediate(kPointerSizeLog2 + 1));
3355 }
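
// A plain-C++ sketch of the cache lookup math above, assuming the cache is a
// FixedArray of (number, string) pairs. A smi hashes to its own value, a heap
// number hashes to the xor of its upper and lower 32-bit words, and the masked
// hash is premultiplied by the two-pointer entry size because x64 addressing
// has no times-16 scale factor. Helper names here are illustrative.
static unsigned NumberStringCacheEntryOffsetSketch(unsigned hash,
                                                   int cache_length) {
  unsigned mask = static_cast<unsigned>(cache_length / 2) - 1;  // two fields per entry
  return static_cast<unsigned>((hash & mask) * 2 * sizeof(void*));
}

static unsigned DoubleHashSketch(double value) {
  // Assumes 8-byte doubles and 4-byte unsigned ints, as on x64.
  unsigned words[2];
  const char* src = reinterpret_cast<const char*>(&value);
  char* dst = reinterpret_cast<char*>(words);
  for (int i = 0; i < 8; ++i) dst[i] = src[i];
  return words[0] ^ words[1];
}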
3356 
3357 
3358 void NumberToStringStub::Generate(MacroAssembler* masm) {
3359  Label runtime;
3360 
3361  __ movq(rbx, Operand(rsp, kPointerSize));
3362 
3363  // Generate code to lookup number in the number string cache.
3364  GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime);
3365  __ ret(1 * kPointerSize);
3366 
3367  __ bind(&runtime);
3368  // Handle number to string in the runtime system if not found in the cache.
3369  __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
3370 }
3371 
3372 
3373 static int NegativeComparisonResult(Condition cc) {
3374  ASSERT(cc != equal);
3375  ASSERT((cc == less) || (cc == less_equal)
3376  || (cc == greater) || (cc == greater_equal));
3377  return (cc == greater || cc == greater_equal) ? LESS : GREATER;
3378 }
3379 
3380 
3381 void CompareStub::Generate(MacroAssembler* masm) {
3382  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
3383 
3384  Label check_unequal_objects, done;
3385  Factory* factory = masm->isolate()->factory();
3386 
3387  // Compare two smis if required.
3388  if (include_smi_compare_) {
3389  Label non_smi, smi_done;
3390  __ JumpIfNotBothSmi(rax, rdx, &non_smi);
3391  __ subq(rdx, rax);
3392  __ j(no_overflow, &smi_done);
3393  __ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
3394  __ bind(&smi_done);
3395  __ movq(rax, rdx);
3396  __ ret(0);
3397  __ bind(&non_smi);
3398  } else if (FLAG_debug_code) {
3399  Label ok;
3400  __ JumpIfNotSmi(rdx, &ok);
3401  __ JumpIfNotSmi(rax, &ok);
3402  __ Abort("CompareStub: smi operands");
3403  __ bind(&ok);
3404  }
3405 
3406  // The compare stub returns a positive, negative, or zero 64-bit integer
3407  // value in rax, corresponding to the result of comparing the two inputs.
3408  // NOTICE! This code is only reached after a smi-fast-case check, so
3409  // it is certain that at least one operand isn't a smi.
3410 
3411  // Two identical objects are equal unless they are both NaN or undefined.
3412  {
3413  Label not_identical;
3414  __ cmpq(rax, rdx);
3415  __ j(not_equal, &not_identical, Label::kNear);
3416 
3417  if (cc_ != equal) {
3418  // Check for undefined. undefined OP undefined is false even though
3419  // undefined == undefined.
3420  Label check_for_nan;
3421  __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
3422  __ j(not_equal, &check_for_nan, Label::kNear);
3423  __ Set(rax, NegativeComparisonResult(cc_));
3424  __ ret(0);
3425  __ bind(&check_for_nan);
3426  }
3427 
3428  // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
3429  // so we do the second best thing - test it ourselves.
3430  // Note: if cc_ != equal, never_nan_nan_ is not used.
3431  // We cannot set rax to EQUAL until just before return because
3432  // rax must be unchanged on jump to not_identical.
3433  if (never_nan_nan_ && (cc_ == equal)) {
3434  __ Set(rax, EQUAL);
3435  __ ret(0);
3436  } else {
3437  Label heap_number;
3438  // If it's not a heap number, then return equal for (in)equality operator.
3440  factory->heap_number_map());
3441  __ j(equal, &heap_number, Label::kNear);
3442  if (cc_ != equal) {
3443  // Call runtime on identical objects. Otherwise return equal.
3444  __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
3445  __ j(above_equal, &not_identical, Label::kNear);
3446  }
3447  __ Set(rax, EQUAL);
3448  __ ret(0);
3449 
3450  __ bind(&heap_number);
3451  // It is a heap number, so return equal if it's not NaN.
3452  // For NaN, return 1 for every condition except greater and
3453  // greater-equal. Return -1 for them, so the comparison yields
3454  // false for all conditions except not-equal.
3455  __ Set(rax, EQUAL);
3457  __ ucomisd(xmm0, xmm0);
3458  __ setcc(parity_even, rax);
3459  // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
3460  if (cc_ == greater_equal || cc_ == greater) {
3461  __ neg(rax);
3462  }
3463  __ ret(0);
3464  }
3465 
3466  __ bind(&not_identical);
3467  }
3468 
3469  if (cc_ == equal) { // Both strict and non-strict.
3470  Label slow; // Fallthrough label.
3471 
3472  // If we're doing a strict equality comparison, we don't have to do
3473  // type conversion, so we generate code to do fast comparison for objects
3474  // and oddballs. Non-smi numbers and strings still go through the usual
3475  // slow-case code.
3476  if (strict_) {
3477  // If either is a Smi (we know that not both are), then they can only
3478  // be equal if the other is a HeapNumber. If so, use the slow case.
3479  {
3480  Label not_smis;
3481  __ SelectNonSmi(rbx, rax, rdx, &not_smis);
3482 
3483  // Check if the non-smi operand is a heap number.
3485  factory->heap_number_map());
3486  // If heap number, handle it in the slow case.
3487  __ j(equal, &slow);
3488  // Return non-equal. ebx (the lower half of rbx) is not zero.
3489  __ movq(rax, rbx);
3490  __ ret(0);
3491 
3492  __ bind(&not_smis);
3493  }
3494 
3495  // If either operand is a JSObject or an oddball value, then they are not
3496  // equal since their pointers are different.
3497  // There is no test for undetectability in strict equality.
3498 
3499  // If the first object is a JS object, we have done pointer comparison.
3501  Label first_non_object;
3502  __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
3503  __ j(below, &first_non_object, Label::kNear);
3504  // Return non-zero (eax, the lower half of rax, is not zero).
3505  Label return_not_equal;
3507  __ bind(&return_not_equal);
3508  __ ret(0);
3509 
3510  __ bind(&first_non_object);
3511  // Check for oddballs: true, false, null, undefined.
3512  __ CmpInstanceType(rcx, ODDBALL_TYPE);
3513  __ j(equal, &return_not_equal);
3514 
3515  __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
3516  __ j(above_equal, &return_not_equal);
3517 
3518  // Check for oddballs: true, false, null, undefined.
3519  __ CmpInstanceType(rcx, ODDBALL_TYPE);
3520  __ j(equal, &return_not_equal);
3521 
3522  // Fall through to the general case.
3523  }
3524  __ bind(&slow);
3525  }
3526 
3527  // Generate the number comparison code.
3528  if (include_number_compare_) {
3529  Label non_number_comparison;
3530  Label unordered;
3531  FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
3532  __ xorl(rax, rax);
3533  __ xorl(rcx, rcx);
3534  __ ucomisd(xmm0, xmm1);
3535 
3536  // Don't base result on EFLAGS when a NaN is involved.
3537  __ j(parity_even, &unordered, Label::kNear);
3538  // Return a result of -1, 0, or 1, based on EFLAGS.
3539  __ setcc(above, rax);
3540  __ setcc(below, rcx);
3541  __ subq(rax, rcx);
3542  __ ret(0);
3543 
3544  // If one of the numbers was NaN, then the result is always false.
3545  // The cc is never not-equal.
3546  __ bind(&unordered);
3547  ASSERT(cc_ != not_equal);
3548  if (cc_ == less || cc_ == less_equal) {
3549  __ Set(rax, 1);
3550  } else {
3551  __ Set(rax, -1);
3552  }
3553  __ ret(0);
3554 
3555  // The number comparison code did not provide a valid result.
3556  __ bind(&non_number_comparison);
3557  }
3558 
3559  // Fast negative check for symbol-to-symbol equality.
3560  Label check_for_strings;
3561  if (cc_ == equal) {
3562  BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
3563  BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister);
3564 
3565  // We've already checked for object identity, so if both operands
3566  // are symbols they aren't equal. Register eax (not rax) already holds a
3567  // non-zero value, which indicates not equal, so just return.
3568  __ ret(0);
3569  }
3570 
3571  __ bind(&check_for_strings);
3572 
3573  __ JumpIfNotBothSequentialAsciiStrings(
3574  rdx, rax, rcx, rbx, &check_unequal_objects);
3575 
3576  // Inline comparison of ASCII strings.
3577  if (cc_ == equal) {
3579  rdx,
3580  rax,
3581  rcx,
3582  rbx);
3583  } else {
3585  rdx,
3586  rax,
3587  rcx,
3588  rbx,
3589  rdi,
3590  r8);
3591  }
3592 
3593 #ifdef DEBUG
3594  __ Abort("Unexpected fall-through from string comparison");
3595 #endif
3596 
3597  __ bind(&check_unequal_objects);
3598  if (cc_ == equal && !strict_) {
3599  // Not strict equality. Objects are unequal if
3600  // they are both JSObjects and not undetectable,
3601  // and their pointers are different.
3602  Label not_both_objects, return_unequal;
3603  // At most one is a smi, so we can test for smi by adding the two.
3604  // A smi plus a heap object has the low bit set, a heap object plus
3605  // a heap object has the low bit clear.
3606  STATIC_ASSERT(kSmiTag == 0);
3607  STATIC_ASSERT(kSmiTagMask == 1);
3608  __ lea(rcx, Operand(rax, rdx, times_1, 0));
3609  __ testb(rcx, Immediate(kSmiTagMask));
3610  __ j(not_zero, &not_both_objects, Label::kNear);
3611  __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
3612  __ j(below, &not_both_objects, Label::kNear);
3613  __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
3614  __ j(below, &not_both_objects, Label::kNear);
3616  Immediate(1 << Map::kIsUndetectable));
3617  __ j(zero, &return_unequal, Label::kNear);
3619  Immediate(1 << Map::kIsUndetectable));
3620  __ j(zero, &return_unequal, Label::kNear);
3621  // The objects are both undetectable, so they both compare as the value
3622  // undefined, and are equal.
3623  __ Set(rax, EQUAL);
3624  __ bind(&return_unequal);
3625  // Return non-equal by returning the non-zero object pointer in rax,
3626  // or return equal if we fell through to here.
3627  __ ret(0);
3628  __ bind(&not_both_objects);
3629  }
3630 
3631  // Push arguments below the return address to prepare jump to builtin.
3632  __ pop(rcx);
3633  __ push(rdx);
3634  __ push(rax);
3635 
3636  // Figure out which native to call and setup the arguments.
3637  Builtins::JavaScript builtin;
3638  if (cc_ == equal) {
3639  builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
3640  } else {
3641  builtin = Builtins::COMPARE;
3642  __ Push(Smi::FromInt(NegativeComparisonResult(cc_)));
3643  }
3644 
3645  // Restore return address on the stack.
3646  __ push(rcx);
3647 
3648  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
3649  // tagged as a small integer.
3650  __ InvokeBuiltin(builtin, JUMP_FUNCTION);
3651 }
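
// A plain-C++ sketch of the smi fast path at the top of this stub. On x64 a
// smi keeps its 32-bit payload in the upper half of a 64-bit word, so the raw
// 64-bit difference of two smis already has the sign of the comparison result;
// when the subtraction overflows, flipping all bits (the not_ instruction
// above) restores the correct sign and cannot yield zero because the low 32
// bits of a smi difference are always zero. Helper names are illustrative.
static long long SmiCompareResultSketch(long long smi_lhs, long long smi_rhs) {
  unsigned long long ul = static_cast<unsigned long long>(smi_lhs);
  unsigned long long ur = static_cast<unsigned long long>(smi_rhs);
  long long diff = static_cast<long long>(ul - ur);  // wraps like subq
  bool inputs_differ_in_sign = (smi_lhs ^ smi_rhs) < 0;
  bool overflowed = inputs_differ_in_sign && ((smi_lhs ^ diff) < 0);
  return overflowed ? ~diff : diff;  // negative, zero, or positive
}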
3652 
3653 
3654 void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
3655  Label* label,
3656  Register object,
3657  Register scratch) {
3658  __ JumpIfSmi(object, label);
3659  __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
3660  __ movzxbq(scratch,
3662  // Ensure that no non-strings have the symbol bit set.
3664  STATIC_ASSERT(kSymbolTag != 0);
3665  __ testb(scratch, Immediate(kIsSymbolMask));
3666  __ j(zero, label);
3667 }
3668 
3669 
3670 void StackCheckStub::Generate(MacroAssembler* masm) {
3671  __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
3672 }
3673 
3674 
3675 void InterruptStub::Generate(MacroAssembler* masm) {
3676  __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
3677 }
3678 
3679 
3680 static void GenerateRecordCallTarget(MacroAssembler* masm) {
3681  // Cache the called function in a global property cell. Cache states
3682  // are uninitialized, monomorphic (indicated by a JSFunction), and
3683  // megamorphic.
3684  // rbx : cache cell for call target
3685  // rdi : the function to call
3686  Isolate* isolate = masm->isolate();
3687  Label initialize, done;
3688 
3689  // Load the cache state into rcx.
3691 
3692  // A monomorphic cache hit or an already megamorphic state: invoke the
3693  // function without changing the state.
3694  __ cmpq(rcx, rdi);
3695  __ j(equal, &done, Label::kNear);
3697  __ j(equal, &done, Label::kNear);
3698 
3699  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
3700  // megamorphic.
3702  __ j(equal, &initialize, Label::kNear);
3703  // MegamorphicSentinel is an immortal immovable object (undefined) so no
3704  // write-barrier is needed.
3707  __ jmp(&done, Label::kNear);
3708 
3709  // An uninitialized cache is patched with the function.
3710  __ bind(&initialize);
3712  // No need for a write barrier here - cells are rescanned.
3713 
3714  __ bind(&done);
3715 }
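
// A plain-C++ sketch of the cache-state transitions implemented above. The
// cell holds one of three values: an "uninitialized" sentinel, a concrete
// function (monomorphic), or a "megamorphic" sentinel, and a monomorphic miss
// permanently degrades the cell to megamorphic. The sentinel pointers below
// are illustrative stand-ins, not V8's actual sentinel objects.
static void RecordCallTargetSketch(void** cell,
                                   void* function,
                                   void* uninitialized_sentinel,
                                   void* megamorphic_sentinel) {
  void* state = *cell;
  if (state == function || state == megamorphic_sentinel) {
    return;                        // Hit, or already megamorphic: no change.
  }
  if (state == uninitialized_sentinel) {
    *cell = function;              // First observed target: go monomorphic.
    return;
  }
  *cell = megamorphic_sentinel;    // A different target was seen: go megamorphic.
}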
3716 
3717 
3718 void CallFunctionStub::Generate(MacroAssembler* masm) {
3719  // rbx : cache cell for call target
3720  // rdi : the function to call
3721  Isolate* isolate = masm->isolate();
3722  Label slow, non_function;
3723 
3724  // The receiver might implicitly be the global object. This is
3725  // indicated by passing the hole as the receiver to the call
3726  // function stub.
3727  if (ReceiverMightBeImplicit()) {
3728  Label call;
3729  // Get the receiver from the stack.
3730  // +1 ~ return address
3731  __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
3732  // Call as function is indicated with the hole.
3733  __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
3734  __ j(not_equal, &call, Label::kNear);
3735  // Patch the receiver on the stack with the global receiver object.
3736  __ movq(rcx, GlobalObjectOperand());
3738  __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rcx);
3739  __ bind(&call);
3740  }
3741 
3742  // Check that the function really is a JavaScript function.
3743  __ JumpIfSmi(rdi, &non_function);
3744  // Goto slow case if we do not have a function.
3745  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
3746  __ j(not_equal, &slow);
3747 
3748  if (RecordCallTarget()) {
3749  GenerateRecordCallTarget(masm);
3750  }
3751 
3752  // Fast-case: Just invoke the function.
3753  ParameterCount actual(argc_);
3754 
3755  if (ReceiverMightBeImplicit()) {
3756  Label call_as_function;
3757  __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
3758  __ j(equal, &call_as_function);
3759  __ InvokeFunction(rdi,
3760  actual,
3761  JUMP_FUNCTION,
3762  NullCallWrapper(),
3763  CALL_AS_METHOD);
3764  __ bind(&call_as_function);
3765  }
3766  __ InvokeFunction(rdi,
3767  actual,
3768  JUMP_FUNCTION,
3769  NullCallWrapper(),
3771 
3772  // Slow-case: Non-function called.
3773  __ bind(&slow);
3774  if (RecordCallTarget()) {
3775  // If there is a call target cache, mark it megamorphic in the
3776  // non-function case. MegamorphicSentinel is an immortal immovable
3777  // object (undefined) so no write barrier is needed.
3780  }
3781  // Check for function proxy.
3782  __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
3783  __ j(not_equal, &non_function);
3784  __ pop(rcx);
3785  __ push(rdi); // put proxy as additional argument under return address
3786  __ push(rcx);
3787  __ Set(rax, argc_ + 1);
3788  __ Set(rbx, 0);
3789  __ SetCallKind(rcx, CALL_AS_METHOD);
3790  __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
3791  {
3792  Handle<Code> adaptor =
3793  masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
3794  __ jmp(adaptor, RelocInfo::CODE_TARGET);
3795  }
3796 
3797  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
3798  // of the original receiver from the call site).
3799  __ bind(&non_function);
3800  __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
3801  __ Set(rax, argc_);
3802  __ Set(rbx, 0);
3803  __ SetCallKind(rcx, CALL_AS_METHOD);
3804  __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
3805  Handle<Code> adaptor =
3806  Isolate::Current()->builtins()->ArgumentsAdaptorTrampoline();
3807  __ Jump(adaptor, RelocInfo::CODE_TARGET);
3808 }
3809 
3810 
3811 void CallConstructStub::Generate(MacroAssembler* masm) {
3812  // rax : number of arguments
3813  // rbx : cache cell for call target
3814  // rdi : constructor function
3815  Label slow, non_function_call;
3816 
3817  // Check that function is not a smi.
3818  __ JumpIfSmi(rdi, &non_function_call);
3819  // Check that function is a JSFunction.
3820  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
3821  __ j(not_equal, &slow);
3822 
3823  if (RecordCallTarget()) {
3824  GenerateRecordCallTarget(masm);
3825  }
3826 
3827  // Jump to the function-specific construct stub.
3831  __ jmp(rbx);
3832 
3833  // rdi: called object
3834  // rax: number of arguments
3835  // rcx: object map
3836  Label do_call;
3837  __ bind(&slow);
3838  __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
3839  __ j(not_equal, &non_function_call);
3840  __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
3841  __ jmp(&do_call);
3842 
3843  __ bind(&non_function_call);
3844  __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
3845  __ bind(&do_call);
3846  // Set expected number of arguments to zero (not changing rax).
3847  __ Set(rbx, 0);
3848  __ SetCallKind(rcx, CALL_AS_METHOD);
3849  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
3850  RelocInfo::CODE_TARGET);
3851 }
3852 
3853 
3854 bool CEntryStub::NeedsImmovableCode() {
3855  return false;
3856 }
3857 
3858 
3860 #ifdef _WIN64
3861  return result_size_ == 1;
3862 #else
3863  return true;
3864 #endif
3865 }
3866 
3867 
3868 void CodeStub::GenerateStubsAheadOfTime() {
3871  // It is important that the store buffer overflow stubs are generated first.
3873 }
3874 
3875 
3876 void CodeStub::GenerateFPStubs() {
3877 }
3878 
3879 
3881  CEntryStub stub(1, kDontSaveFPRegs);
3882  stub.GetCode()->set_is_pregenerated(true);
3883  CEntryStub save_doubles(1, kSaveFPRegs);
3884  save_doubles.GetCode()->set_is_pregenerated(true);
3885 }
3886 
3887 
3888 void CEntryStub::GenerateCore(MacroAssembler* masm,
3889  Label* throw_normal_exception,
3890  Label* throw_termination_exception,
3891  Label* throw_out_of_memory_exception,
3892  bool do_gc,
3893  bool always_allocate_scope) {
3894  // rax: result parameter for PerformGC, if any.
3895  // rbx: pointer to C function (C callee-saved).
3896  // rbp: frame pointer (restored after C call).
3897  // rsp: stack pointer (restored after C call).
3898  // r14: number of arguments including receiver (C callee-saved).
3899  // r15: pointer to the first argument (C callee-saved).
3900  // This pointer is reused in LeaveExitFrame(), so it is stored in a
3901  // callee-saved register.
3902 
3903  // Simple results returned in rax (both AMD64 and Win64 calling conventions).
3904  // Complex results must be written to address passed as first argument.
3905  // AMD64 calling convention: a struct of two pointers in rax+rdx
3906 
3907  // Check stack alignment.
3908  if (FLAG_debug_code) {
3909  __ CheckStackAlignment();
3910  }
3911 
3912  if (do_gc) {
3913  // Pass failure code returned from last attempt as first argument to
3914  // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
3915  // stack is known to be aligned. This function takes one argument which is
3916  // passed in a register.
3917 #ifdef _WIN64
3918  __ movq(rcx, rax);
3919 #else // _WIN64
3920  __ movq(rdi, rax);
3921 #endif
3922  __ movq(kScratchRegister,
3925  __ call(kScratchRegister);
3926  }
3927 
3928  ExternalReference scope_depth =
3929  ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
3930  if (always_allocate_scope) {
3931  Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
3932  __ incl(scope_depth_operand);
3933  }
3934 
3935  // Call C function.
3936 #ifdef _WIN64
3937  // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
3938  // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
3939  __ movq(StackSpaceOperand(0), r14); // argc.
3940  __ movq(StackSpaceOperand(1), r15); // argv.
3941  if (result_size_ < 2) {
3942  // Pass a pointer to the Arguments object as the first argument.
3943  // Return result in single register (rax).
3944  __ lea(rcx, StackSpaceOperand(0));
3945  __ LoadAddress(rdx, ExternalReference::isolate_address());
3946  } else {
3947  ASSERT_EQ(2, result_size_);
3948  // Pass a pointer to the result location as the first argument.
3949  __ lea(rcx, StackSpaceOperand(2));
3950  // Pass a pointer to the Arguments object as the second argument.
3951  __ lea(rdx, StackSpaceOperand(0));
3952  __ LoadAddress(r8, ExternalReference::isolate_address());
3953  }
3954 
3955 #else // _WIN64
3956  // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
3957  __ movq(rdi, r14); // argc.
3958  __ movq(rsi, r15); // argv.
3959  __ movq(rdx, ExternalReference::isolate_address());
3960 #endif
3961  __ call(rbx);
3962  // Result is in rax - do not destroy this register!
3963 
3964  if (always_allocate_scope) {
3965  Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
3966  __ decl(scope_depth_operand);
3967  }
3968 
3969  // Check for failure result.
3970  Label failure_returned;
3971  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
3972 #ifdef _WIN64
3973  // If return value is on the stack, pop it to registers.
3974  if (result_size_ > 1) {
3975  ASSERT_EQ(2, result_size_);
3976  // Read result values stored on stack. Result is stored
3977  // above the four argument mirror slots and the two
3978  // Arguments object slots.
3979  __ movq(rax, Operand(rsp, 6 * kPointerSize));
3980  __ movq(rdx, Operand(rsp, 7 * kPointerSize));
3981  }
3982 #endif
3983  __ lea(rcx, Operand(rax, 1));
3984  // Lower 2 bits of rcx are 0 iff rax has failure tag.
3985  __ testl(rcx, Immediate(kFailureTagMask));
3986  __ j(zero, &failure_returned);
3987 
3988  // Exit the JavaScript to C++ exit frame.
3989  __ LeaveExitFrame(save_doubles_);
3990  __ ret(0);
3991 
3992  // Handling of failure.
3993  __ bind(&failure_returned);
3994 
3995  Label retry;
3996  // If the returned exception is RETRY_AFTER_GC, continue at the retry label.
3998  __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
3999  __ j(zero, &retry, Label::kNear);
4000 
4001  // Special handling of out of memory exceptions.
4003  __ cmpq(rax, kScratchRegister);
4004  __ j(equal, throw_out_of_memory_exception);
4005 
4006  // Retrieve the pending exception and clear the variable.
4007  ExternalReference pending_exception_address(
4008  Isolate::kPendingExceptionAddress, masm->isolate());
4009  Operand pending_exception_operand =
4010  masm->ExternalOperand(pending_exception_address);
4011  __ movq(rax, pending_exception_operand);
4012  __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
4013  __ movq(pending_exception_operand, rdx);
4014 
4015  // Special handling of termination exceptions which are uncatchable
4016  // by JavaScript code.
4017  __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
4018  __ j(equal, throw_termination_exception);
4019 
4020  // Handle normal exception.
4021  __ jmp(throw_normal_exception);
4022 
4023  // Retry.
4024  __ bind(&retry);
4025 }
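
// A plain-C++ sketch of the failure check right after the C call above:
// failure objects carry a tag in their low bits, and because the assembly
// asserts ((kFailureTag + 1) & kFailureTagMask) == 0, adding one and masking
// detects them in a single test. The mask value below is an assumption used
// only for illustration.
static bool IsFailureResultSketch(unsigned long long raw_result) {
  const unsigned long long kFailureTagMaskSketch = 3;  // assumed low-bit mask
  return ((raw_result + 1) & kFailureTagMaskSketch) == 0;
}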
4026 
4027 
4028 void CEntryStub::Generate(MacroAssembler* masm) {
4029  // rax: number of arguments including receiver
4030  // rbx: pointer to C function (C callee-saved)
4031  // rbp: frame pointer of calling JS frame (restored after C call)
4032  // rsp: stack pointer (restored after C call)
4033  // rsi: current context (restored)
4034 
4035  // NOTE: Invocations of builtins may return failure objects
4036  // instead of a proper result. The builtin entry handles
4037  // this by performing a garbage collection and retrying the
4038  // builtin once.
4039 
4040  // Enter the exit frame that transitions from JavaScript to C++.
4041 #ifdef _WIN64
4042  int arg_stack_space = (result_size_ < 2 ? 2 : 4);
4043 #else
4044  int arg_stack_space = 0;
4045 #endif
4046  __ EnterExitFrame(arg_stack_space, save_doubles_);
4047 
4048  // rax: Holds the context at this point, but should not be used.
4049  // On entry to code generated by GenerateCore, it must hold
4050  // a failure result if the collect_garbage argument to GenerateCore
4051  // is true. This failure result can be the result of code
4052  // generated by a previous call to GenerateCore. The value
4053  // of rax is then passed to Runtime::PerformGC.
4054  // rbx: pointer to builtin function (C callee-saved).
4055  // rbp: frame pointer of exit frame (restored after C call).
4056  // rsp: stack pointer (restored after C call).
4057  // r14: number of arguments including receiver (C callee-saved).
4058  // r15: argv pointer (C callee-saved).
4059 
4060  Label throw_normal_exception;
4061  Label throw_termination_exception;
4062  Label throw_out_of_memory_exception;
4063 
4064  // Call into the runtime system.
4065  GenerateCore(masm,
4066  &throw_normal_exception,
4067  &throw_termination_exception,
4068  &throw_out_of_memory_exception,
4069  false,
4070  false);
4071 
4072  // Do space-specific GC and retry runtime call.
4073  GenerateCore(masm,
4074  &throw_normal_exception,
4075  &throw_termination_exception,
4076  &throw_out_of_memory_exception,
4077  true,
4078  false);
4079 
4080  // Do full GC and retry runtime call one final time.
4081  Failure* failure = Failure::InternalError();
4082  __ movq(rax, failure, RelocInfo::NONE);
4083  GenerateCore(masm,
4084  &throw_normal_exception,
4085  &throw_termination_exception,
4086  &throw_out_of_memory_exception,
4087  true,
4088  true);
4089 
4090  __ bind(&throw_out_of_memory_exception);
4091  // Set external caught exception to false.
4092  Isolate* isolate = masm->isolate();
4093  ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
4094  isolate);
4095  __ Set(rax, static_cast<int64_t>(false));
4096  __ Store(external_caught, rax);
4097 
4098  // Set pending exception and rax to out of memory exception.
4099  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
4100  isolate);
4102  __ Store(pending_exception, rax);
4103  // Fall through to the next label.
4104 
4105  __ bind(&throw_termination_exception);
4106  __ ThrowUncatchable(rax);
4107 
4108  __ bind(&throw_normal_exception);
4109  __ Throw(rax);
4110 }
4111 
4112 
4113 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
4114  Label invoke, handler_entry, exit;
4115  Label not_outermost_js, not_outermost_js_2;
4116  { // NOLINT. Scope block confuses linter.
4117  MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
4118  // Set up frame.
4119  __ push(rbp);
4120  __ movq(rbp, rsp);
4121 
4122  // Push the stack frame type marker twice.
4123  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
4124  // Scratch register is neither callee-save, nor an argument register on any
4125  // platform. It's free to use at this point.
4126  // Cannot use smi-register for loading yet.
4127  __ movq(kScratchRegister,
4128  reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
4129  RelocInfo::NONE);
4130  __ push(kScratchRegister); // context slot
4131  __ push(kScratchRegister); // function slot
4132  // Save callee-saved registers (X64/Win64 calling conventions).
4133  __ push(r12);
4134  __ push(r13);
4135  __ push(r14);
4136  __ push(r15);
4137 #ifdef _WIN64
4138  __ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
4139  __ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
4140 #endif
4141  __ push(rbx);
4142  // TODO(X64): On Win64, if we ever use XMM6-XMM15, the low 64 bits are
4143  // callee save as well.
4144 
4145  // Set up the roots and smi constant registers.
4146  // Needs to be done before any further smi loads.
4147  __ InitializeSmiConstantRegister();
4148  __ InitializeRootRegister();
4149  }
4150 
4151  Isolate* isolate = masm->isolate();
4152 
4153  // Save copies of the top frame descriptor on the stack.
4154  ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
4155  {
4156  Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
4157  __ push(c_entry_fp_operand);
4158  }
4159 
4160  // If this is the outermost JS call, set js_entry_sp value.
4161  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
4162  __ Load(rax, js_entry_sp);
4163  __ testq(rax, rax);
4164  __ j(not_zero, &not_outermost_js);
4165  __ Push(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
4166  __ movq(rax, rbp);
4167  __ Store(js_entry_sp, rax);
4168  Label cont;
4169  __ jmp(&cont);
4170  __ bind(&not_outermost_js);
4171  __ Push(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
4172  __ bind(&cont);
4173 
4174  // Jump to a faked try block that does the invoke, with a faked catch
4175  // block that sets the pending exception.
4176  __ jmp(&invoke);
4177  __ bind(&handler_entry);
4178  handler_offset_ = handler_entry.pos();
4179  // Caught exception: Store result (exception) in the pending exception
4180  // field in the JSEnv and return a failure sentinel.
4181  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
4182  isolate);
4183  __ Store(pending_exception, rax);
4185  __ jmp(&exit);
4186 
4187  // Invoke: Link this frame into the handler chain. There's only one
4188  // handler block in this code object, so its index is 0.
4189  __ bind(&invoke);
4190  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
4191 
4192  // Clear any pending exceptions.
4193  __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
4194  __ Store(pending_exception, rax);
4195 
4196  // Fake a receiver (NULL).
4197  __ push(Immediate(0)); // receiver
4198 
4199  // Invoke the function by calling through JS entry trampoline builtin and
4200  // pop the faked function when we return. We load the address from an
4201  // external reference instead of inlining the call target address directly
4202  // in the code, because the builtin stubs may not have been generated yet
4203  // at the time this code is generated.
4204  if (is_construct) {
4205  ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
4206  isolate);
4207  __ Load(rax, construct_entry);
4208  } else {
4209  ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
4210  __ Load(rax, entry);
4211  }
4213  __ call(kScratchRegister);
4214 
4215  // Unlink this frame from the handler chain.
4216  __ PopTryHandler();
4217 
4218  __ bind(&exit);
4219  // Check if the current stack frame is marked as the outermost JS frame.
4220  __ pop(rbx);
4221  __ Cmp(rbx, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
4222  __ j(not_equal, &not_outermost_js_2);
4223  __ movq(kScratchRegister, js_entry_sp);
4224  __ movq(Operand(kScratchRegister, 0), Immediate(0));
4225  __ bind(&not_outermost_js_2);
4226 
4227  // Restore the top frame descriptor from the stack.
4228  { Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
4229  __ pop(c_entry_fp_operand);
4230  }
4231 
4232  // Restore callee-saved registers (X64 conventions).
4233  __ pop(rbx);
4234 #ifdef _WIN64
4235  // Callee saved in Win64 ABI, arguments/volatile in AMD64 ABI.
4236  __ pop(rsi);
4237  __ pop(rdi);
4238 #endif
4239  __ pop(r15);
4240  __ pop(r14);
4241  __ pop(r13);
4242  __ pop(r12);
4243  __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
4244 
4245  // Restore frame pointer and return.
4246  __ pop(rbp);
4247  __ ret(0);
4248 }
4249 
4250 
4251 void InstanceofStub::Generate(MacroAssembler* masm) {
4252  // Implements "value instanceof function" operator.
4253  // Expected input state with no inline cache:
4254  // rsp[0] : return address
4255  // rsp[1] : function pointer
4256  // rsp[2] : value
4257  // Expected input state with an inline one-element cache:
4258  // rsp[0] : return address
4259  // rsp[1] : offset from return address to location of inline cache
4260  // rsp[2] : function pointer
4261  // rsp[3] : value
4262  // Returns a bitwise zero to indicate that the value
4263  // is an instance of the function and anything else to
4264  // indicate that the value is not an instance.
4265 
4266  static const int kOffsetToMapCheckValue = 2;
4267  static const int kOffsetToResultValue = 18;
4268  // The last 4 bytes of the instruction sequence
4269  // movq(rdi, FieldOperand(rax, HeapObject::kMapOffset))
4270  // Move(kScratchRegister, FACTORY->the_hole_value())
4271  // in front of the hole value address.
4272  static const unsigned int kWordBeforeMapCheckValue = 0xBA49FF78;
4273  // The last 4 bytes of the instruction sequence
4274  // __ j(not_equal, &cache_miss);
4275  // __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
4276  // before the offset of the hole value in the root array.
4277  static const unsigned int kWordBeforeResultValue = 0x458B4909;
4278  // Only the inline check flag is supported on X64.
4279  ASSERT(flags_ == kNoFlags || HasCallSiteInlineCheck());
4280  int extra_stack_space = HasCallSiteInlineCheck() ? kPointerSize : 0;
4281 
4282  // Get the object - go slow case if it's a smi.
4283  Label slow;
4284 
4285  __ movq(rax, Operand(rsp, 2 * kPointerSize + extra_stack_space));
4286  __ JumpIfSmi(rax, &slow);
4287 
4288  // Check that the left hand is a JS object. Leave its map in rax.
4289  __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax);
4290  __ j(below, &slow);
4291  __ CmpInstanceType(rax, LAST_SPEC_OBJECT_TYPE);
4292  __ j(above, &slow);
4293 
4294  // Get the prototype of the function.
4295  __ movq(rdx, Operand(rsp, 1 * kPointerSize + extra_stack_space));
4296  // rdx is function, rax is map.
4297 
4298  // If there is a call site cache don't look in the global cache, but do the
4299  // real lookup and update the call site cache.
4300  if (!HasCallSiteInlineCheck()) {
4301  // Look up the function and the map in the instanceof cache.
4302  Label miss;
4303  __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
4304  __ j(not_equal, &miss, Label::kNear);
4305  __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
4306  __ j(not_equal, &miss, Label::kNear);
4307  __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
4308  __ ret(2 * kPointerSize);
4309  __ bind(&miss);
4310  }
4311 
4312  __ TryGetFunctionPrototype(rdx, rbx, &slow, true);
4313 
4314  // Check that the function prototype is a JS object.
4315  __ JumpIfSmi(rbx, &slow);
4316  __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
4317  __ j(below, &slow);
4318  __ CmpInstanceType(kScratchRegister, LAST_SPEC_OBJECT_TYPE);
4319  __ j(above, &slow);
4320 
4321  // Register mapping:
4322  // rax is object map.
4323  // rdx is function.
4324  // rbx is function prototype.
4325  if (!HasCallSiteInlineCheck()) {
4326  __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
4327  __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
4328  } else {
4329  // Get return address and delta to inlined map check.
4330  __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
4331  __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
4332  if (FLAG_debug_code) {
4333  __ movl(rdi, Immediate(kWordBeforeMapCheckValue));
4334  __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
4335  __ Assert(equal, "InstanceofStub unexpected call site cache (check).");
4336  }
4337  __ movq(kScratchRegister,
4338  Operand(kScratchRegister, kOffsetToMapCheckValue));
4339  __ movq(Operand(kScratchRegister, 0), rax);
4340  }
4341 
4343 
4344  // Loop through the prototype chain looking for the function prototype.
4345  Label loop, is_instance, is_not_instance;
4346  __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
4347  __ bind(&loop);
4348  __ cmpq(rcx, rbx);
4349  __ j(equal, &is_instance, Label::kNear);
4350  __ cmpq(rcx, kScratchRegister);
4351  // The code at is_not_instance assumes that kScratchRegister contains a
4352  // non-zero GCable value (the null object in this case).
4353  __ j(equal, &is_not_instance, Label::kNear);
4356  __ jmp(&loop);
4357 
4358  __ bind(&is_instance);
4359  if (!HasCallSiteInlineCheck()) {
4360  __ xorl(rax, rax);
4361  // Store bitwise zero in the cache. This is a Smi in GC terms.
4362  STATIC_ASSERT(kSmiTag == 0);
4363  __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
4364  } else {
4365  // Store offset of true in the root array at the inline check site.
4366  int true_offset = 0x100 +
4367  (Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
4368  // Assert it is a 1-byte signed value.
4369  ASSERT(true_offset >= 0 && true_offset < 0x100);
4370  __ movl(rax, Immediate(true_offset));
4371  __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
4372  __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
4373  __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
4374  if (FLAG_debug_code) {
4375  __ movl(rax, Immediate(kWordBeforeResultValue));
4376  __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
4377  __ Assert(equal, "InstanceofStub unexpected call site cache (mov).");
4378  }
4379  __ Set(rax, 0);
4380  }
4381  __ ret(2 * kPointerSize + extra_stack_space);
4382 
4383  __ bind(&is_not_instance);
4384  if (!HasCallSiteInlineCheck()) {
4385  // We have to store a non-zero value in the cache.
4386  __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
4387  } else {
4388  // Store offset of false in the root array at the inline check site.
4389  int false_offset = 0x100 +
4390  (Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
4391  // Assert it is a 1-byte signed value.
4392  ASSERT(false_offset >= 0 && false_offset < 0x100);
4393  __ movl(rax, Immediate(false_offset));
4394  __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
4395  __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
4396  __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
4397  if (FLAG_debug_code) {
4398  __ movl(rax, Immediate(kWordBeforeResultValue));
4399  __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
4400  __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
4401  }
4402  }
4403  __ ret(2 * kPointerSize + extra_stack_space);
4404 
4405  // Slow-case: Go through the JavaScript implementation.
4406  __ bind(&slow);
4407  if (HasCallSiteInlineCheck()) {
4408  // Remove extra value from the stack.
4409  __ pop(rcx);
4410  __ pop(rax);
4411  __ push(rcx);
4412  }
4413  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
4414 }
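
// A plain-C++ sketch of the prototype-chain walk above: the value is an
// instance of the function exactly when the function's prototype object
// appears on the value's prototype chain, and the walk terminates at null.
// The struct below is an illustrative stand-in, not V8's object layout.
struct PrototypeChainNodeSketch {
  PrototypeChainNodeSketch* prototype;  // 0 (null) ends the chain.
};

static bool IsInstanceOfSketch(PrototypeChainNodeSketch* start,
                               PrototypeChainNodeSketch* function_prototype) {
  for (PrototypeChainNodeSketch* current = start;
       current != 0;
       current = current->prototype) {
    if (current == function_prototype) return true;
  }
  return false;
}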
4415 
4416 
4417 // Passing arguments in registers is not supported.
4418 Register InstanceofStub::left() { return no_reg; }
4419 
4420 
4421 Register InstanceofStub::right() { return no_reg; }
4422 
4423 
4424 int CompareStub::MinorKey() {
4425  // Encode the three parameters in a unique 16-bit value. To avoid duplicate
4426  // stubs, the never-NaN-NaN condition is only taken into account if the
4427  // condition is equal.
4428  ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
4429  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
4430  return ConditionField::encode(static_cast<unsigned>(cc_))
4431  | RegisterField::encode(false) // lhs_ and rhs_ are not used
4432  | StrictField::encode(strict_)
4433  | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
4434  | IncludeNumberCompareField::encode(include_number_compare_)
4435  | IncludeSmiCompareField::encode(include_smi_compare_);
4436 }
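
// A small sketch of the kind of bit-field packing MinorKey() relies on: each
// stub parameter occupies its own field of one small integer so that every
// distinct parameter combination yields a distinct stub key. The field
// positions below are illustrative, not the actual BitField layout.
static unsigned EncodeCompareMinorKeySketch(unsigned condition,
                                            bool strict,
                                            bool never_nan_nan,
                                            bool include_number_compare,
                                            bool include_smi_compare) {
  unsigned key = condition & 0xFFFu;                       // 12 bits assumed
  key |= (strict ? 1u : 0u) << 12;
  key |= (never_nan_nan ? 1u : 0u) << 13;
  key |= (include_number_compare ? 1u : 0u) << 14;
  key |= (include_smi_compare ? 1u : 0u) << 15;
  return key;
}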
4437 
4438 
4439 // Unfortunately you have to run without snapshots to see most of these
4440 // names in the profile since most compare stubs end up in the snapshot.
4441 void CompareStub::PrintName(StringStream* stream) {
4442  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
4443  const char* cc_name;
4444  switch (cc_) {
4445  case less: cc_name = "LT"; break;
4446  case greater: cc_name = "GT"; break;
4447  case less_equal: cc_name = "LE"; break;
4448  case greater_equal: cc_name = "GE"; break;
4449  case equal: cc_name = "EQ"; break;
4450  case not_equal: cc_name = "NE"; break;
4451  default: cc_name = "UnknownCondition"; break;
4452  }
4453  bool is_equality = cc_ == equal || cc_ == not_equal;
4454  stream->Add("CompareStub_%s", cc_name);
4455  if (strict_ && is_equality) stream->Add("_STRICT");
4456  if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
4457  if (!include_number_compare_) stream->Add("_NO_NUMBER");
4458  if (!include_smi_compare_) stream->Add("_NO_SMI");
4459 }
4460 
4461 
4462 // -------------------------------------------------------------------------
4463 // StringCharCodeAtGenerator
4464 
4465 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
4466  Label flat_string;
4467  Label ascii_string;
4468  Label got_char_code;
4469  Label sliced_string;
4470 
4471  // If the receiver is a smi trigger the non-string case.
4472  __ JumpIfSmi(object_, receiver_not_string_);
4473 
4474  // Fetch the instance type of the receiver into result register.
4475  __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
4476  __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
4477  // If the receiver is not a string trigger the non-string case.
4478  __ testb(result_, Immediate(kIsNotStringMask));
4479  __ j(not_zero, receiver_not_string_);
4480 
4481  // If the index is non-smi trigger the non-smi case.
4482  __ JumpIfNotSmi(index_, &index_not_smi_);
4483  __ bind(&got_smi_index_);
4484 
4485  // Check for index out of range.
4486  __ SmiCompare(index_, FieldOperand(object_, String::kLengthOffset));
4487  __ j(above_equal, index_out_of_range_);
4488 
4489  __ SmiToInteger32(index_, index_);
4490 
4492  masm, object_, index_, result_, &call_runtime_);
4493 
4494  __ Integer32ToSmi(result_, result_);
4495  __ bind(&exit_);
4496 }
4497 
4498 
4500  MacroAssembler* masm,
4501  const RuntimeCallHelper& call_helper) {
4502  __ Abort("Unexpected fallthrough to CharCodeAt slow case");
4503 
4504  Factory* factory = masm->isolate()->factory();
4505  // Index is not a smi.
4506  __ bind(&index_not_smi_);
4507  // If index is a heap number, try converting it to an integer.
4508  __ CheckMap(index_,
4509  factory->heap_number_map(),
4510  index_not_number_,
4512  call_helper.BeforeCall(masm);
4513  __ push(object_);
4514  __ push(index_); // Consumed by runtime conversion function.
4515  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
4516  __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
4517  } else {
4518  ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
4519  // NumberToSmi discards numbers that are not exact integers.
4520  __ CallRuntime(Runtime::kNumberToSmi, 1);
4521  }
4522  if (!index_.is(rax)) {
4523  // Save the conversion result before the pop instructions below
4524  // have a chance to overwrite it.
4525  __ movq(index_, rax);
4526  }
4527  __ pop(object_);
4528  // Reload the instance type.
4529  __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
4530  __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
4531  call_helper.AfterCall(masm);
4532  // If index is still not a smi, it must be out of range.
4533  __ JumpIfNotSmi(index_, index_out_of_range_);
4534  // Otherwise, return to the fast path.
4535  __ jmp(&got_smi_index_);
4536 
4537  // Call runtime. We get here when the receiver is a string and the
4538  // index is a number, but the code for getting the actual character
4539  // is too complex (e.g., when the string needs to be flattened).
4540  __ bind(&call_runtime_);
4541  call_helper.BeforeCall(masm);
4542  __ push(object_);
4543  __ Integer32ToSmi(index_, index_);
4544  __ push(index_);
4545  __ CallRuntime(Runtime::kStringCharCodeAt, 2);
4546  if (!result_.is(rax)) {
4547  __ movq(result_, rax);
4548  }
4549  call_helper.AfterCall(masm);
4550  __ jmp(&exit_);
4551 
4552  __ Abort("Unexpected fallthrough from CharCodeAt slow case");
4553 }
4554 
4555 
4556 // -------------------------------------------------------------------------
4557 // StringCharFromCodeGenerator
4558 
4559 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
4560  // Fast case of Heap::LookupSingleCharacterStringFromCode.
4561  __ JumpIfNotSmi(code_, &slow_case_);
4562  __ SmiCompare(code_, Smi::FromInt(String::kMaxAsciiCharCode));
4563  __ j(above, &slow_case_);
4564 
4565  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
4566  SmiIndex index = masm->SmiToIndex(kScratchRegister, code_, kPointerSizeLog2);
4567  __ movq(result_, FieldOperand(result_, index.reg, index.scale,
4569  __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
4570  __ j(equal, &slow_case_);
4571  __ bind(&exit_);
4572 }
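
// A plain-C++ sketch of the fast path above: a small cache maps each ASCII
// char code directly to its one-character string, and a code that is out of
// range or whose cache slot still holds undefined falls through to the slow
// case. The 127 limit mirrors String::kMaxAsciiCharCode; the pointer-based
// cache is an illustrative stand-in for the root-array entry used above.
static void* SingleCharacterStringLookupSketch(void* const* cache,
                                               int char_code,
                                               void* undefined_value,
                                               bool* needs_slow_case) {
  if (char_code < 0 || char_code > 127) {
    *needs_slow_case = true;
    return undefined_value;
  }
  void* result = cache[char_code];
  *needs_slow_case = (result == undefined_value);
  return result;
}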
4573 
4574 
4576  MacroAssembler* masm,
4577  const RuntimeCallHelper& call_helper) {
4578  __ Abort("Unexpected fallthrough to CharFromCode slow case");
4579 
4580  __ bind(&slow_case_);
4581  call_helper.BeforeCall(masm);
4582  __ push(code_);
4583  __ CallRuntime(Runtime::kCharFromCode, 1);
4584  if (!result_.is(rax)) {
4585  __ movq(result_, rax);
4586  }
4587  call_helper.AfterCall(masm);
4588  __ jmp(&exit_);
4589 
4590  __ Abort("Unexpected fallthrough from CharFromCode slow case");
4591 }
4592 
4593 
4594 // -------------------------------------------------------------------------
4595 // StringCharAtGenerator
4596 
4597 void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
4598  char_code_at_generator_.GenerateFast(masm);
4599  char_from_code_generator_.GenerateFast(masm);
4600 }
4601 
4602 
4604  MacroAssembler* masm,
4605  const RuntimeCallHelper& call_helper) {
4606  char_code_at_generator_.GenerateSlow(masm, call_helper);
4607  char_from_code_generator_.GenerateSlow(masm, call_helper);
4608 }
4609 
4610 
4611 void StringAddStub::Generate(MacroAssembler* masm) {
4612  Label call_runtime, call_builtin;
4613  Builtins::JavaScript builtin_id = Builtins::ADD;
4614 
4615  // Load the two arguments.
4616  __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument (left).
4617  __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument (right).
4618 
4619  // Make sure that both arguments are strings if not known in advance.
4620  if (flags_ == NO_STRING_ADD_FLAGS) {
4621  __ JumpIfSmi(rax, &call_runtime);
4622  __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
4623  __ j(above_equal, &call_runtime);
4624 
4625  // First argument is a string, test second.
4626  __ JumpIfSmi(rdx, &call_runtime);
4627  __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
4628  __ j(above_equal, &call_runtime);
4629  } else {
4630  // Here at least one of the arguments is definitely a string.
4631  // We convert the one that is not known to be a string.
4632  if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
4633  ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
4634  GenerateConvertArgument(masm, 2 * kPointerSize, rax, rbx, rcx, rdi,
4635  &call_builtin);
4636  builtin_id = Builtins::STRING_ADD_RIGHT;
4637  } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
4638  ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
4639  GenerateConvertArgument(masm, 1 * kPointerSize, rdx, rbx, rcx, rdi,
4640  &call_builtin);
4641  builtin_id = Builtins::STRING_ADD_LEFT;
4642  }
4643  }
4644 
4645  // Both arguments are strings.
4646  // rax: first string
4647  // rdx: second string
4648  // Check if either of the strings is empty. In that case return the other.
4649  Label second_not_zero_length, both_not_zero_length;
4651  __ SmiTest(rcx);
4652  __ j(not_zero, &second_not_zero_length, Label::kNear);
4653  // Second string is empty, result is first string which is already in rax.
4654  Counters* counters = masm->isolate()->counters();
4655  __ IncrementCounter(counters->string_add_native(), 1);
4656  __ ret(2 * kPointerSize);
4657  __ bind(&second_not_zero_length);
4659  __ SmiTest(rbx);
4660  __ j(not_zero, &both_not_zero_length, Label::kNear);
4661  // First string is empty, result is second string which is in rdx.
4662  __ movq(rax, rdx);
4663  __ IncrementCounter(counters->string_add_native(), 1);
4664  __ ret(2 * kPointerSize);
4665 
4666  // Both strings are non-empty.
4667  // rax: first string
4668  // rbx: length of first string
4669  // rcx: length of second string
4670  // rdx: second string
4671  // r8: map of first string (if flags_ == NO_STRING_ADD_FLAGS)
4672  // r9: map of second string (if flags_ == NO_STRING_ADD_FLAGS)
4673  Label string_add_flat_result, longer_than_two;
4674  __ bind(&both_not_zero_length);
4675 
4676  // If arguments were known to be strings, maps are not loaded to r8 and r9
4677  // by the code above.
4678  if (flags_ != NO_STRING_ADD_FLAGS) {
4681  }
4682  // Get the instance types of the two strings as they will be needed soon.
4685 
4686  // Look at the length of the result of adding the two strings.
4688  __ SmiAdd(rbx, rbx, rcx);
4689  // Use the symbol table when adding two one-character strings, as it
4690  // helps later optimizations to return a symbol here.
4691  __ SmiCompare(rbx, Smi::FromInt(2));
4692  __ j(not_equal, &longer_than_two);
4693 
4694  // Check that both strings are non-external ASCII strings.
4695  __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
4696  &call_runtime);
4697 
4698  // Get the two characters forming the substring.
4701 
4702  // Try to look up the two-character string in the symbol table. If it is
4703  // not found, just allocate a new one.
4704  Label make_two_character_string, make_flat_ascii_string;
4706  masm, rbx, rcx, r14, r11, rdi, r15, &make_two_character_string);
4707  __ IncrementCounter(counters->string_add_native(), 1);
4708  __ ret(2 * kPointerSize);
4709 
4710  __ bind(&make_two_character_string);
4711  __ Set(rdi, 2);
4712  __ AllocateAsciiString(rax, rdi, r8, r9, r11, &call_runtime);
4713  // rbx - first byte: first character
4714  // rbx - second byte: *maybe* second character
4715  // Make sure that the second byte of rbx contains the second character.
4717  __ shll(rcx, Immediate(kBitsPerByte));
4718  __ orl(rbx, rcx);
4719  // Write both characters to the new string.
4721  __ IncrementCounter(counters->string_add_native(), 1);
4722  __ ret(2 * kPointerSize);
4723 
4724  __ bind(&longer_than_two);
4725  // Check if resulting string will be flat.
4726  __ SmiCompare(rbx, Smi::FromInt(ConsString::kMinLength));
4727  __ j(below, &string_add_flat_result);
4728  // Handle exceptionally long strings in the runtime system.
4729  STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
4730  __ SmiCompare(rbx, Smi::FromInt(String::kMaxLength));
4731  __ j(above, &call_runtime);
4732 
4733  // If result is not supposed to be flat, allocate a cons string object. If
4734  // both strings are ASCII the result is an ASCII cons string.
4735  // rax: first string
4736  // rbx: length of resulting flat string
4737  // rdx: second string
4738  // r8: instance type of first string
4739  // r9: instance type of second string
4740  Label non_ascii, allocated, ascii_data;
4741  __ movl(rcx, r8);
4742  __ and_(rcx, r9);
4745  __ testl(rcx, Immediate(kStringEncodingMask));
4746  __ j(zero, &non_ascii);
4747  __ bind(&ascii_data);
4748  // Allocate an ASCII cons string.
4749  __ AllocateAsciiConsString(rcx, rdi, no_reg, &call_runtime);
4750  __ bind(&allocated);
4751  // Fill the fields of the cons string.
4754  Immediate(String::kEmptyHashField));
4757  __ movq(rax, rcx);
4758  __ IncrementCounter(counters->string_add_native(), 1);
4759  __ ret(2 * kPointerSize);
4760  __ bind(&non_ascii);
4761  // At least one of the strings is two-byte. Check whether it happens
4762  // to contain only ASCII characters.
4763  // rcx: first instance type AND second instance type.
4764  // r8: first instance type.
4765  // r9: second instance type.
4766  __ testb(rcx, Immediate(kAsciiDataHintMask));
4767  __ j(not_zero, &ascii_data);
4768  __ xor_(r8, r9);
4770  __ andb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
4771  __ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
4772  __ j(equal, &ascii_data);
4773  // Allocate a two byte cons string.
4774  __ AllocateTwoByteConsString(rcx, rdi, no_reg, &call_runtime);
4775  __ jmp(&allocated);
4776 
4777  // We cannot encounter sliced strings or cons strings here since:
4779  // Handle creating a flat result from either external or sequential strings.
4780  // Locate the first characters' locations.
4781  // rax: first string
4782  // rbx: length of resulting flat string as smi
4783  // rdx: second string
4784  // r8: instance type of first string
4785  // r9: instance type of second string
4786  Label first_prepared, second_prepared;
4787  Label first_is_sequential, second_is_sequential;
4788  __ bind(&string_add_flat_result);
4789 
4790  __ SmiToInteger32(r14, FieldOperand(rax, SeqString::kLengthOffset));
4791  // r14: length of first string
4793  __ testb(r8, Immediate(kStringRepresentationMask));
4794  __ j(zero, &first_is_sequential, Label::kNear);
4795  // Rule out short external string and load string resource.
4797  __ testb(r8, Immediate(kShortExternalStringMask));
4798  __ j(not_zero, &call_runtime);
4800  __ jmp(&first_prepared, Label::kNear);
4801  __ bind(&first_is_sequential);
4804  __ bind(&first_prepared);
4805 
4806  // Check whether both strings have same encoding.
4807  __ xorl(r8, r9);
4808  __ testb(r8, Immediate(kStringEncodingMask));
4809  __ j(not_zero, &call_runtime);
4810 
4811  __ SmiToInteger32(r15, FieldOperand(rdx, SeqString::kLengthOffset));
4812  // r15: length of second string
4814  __ testb(r9, Immediate(kStringRepresentationMask));
4815  __ j(zero, &second_is_sequential, Label::kNear);
4816  // Rule out short external string and load string resource.
4818  __ testb(r9, Immediate(kShortExternalStringMask));
4819  __ j(not_zero, &call_runtime);
4821  __ jmp(&second_prepared, Label::kNear);
4822  __ bind(&second_is_sequential);
4825  __ bind(&second_prepared);
4826 
4827  Label non_ascii_string_add_flat_result;
4828  // r9: instance type of second string
4829  // First string and second string have the same encoding.
4831  __ SmiToInteger32(rbx, rbx);
4832  __ testb(r9, Immediate(kStringEncodingMask));
4833  __ j(zero, &non_ascii_string_add_flat_result);
4834 
4835  __ bind(&make_flat_ascii_string);
4836  // Both strings are ASCII strings. As they are short they are both flat.
4837  __ AllocateAsciiString(rax, rbx, rdi, r8, r9, &call_runtime);
4838  // rax: result string
4839  // Locate first character of result.
4841  // rcx: first char of first string
4842  // rbx: first character of result
4843  // r14: length of first string
4845  // rbx: next character of result
4846  // rdx: first char of second string
4847  // r15: length of second string
4849  __ IncrementCounter(counters->string_add_native(), 1);
4850  __ ret(2 * kPointerSize);
4851 
4852  __ bind(&non_ascii_string_add_flat_result);
4853  // Both strings are two-byte strings. As they are short they are both flat.
4854  __ AllocateTwoByteString(rax, rbx, rdi, r8, r9, &call_runtime);
4855  // rax: result string
4856  // Locate first character of result.
4858  // rcx: first char of first string
4859  // rbx: first character of result
4860  // r14: length of first string
4862  // rbx: next character of result
4863  // rdx: first char of second string
4864  // r15: length of second string
4866  __ IncrementCounter(counters->string_add_native(), 1);
4867  __ ret(2 * kPointerSize);
4868 
4869  // Just jump to runtime to add the two strings.
4870  __ bind(&call_runtime);
4871  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
4872 
4873  if (call_builtin.is_linked()) {
4874  __ bind(&call_builtin);
4875  __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
4876  }
4877 }
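
// A plain-C++ sketch of the length-driven decisions in the stub above: an
// empty operand short-circuits to the other string, a combined length of two
// goes through the two-character symbol table, results shorter than
// ConsString::kMinLength are flattened, over-long results go to the runtime,
// and everything else becomes a cons string. The constants below are
// illustrative assumptions, not V8's authoritative values.
enum StringAddStrategySketch {
  RETURN_FIRST_SKETCH,
  RETURN_SECOND_SKETCH,
  TWO_CHARACTER_LOOKUP_SKETCH,
  FLAT_RESULT_SKETCH,
  CONS_STRING_SKETCH,
  CALL_RUNTIME_SKETCH
};

static StringAddStrategySketch ChooseStringAddStrategySketch(int first_length,
                                                             int second_length) {
  const int kConsMinLengthSketch = 13;        // assumed ConsString::kMinLength
  const int kMaxLengthSketch = 0x3FFFFFFF;    // assumed String::kMaxLength bound
  if (second_length == 0) return RETURN_FIRST_SKETCH;
  if (first_length == 0) return RETURN_SECOND_SKETCH;
  int total = first_length + second_length;
  if (total == 2) return TWO_CHARACTER_LOOKUP_SKETCH;
  if (total < kConsMinLengthSketch) return FLAT_RESULT_SKETCH;
  if (total > kMaxLengthSketch) return CALL_RUNTIME_SKETCH;
  return CONS_STRING_SKETCH;
}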
4878 
4879 
4880 void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
4881  int stack_offset,
4882  Register arg,
4883  Register scratch1,
4884  Register scratch2,
4885  Register scratch3,
4886  Label* slow) {
4887  // First check if the argument is already a string.
4888  Label not_string, done;
4889  __ JumpIfSmi(arg, &not_string);
4890  __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
4891  __ j(below, &done);
4892 
4893  // Check the number to string cache.
4894  Label not_cached;
4895  __ bind(&not_string);
4896  // Puts the cached result into scratch1.
4898  arg,
4899  scratch1,
4900  scratch2,
4901  scratch3,
4902  false,
4903  &not_cached);
4904  __ movq(arg, scratch1);
4905  __ movq(Operand(rsp, stack_offset), arg);
4906  __ jmp(&done);
4907 
4908  // Check if the argument is a safe string wrapper.
4909  __ bind(&not_cached);
4910  __ JumpIfSmi(arg, slow);
4911  __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1.
4912  __ j(not_equal, slow);
4913  __ testb(FieldOperand(scratch1, Map::kBitField2Offset),
4915  __ j(zero, slow);
4916  __ movq(arg, FieldOperand(arg, JSValue::kValueOffset));
4917  __ movq(Operand(rsp, stack_offset), arg);
4918 
4919  __ bind(&done);
4920 }
4921 
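For orientation, the conversion above tries three increasingly cheap-to-expensive paths before giving up: the argument may already be a string, it may be a number with a cached string form, or it may be a JSValue string wrapper that is safe to unwrap. A standalone C++ sketch of that decision order (the parameters and names below are illustrative placeholders, not V8 API):

// Sketch only: classifies which path GenerateConvertArgument would take for
// one argument. The boolean inputs stand in for the checks done in the stub.
enum ConvertArgumentPath {
  kAlreadyString,       // instance type below FIRST_NONSTRING_TYPE
  kCachedNumberString,  // hit in the number-to-string cache
  kSafeStringWrapper,   // JSValue whose map allows unwrapping kValueOffset
  kSlowPath             // everything else: fall through to |slow|
};

ConvertArgumentPath ClassifyArgument(bool is_string,
                                     bool number_string_cache_hit,
                                     bool is_safe_string_wrapper) {
  if (is_string) return kAlreadyString;
  if (number_string_cache_hit) return kCachedNumberString;
  if (is_safe_string_wrapper) return kSafeStringWrapper;
  return kSlowPath;
}
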
4922 
4923 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
4924  Register dest,
4925  Register src,
4926  Register count,
4927  bool ascii) {
4928  Label loop;
4929  __ bind(&loop);
4930  // This loop just copies one character at a time, as it is only used for very
4931  // short strings.
4932  if (ascii) {
4933  __ movb(kScratchRegister, Operand(src, 0));
4934  __ movb(Operand(dest, 0), kScratchRegister);
4935  __ incq(src);
4936  __ incq(dest);
4937  } else {
4938  __ movzxwl(kScratchRegister, Operand(src, 0));
4939  __ movw(Operand(dest, 0), kScratchRegister);
4940  __ addq(src, Immediate(2));
4941  __ addq(dest, Immediate(2));
4942  }
4943  __ decl(count);
4944  __ j(not_zero, &loop);
4945 }
4946 
4947 
4948 void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
4949  Register dest,
4950  Register src,
4951  Register count,
4952  bool ascii) {
4953  // Copy characters using rep movs of quadwords (8 bytes at a time), without
4954  // aligning the destination first. The remaining bytes are copied one at a
4955  // time after the rep movs.
4956  // Count is a positive int32; dest and src are character pointers.
4957  ASSERT(dest.is(rdi)); // rep movs destination
4958  ASSERT(src.is(rsi)); // rep movs source
4959  ASSERT(count.is(rcx)); // rep movs count
4960 
4961  // Nothing to do for zero characters.
4962  Label done;
4963  __ testl(count, count);
4964  __ j(zero, &done, Label::kNear);
4965 
4966  // Make count the number of bytes to copy.
4967  if (!ascii) {
4968  STATIC_ASSERT(2 == sizeof(uc16));
4969  __ addl(count, count);
4970  }
4971 
4972  // Don't enter the rep movs if there are fewer than 8 bytes to copy.
4973  Label last_bytes;
4974  __ testl(count, Immediate(~7));
4975  __ j(zero, &last_bytes, Label::kNear);
4976 
4977  // Copy from rsi (src) to rdi (dest) using the rep movs instruction.
4978  __ movl(kScratchRegister, count);
4979  __ shr(count, Immediate(3)); // Number of quadwords to copy.
4980  __ repmovsq();
4981 
4982  // Find number of bytes left.
4983  __ movl(count, kScratchRegister);
4984  __ and_(count, Immediate(7));
4985 
4986  // Check if there are more bytes to copy.
4987  __ bind(&last_bytes);
4988  __ testl(count, count);
4989  __ j(zero, &done, Label::kNear);
4990 
4991  // Copy remaining characters.
4992  Label loop;
4993  __ bind(&loop);
4994  __ movb(kScratchRegister, Operand(src, 0));
4995  __ movb(Operand(dest, 0), kScratchRegister);
4996  __ incq(src);
4997  __ incq(dest);
4998  __ decl(count);
4999  __ j(not_zero, &loop);
5000 
5001  __ bind(&done);
5002 }
5003 
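The routine above follows the usual bulk-then-tail copy scheme: double the count for two-byte strings, move whole 8-byte quadwords with rep movsq, then finish the remaining 0-7 bytes one at a time. A minimal C++ sketch of the same byte-level behaviour (illustrative only, not the generated code):

#include <cstdint>
#include <cstring>

// count is the number of characters; ascii selects 1-byte vs. 2-byte chars.
void CopyCharactersRep(uint8_t* dest, const uint8_t* src,
                       uint32_t count, bool ascii) {
  if (count == 0) return;                      // nothing to do
  uint32_t bytes = ascii ? count : count * 2;  // "addl(count, count)"
  uint32_t chunk = (bytes >> 3) << 3;          // whole quadwords, as rep movsq
  std::memcpy(dest, src, chunk);
  for (uint32_t i = chunk; i < bytes; i++) {   // copy the 0-7 trailing bytes
    dest[i] = src[i];
  }
}
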
5004 void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
5005  Register c1,
5006  Register c2,
5007  Register scratch1,
5008  Register scratch2,
5009  Register scratch3,
5010  Register scratch4,
5011  Label* not_found) {
5012  // Register scratch3 is the general scratch register in this function.
5013  Register scratch = scratch3;
5014 
5015  // Make sure that both characters are not digits, as such strings have a
5016  // different hash algorithm. Don't try to look for these in the symbol table.
5017  Label not_array_index;
5018  __ leal(scratch, Operand(c1, -'0'));
5019  __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
5020  __ j(above, &not_array_index, Label::kNear);
5021  __ leal(scratch, Operand(c2, -'0'));
5022  __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
5023  __ j(below_equal, not_found);
5024 
5025  __ bind(&not_array_index);
5026  // Calculate the two character string hash.
5027  Register hash = scratch1;
5028  GenerateHashInit(masm, hash, c1, scratch);
5029  GenerateHashAddCharacter(masm, hash, c2, scratch);
5030  GenerateHashGetHash(masm, hash, scratch);
5031 
5032  // Collect the two characters in a register.
5033  Register chars = c1;
5034  __ shl(c2, Immediate(kBitsPerByte));
5035  __ orl(chars, c2);
5036 
5037  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5038  // hash: hash of two character string.
5039 
5040  // Load the symbol table.
5041  Register symbol_table = c2;
5042  __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
5043 
5044  // Calculate capacity mask from the symbol table capacity.
5045  Register mask = scratch2;
5046  __ SmiToInteger32(mask,
5048  __ decl(mask);
5049 
5050  Register map = scratch4;
5051 
5052  // Registers
5053  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5054  // hash: hash of two character string (32-bit int)
5055  // symbol_table: symbol table
5056  // mask: capacity mask (32-bit int)
5057  // map: -
5058  // scratch: -
5059 
5060  // Perform a number of probes in the symbol table.
5061  static const int kProbes = 4;
5062  Label found_in_symbol_table;
5063  Label next_probe[kProbes];
5064  Register candidate = scratch; // Scratch register contains candidate.
5065  for (int i = 0; i < kProbes; i++) {
5066  // Calculate entry in symbol table.
5067  __ movl(scratch, hash);
5068  if (i > 0) {
5069  __ addl(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
5070  }
5071  __ andl(scratch, mask);
5072 
5073  // Load the entry from the symbol table.
5075  __ movq(candidate,
5076  FieldOperand(symbol_table,
5077  scratch,
5080 
5081  // If entry is undefined no string with this hash can be found.
5082  Label is_string;
5083  __ CmpObjectType(candidate, ODDBALL_TYPE, map);
5084  __ j(not_equal, &is_string, Label::kNear);
5085 
5086  __ CompareRoot(candidate, Heap::kUndefinedValueRootIndex);
5087  __ j(equal, not_found);
5088  // Must be the hole (deleted entry).
5089  if (FLAG_debug_code) {
5090  __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
5091  __ cmpq(kScratchRegister, candidate);
5092  __ Assert(equal, "oddball in symbol table is not undefined or the hole");
5093  }
5094  __ jmp(&next_probe[i]);
5095 
5096  __ bind(&is_string);
5097 
5098  // If length is not 2 the string is not a candidate.
5099  __ SmiCompare(FieldOperand(candidate, String::kLengthOffset),
5100  Smi::FromInt(2));
5101  __ j(not_equal, &next_probe[i]);
5102 
5103  // We use kScratchRegister as a temporary register in assumption that
5104  // JumpIfInstanceTypeIsNotSequentialAscii does not use it implicitly
5105  Register temp = kScratchRegister;
5106 
5107  // Check that the candidate is a non-external ASCII string.
5108  __ movzxbl(temp, FieldOperand(map, Map::kInstanceTypeOffset));
5109  __ JumpIfInstanceTypeIsNotSequentialAscii(
5110  temp, temp, &next_probe[i]);
5111 
5112  // Check if the two characters match.
5113  __ movl(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
5114  __ andl(temp, Immediate(0x0000ffff));
5115  __ cmpl(chars, temp);
5116  __ j(equal, &found_in_symbol_table);
5117  __ bind(&next_probe[i]);
5118  }
5119 
5120  // No matching 2 character string found by probing.
5121  __ jmp(not_found);
5122 
5123  // Scratch register contains result when we fall through to here.
5124  Register result = candidate;
5125  __ bind(&found_in_symbol_table);
5126  if (!result.is(rax)) {
5127  __ movq(rax, result);
5128  }
5129 }
5130 
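The probe loop above is a bounded open-addressing lookup: the two characters are packed into one integer, hashed with the string hasher, and up to kProbes slots of the symbol table are inspected. A standalone C++ sketch of the control flow; the entry layout and probe_offset callback below are simplified placeholders rather than V8's SymbolTable API:

#include <cstddef>
#include <cstdint>

struct Entry {
  bool is_free;       // stands in for the undefined sentinel
  bool is_deleted;    // stands in for the hole sentinel
  uint16_t chars[2];  // only two-character sequential ASCII entries matter here
};

const Entry* ProbeTwoCharString(const Entry* table, size_t capacity,
                                uint32_t hash, uint16_t c1, uint16_t c2,
                                uint32_t (*probe_offset)(int)) {
  const int kProbes = 4;                                 // as in the stub
  uint32_t mask = static_cast<uint32_t>(capacity - 1);   // capacity is 2^n
  for (int i = 0; i < kProbes; i++) {
    uint32_t index = (hash + (i > 0 ? probe_offset(i) : 0)) & mask;
    const Entry& e = table[index];
    if (e.is_free) return nullptr;     // undefined slot: string cannot exist
    if (e.is_deleted) continue;        // the hole: deleted entry, keep probing
    if (e.chars[0] == c1 && e.chars[1] == c2) return &e;
  }
  return nullptr;                      // give up, as the stub's jmp(not_found)
}
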
5131 
5132 void StringHelper::GenerateHashInit(MacroAssembler* masm,
5133  Register hash,
5134  Register character,
5135  Register scratch) {
5136  // hash = (seed + character) + ((seed + character) << 10);
5137  __ LoadRoot(scratch, Heap::kHashSeedRootIndex);
5138  __ SmiToInteger32(scratch, scratch);
5139  __ addl(scratch, character);
5140  __ movl(hash, scratch);
5141  __ shll(scratch, Immediate(10));
5142  __ addl(hash, scratch);
5143  // hash ^= hash >> 6;
5144  __ movl(scratch, hash);
5145  __ shrl(scratch, Immediate(6));
5146  __ xorl(hash, scratch);
5147 }
5148 
5149 
5150 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
5151  Register hash,
5152  Register character,
5153  Register scratch) {
5154  // hash += character;
5155  __ addl(hash, character);
5156  // hash += hash << 10;
5157  __ movl(scratch, hash);
5158  __ shll(scratch, Immediate(10));
5159  __ addl(hash, scratch);
5160  // hash ^= hash >> 6;
5161  __ movl(scratch, hash);
5162  __ shrl(scratch, Immediate(6));
5163  __ xorl(hash, scratch);
5164 }
5165 
5166 
5167 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
5168  Register hash,
5169  Register scratch) {
5170  // hash += hash << 3;
5171  __ leal(hash, Operand(hash, hash, times_8, 0));
5172  // hash ^= hash >> 11;
5173  __ movl(scratch, hash);
5174  __ shrl(scratch, Immediate(11));
5175  __ xorl(hash, scratch);
5176  // hash += hash << 15;
5177  __ movl(scratch, hash);
5178  __ shll(scratch, Immediate(15));
5179  __ addl(hash, scratch);
5180 
5181  __ andl(hash, Immediate(String::kHashBitMask));
5182 
5183  // if (hash == 0) hash = 27;
5184  Label hash_not_zero;
5185  __ j(not_zero, &hash_not_zero);
5186  __ Set(hash, StringHasher::kZeroHash);
5187  __ bind(&hash_not_zero);
5188 }
5189 
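Taken together, the three helpers above compute V8's running string hash: seed the accumulator, fold in each character with an add/shift/xor step, then finalize, mask to String::kHashBitMask and substitute StringHasher::kZeroHash for a zero result. The same arithmetic in scalar C++ (the seed and the two constants are passed in rather than reproduced here):

#include <cstdint>

uint32_t HashInit(uint32_t seed, uint32_t character) {
  uint32_t hash = seed + character;
  hash += hash << 10;
  hash ^= hash >> 6;
  return hash;
}

uint32_t HashAddCharacter(uint32_t hash, uint32_t character) {
  hash += character;
  hash += hash << 10;
  hash ^= hash >> 6;
  return hash;
}

uint32_t HashGetHash(uint32_t hash, uint32_t hash_bit_mask, uint32_t zero_hash) {
  hash += hash << 3;   // leal(hash, Operand(hash, hash, times_8, 0)) == hash * 9
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= hash_bit_mask;                 // String::kHashBitMask
  return hash == 0 ? zero_hash : hash;   // StringHasher::kZeroHash if zero
}
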
5190 void SubStringStub::Generate(MacroAssembler* masm) {
5191  Label runtime;
5192 
5193  // Stack frame on entry.
5194  // rsp[0]: return address
5195  // rsp[8]: to
5196  // rsp[16]: from
5197  // rsp[24]: string
5198 
5199  const int kToOffset = 1 * kPointerSize;
5200  const int kFromOffset = kToOffset + kPointerSize;
5201  const int kStringOffset = kFromOffset + kPointerSize;
5202  const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset;
5203 
5204  // Make sure first argument is a string.
5205  __ movq(rax, Operand(rsp, kStringOffset));
5206  STATIC_ASSERT(kSmiTag == 0);
5207  __ testl(rax, Immediate(kSmiTagMask));
5208  __ j(zero, &runtime);
5209  Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
5210  __ j(NegateCondition(is_string), &runtime);
5211 
5212  // rax: string
5213  // rbx: instance type
5214  // Calculate length of sub string using the smi values.
5215  __ movq(rcx, Operand(rsp, kToOffset));
5216  __ movq(rdx, Operand(rsp, kFromOffset));
5217  __ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime);
5218 
5219  __ SmiSub(rcx, rcx, rdx); // Overflow doesn't happen.
5221  Label not_original_string;
5222  // Shorter than original string's length: an actual substring.
5223  __ j(below, &not_original_string, Label::kNear);
5224  // Longer than original string's length or negative: unsafe arguments.
5225  __ j(above, &runtime);
5226  // Return original string.
5227  Counters* counters = masm->isolate()->counters();
5228  __ IncrementCounter(counters->sub_string_native(), 1);
5229  __ ret(kArgumentsSize);
5230  __ bind(&not_original_string);
5231  __ SmiToInteger32(rcx, rcx);
5232 
5233  // rax: string
5234  // rbx: instance type
5235  // rcx: sub string length
5236  // rdx: from index (smi)
5237  // Deal with different string types: update the index if necessary
5238  // and put the underlying string into rdi.
5239  Label underlying_unpacked, sliced_string, seq_or_external_string;
5240  // If the string is not indirect, it can only be sequential or external.
5243  __ testb(rbx, Immediate(kIsIndirectStringMask));
5244  __ j(zero, &seq_or_external_string, Label::kNear);
5245 
5246  __ testb(rbx, Immediate(kSlicedNotConsMask));
5247  __ j(not_zero, &sliced_string, Label::kNear);
5248  // Cons string. Check whether it is flat, then fetch first part.
5249  // Flat cons strings have an empty second part.
5251  Heap::kEmptyStringRootIndex);
5252  __ j(not_equal, &runtime);
5254  // Update instance type.
5257  __ jmp(&underlying_unpacked, Label::kNear);
5258 
5259  __ bind(&sliced_string);
5260  // Sliced string. Fetch parent and correct start index by offset.
5263  // Update instance type.
5266  __ jmp(&underlying_unpacked, Label::kNear);
5267 
5268  __ bind(&seq_or_external_string);
5269  // Sequential or external string. Just move string to the correct register.
5270  __ movq(rdi, rax);
5271 
5272  __ bind(&underlying_unpacked);
5273 
5274  if (FLAG_string_slices) {
5275  Label copy_routine;
5276  // rdi: underlying subject string
5277  // rbx: instance type of underlying subject string
5278  // rdx: adjusted start index (smi)
5279  // rcx: length
5280  // If coming from the make_two_character_string path, the string
5281  // is too short to be sliced anyway.
5282  __ cmpq(rcx, Immediate(SlicedString::kMinLength));
5283  // Short slice. Copy instead of slicing.
5284  __ j(less, &copy_routine);
5285  // Allocate a new sliced string. At this point we do not reload the instance
5286  // type, including the string encoding, because we simply rely on the info
5287  // provided by the original string. It does not matter if the original
5288  // string's encoding is wrong, because we always have to recheck the encoding
5289  // of the newly created string's parent anyway due to externalized strings.
5290  Label two_byte_slice, set_slice_header;
5293  __ testb(rbx, Immediate(kStringEncodingMask));
5294  __ j(zero, &two_byte_slice, Label::kNear);
5295  __ AllocateAsciiSlicedString(rax, rbx, r14, &runtime);
5296  __ jmp(&set_slice_header, Label::kNear);
5297  __ bind(&two_byte_slice);
5298  __ AllocateTwoByteSlicedString(rax, rbx, r14, &runtime);
5299  __ bind(&set_slice_header);
5300  __ Integer32ToSmi(rcx, rcx);
5303  Immediate(String::kEmptyHashField));
5306  __ IncrementCounter(counters->sub_string_native(), 1);
5307  __ ret(kArgumentsSize);
5308 
5309  __ bind(&copy_routine);
5310  }
5311 
5312  // rdi: underlying subject string
5313  // rbx: instance type of underlying subject string
5314  // rdx: adjusted start index (smi)
5315  // rcx: length
5316  // The subject string can only be an external or a sequential string of
5317  // either encoding at this point.
5318  Label two_byte_sequential, sequential_string;
5321  __ testb(rbx, Immediate(kExternalStringTag));
5322  __ j(zero, &sequential_string);
5323 
5324  // Handle external string.
5325  // Rule out short external strings.
5327  __ testb(rbx, Immediate(kShortExternalStringMask));
5328  __ j(not_zero, &runtime);
5330  // Move the pointer so that offset-wise, it looks like a sequential string.
5332  __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
5333 
5334  __ bind(&sequential_string);
5336  __ testb(rbx, Immediate(kStringEncodingMask));
5337  __ j(zero, &two_byte_sequential);
5338 
5339  // Allocate the result.
5340  __ AllocateAsciiString(rax, rcx, r11, r14, r15, &runtime);
5341 
5342  // rax: result string
5343  // rcx: result string length
5344  __ movq(r14, rsi); // rsi used by following code.
5345  { // Locate character of sub string start.
5346  SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1);
5347  __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
5349  }
5350  // Locate first character of result.
5352 
5353  // rax: result string
5354  // rcx: result length
5355  // rdi: first character of result
5356  // rsi: character of sub string start
5357  // r14: original value of rsi
5359  __ movq(rsi, r14); // Restore rsi.
5360  __ IncrementCounter(counters->sub_string_native(), 1);
5361  __ ret(kArgumentsSize);
5362 
5363  __ bind(&two_byte_sequential);
5364  // Allocate the result.
5365  __ AllocateTwoByteString(rax, rcx, r11, r14, r15, &runtime);
5366 
5367  // rax: result string
5368  // rcx: result string length
5369  __ movq(r14, rsi); // rsi used by following code.
5370  { // Locate character of sub string start.
5371  SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2);
5372  __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
5374  }
5375  // Locate first character of result.
5377 
5378  // rax: result string
5379  // rcx: result length
5380  // rdi: first character of result
5381  // rsi: character of sub string start
5382  // r14: original value of rsi
5384  __ movq(rsi, r14); // Restore rsi.
5385  __ IncrementCounter(counters->sub_string_native(), 1);
5386  __ ret(kArgumentsSize);
5387 
5388  // Just jump to runtime to create the sub string.
5389  __ bind(&runtime);
5390  __ TailCallRuntime(Runtime::kSubString, 3, 1);
5391 }
5392 
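In outline, the stub above produces one of three results: the original string when the requested range covers its whole length, a sliced string that only records parent, offset and length when slices are enabled and the result is at least SlicedString::kMinLength characters, and a freshly allocated sequential string filled by copying otherwise. A small standalone C++ sketch of that decision (names are placeholders, not V8 API):

enum SubStringStrategy { kReturnOriginal, kMakeSlicedString, kCopyCharacters };

SubStringStrategy ClassifySubString(int string_length, int from, int to,
                                    bool string_slices_enabled,
                                    int sliced_string_min_length) {
  int result_length = to - from;
  if (result_length == string_length) return kReturnOriginal;
  if (string_slices_enabled && result_length >= sliced_string_min_length) {
    return kMakeSlicedString;   // record parent + offset + length, no copy
  }
  return kCopyCharacters;       // allocate a sequential string and copy
}
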
5393 
5394 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
5395  Register left,
5396  Register right,
5397  Register scratch1,
5398  Register scratch2) {
5399  Register length = scratch1;
5400 
5401  // Compare lengths.
5402  Label check_zero_length;
5403  __ movq(length, FieldOperand(left, String::kLengthOffset));
5404  __ SmiCompare(length, FieldOperand(right, String::kLengthOffset));
5405  __ j(equal, &check_zero_length, Label::kNear);
5406  __ Move(rax, Smi::FromInt(NOT_EQUAL));
5407  __ ret(0);
5408 
5409  // Check if the length is zero.
5410  Label compare_chars;
5411  __ bind(&check_zero_length);
5412  STATIC_ASSERT(kSmiTag == 0);
5413  __ SmiTest(length);
5414  __ j(not_zero, &compare_chars, Label::kNear);
5415  __ Move(rax, Smi::FromInt(EQUAL));
5416  __ ret(0);
5417 
5418  // Compare characters.
5419  __ bind(&compare_chars);
5420  Label strings_not_equal;
5421  GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
5422  &strings_not_equal, Label::kNear);
5423 
5424  // Characters are equal.
5425  __ Move(rax, Smi::FromInt(EQUAL));
5426  __ ret(0);
5427 
5428  // Characters are not equal.
5429  __ bind(&strings_not_equal);
5430  __ Move(rax, Smi::FromInt(NOT_EQUAL));
5431  __ ret(0);
5432 }
5433 
5434 
5435 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
5436  Register left,
5437  Register right,
5438  Register scratch1,
5439  Register scratch2,
5440  Register scratch3,
5441  Register scratch4) {
5442  // Ensure that you can always subtract a string length from a non-negative
5443  // number (e.g. another length).
5444  STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
5445 
5446  // Find minimum length and length difference.
5447  __ movq(scratch1, FieldOperand(left, String::kLengthOffset));
5448  __ movq(scratch4, scratch1);
5449  __ SmiSub(scratch4,
5450  scratch4,
5452  // Register scratch4 now holds left.length - right.length.
5453  const Register length_difference = scratch4;
5454  Label left_shorter;
5455  __ j(less, &left_shorter, Label::kNear);
5456  // The right string isn't longer than the left one.
5457  // Get the right string's length by subtracting the (non-negative) difference
5458  // from the left string's length.
5459  __ SmiSub(scratch1, scratch1, length_difference);
5460  __ bind(&left_shorter);
5461  // Register scratch1 now holds Min(left.length, right.length).
5462  const Register min_length = scratch1;
5463 
5464  Label compare_lengths;
5465  // If min-length is zero, go directly to comparing lengths.
5466  __ SmiTest(min_length);
5467  __ j(zero, &compare_lengths, Label::kNear);
5468 
5469  // Compare loop.
5470  Label result_not_equal;
5471  GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
5472  &result_not_equal, Label::kNear);
5473 
5474  // Completed loop without finding different characters.
5475  // Compare lengths (precomputed).
5476  __ bind(&compare_lengths);
5477  __ SmiTest(length_difference);
5478  __ j(not_zero, &result_not_equal, Label::kNear);
5479 
5480  // Result is EQUAL.
5481  __ Move(rax, Smi::FromInt(EQUAL));
5482  __ ret(0);
5483 
5484  Label result_greater;
5485  __ bind(&result_not_equal);
5486  // Unequal comparison of left to right, either character or length.
5487  __ j(greater, &result_greater, Label::kNear);
5488 
5489  // Result is LESS.
5490  __ Move(rax, Smi::FromInt(LESS));
5491  __ ret(0);
5492 
5493  // Result is GREATER.
5494  __ bind(&result_greater);
5495  __ Move(rax, Smi::FromInt(GREATER));
5496  __ ret(0);
5497 }
5498 
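The comparison above is plain lexicographic ordering: compare characters up to the shorter length, and if no character differs, let the sign of the length difference decide. An equivalent scalar C++ version returning -1, 0 or 1, matching the LESS/EQUAL/GREATER smis the stub returns:

#include <cstdint>

int CompareFlatAsciiStrings(const uint8_t* left, int left_length,
                            const uint8_t* right, int right_length) {
  int min_length = left_length < right_length ? left_length : right_length;
  for (int i = 0; i < min_length; i++) {
    if (left[i] != right[i]) return left[i] < right[i] ? -1 : 1;
  }
  int length_difference = left_length - right_length;  // as held in scratch4
  if (length_difference == 0) return 0;
  return length_difference < 0 ? -1 : 1;
}
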
5499 
5500 void StringCompareStub::GenerateAsciiCharsCompareLoop(
5501  MacroAssembler* masm,
5502  Register left,
5503  Register right,
5504  Register length,
5505  Register scratch,
5506  Label* chars_not_equal,
5507  Label::Distance near_jump) {
5508  // Change index to run from -length to -1 by adding length to string
5509  // start. This means that loop ends when index reaches zero, which
5510  // doesn't need an additional compare.
5511  __ SmiToInteger32(length, length);
5512  __ lea(left,
5514  __ lea(right,
5516  __ neg(length);
5517  Register index = length; // index = -length;
5518 
5519  // Compare loop.
5520  Label loop;
5521  __ bind(&loop);
5522  __ movb(scratch, Operand(left, index, times_1, 0));
5523  __ cmpb(scratch, Operand(right, index, times_1, 0));
5524  __ j(not_equal, chars_not_equal, near_jump);
5525  __ incq(index);
5526  __ j(not_zero, &loop);
5527 }
5528 
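The loop above relies on a common assembly idiom: advance both string pointers past the region to compare and run the index from -length up to 0, so the loop's "not zero" back-branch doubles as the termination test and no extra bound compare is needed. The same indexing in C++ (a sketch; callers guarantee length > 0):

// Returns true if some character differs (the stub's chars_not_equal path).
bool AsciiCharsDiffer(const char* left, const char* right, int length) {
  left += length;       // point just past the characters to compare
  right += length;
  int index = -length;  // runs from -length up to 0
  do {
    if (left[index] != right[index]) return true;
  } while (++index != 0);
  return false;
}
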
5529 
5530 void StringCompareStub::Generate(MacroAssembler* masm) {
5531  Label runtime;
5532 
5533  // Stack frame on entry.
5534  // rsp[0]: return address
5535  // rsp[8]: right string
5536  // rsp[16]: left string
5537 
5538  __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // left
5539  __ movq(rax, Operand(rsp, 1 * kPointerSize)); // right
5540 
5541  // Check for identity.
5542  Label not_same;
5543  __ cmpq(rdx, rax);
5544  __ j(not_equal, &not_same, Label::kNear);
5545  __ Move(rax, Smi::FromInt(EQUAL));
5546  Counters* counters = masm->isolate()->counters();
5547  __ IncrementCounter(counters->string_compare_native(), 1);
5548  __ ret(2 * kPointerSize);
5549 
5550  __ bind(&not_same);
5551 
5552  // Check that both are sequential ASCII strings.
5553  __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
5554 
5555  // Inline comparison of ASCII strings.
5556  __ IncrementCounter(counters->string_compare_native(), 1);
5557  // Drop arguments from the stack
5558  __ pop(rcx);
5559  __ addq(rsp, Immediate(2 * kPointerSize));
5560  __ push(rcx);
5562 
5563  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
5564  // tagged as a small integer.
5565  __ bind(&runtime);
5566  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
5567 }
5568 
5569 
5570 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
5571  ASSERT(state_ == CompareIC::SMIS);
5572  Label miss;
5573  __ JumpIfNotBothSmi(rdx, rax, &miss, Label::kNear);
5574 
5575  if (GetCondition() == equal) {
5576  // For equality we do not care about the sign of the result.
5577  __ subq(rax, rdx);
5578  } else {
5579  Label done;
5580  __ subq(rdx, rax);
5581  __ j(no_overflow, &done, Label::kNear);
5582  // Correct sign of result in case of overflow.
5583  __ SmiNot(rdx, rdx);
5584  __ bind(&done);
5585  __ movq(rax, rdx);
5586  }
5587  __ ret(0);
5588 
5589  __ bind(&miss);
5590  GenerateMiss(masm);
5591 }
5592 
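The IC above compares smis by subtraction: for equality only zero versus non-zero matters, so rax - rdx is enough; for ordered comparisons it computes rdx - rax, and if that subtraction overflows the sign bit is the opposite of the true ordering, so the stub flips it with SmiNot. A conceptual C++ sketch of the ordered case (the operands stand in for the tagged values in rdx and rax; only the result's sign is consumed):

#include <cstdint>

int64_t CompareSmisOrdered(int64_t lhs /* rdx */, int64_t rhs /* rax */) {
  int64_t result;
  if (__builtin_sub_overflow(lhs, rhs, &result)) {
    // On overflow the wrapped result has the wrong sign; inverting it restores
    // the correct sign (and, for smi-tagged inputs, keeps it non-zero).
    result = ~result;
  }
  return result;  // negative: less, zero: equal, positive: greater
}
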
5593 
5594 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
5595  ASSERT(state_ == CompareIC::HEAP_NUMBERS);
5596 
5597  Label generic_stub;
5598  Label unordered, maybe_undefined1, maybe_undefined2;
5599  Label miss;
5600  Condition either_smi = masm->CheckEitherSmi(rax, rdx);
5601  __ j(either_smi, &generic_stub, Label::kNear);
5602 
5603  __ CmpObjectType(rax, HEAP_NUMBER_TYPE, rcx);
5604  __ j(not_equal, &maybe_undefined1, Label::kNear);
5605  __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
5606  __ j(not_equal, &maybe_undefined2, Label::kNear);
5607 
5608  // Load left and right operand
5611 
5612  // Compare operands
5613  __ ucomisd(xmm0, xmm1);
5614 
5615  // Don't base result on EFLAGS when a NaN is involved.
5616  __ j(parity_even, &unordered, Label::kNear);
5617 
5618  // Return a result of -1, 0, or 1, based on EFLAGS.
5619  // Use mov rather than xor, because xor would destroy the flags register.
5620  __ movl(rax, Immediate(0));
5621  __ movl(rcx, Immediate(0));
5622  __ setcc(above, rax); // Add one to zero if carry clear and not equal.
5623  __ sbbq(rax, rcx); // Subtract one if below (aka. carry set).
5624  __ ret(0);
5625 
5626  __ bind(&unordered);
5627  CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
5628  __ bind(&generic_stub);
5629  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
5630 
5631  __ bind(&maybe_undefined1);
5633  __ Cmp(rax, masm->isolate()->factory()->undefined_value());
5634  __ j(not_equal, &miss);
5635  __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
5636  __ j(not_equal, &maybe_undefined2, Label::kNear);
5637  __ jmp(&unordered);
5638  }
5639 
5640  __ bind(&maybe_undefined2);
5642  __ Cmp(rdx, masm->isolate()->factory()->undefined_value());
5643  __ j(equal, &unordered);
5644  }
5645 
5646  __ bind(&miss);
5647  GenerateMiss(masm);
5648 }
5649 
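The setcc/sbb pair above converts the ucomisd flags into -1, 0 or 1 without branching: setcc(above) produces 1 when the first compared value is strictly greater, and sbb subtracts 1 when the carry flag is set (strictly less). In C++ this is the familiar (a > b) - (a < b) idiom:

// Branchless three-way comparison of two doubles; NaNs never reach this point
// because the stub branches to 'unordered' on parity_even first.
int CompareDoubles(double a, double b) {
  return (a > b) - (a < b);   // 1, 0 or -1
}
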
5650 
5651 void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
5652  ASSERT(state_ == CompareIC::SYMBOLS);
5653  ASSERT(GetCondition() == equal);
5654 
5655  // Registers containing left and right operands respectively.
5656  Register left = rdx;
5657  Register right = rax;
5658  Register tmp1 = rcx;
5659  Register tmp2 = rbx;
5660 
5661  // Check that both operands are heap objects.
5662  Label miss;
5663  Condition cond = masm->CheckEitherSmi(left, right, tmp1);
5664  __ j(cond, &miss, Label::kNear);
5665 
5666  // Check that both operands are symbols.
5667  __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
5668  __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
5669  __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
5670  __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
5671  STATIC_ASSERT(kSymbolTag != 0);
5672  __ and_(tmp1, tmp2);
5673  __ testb(tmp1, Immediate(kIsSymbolMask));
5674  __ j(zero, &miss, Label::kNear);
5675 
5676  // Symbols are compared by identity.
5677  Label done;
5678  __ cmpq(left, right);
5679  // Make sure rax is non-zero. At this point input operands are
5680  // guaranteed to be non-zero.
5681  ASSERT(right.is(rax));
5682  __ j(not_equal, &done, Label::kNear);
5683  STATIC_ASSERT(EQUAL == 0);
5684  STATIC_ASSERT(kSmiTag == 0);
5685  __ Move(rax, Smi::FromInt(EQUAL));
5686  __ bind(&done);
5687  __ ret(0);
5688 
5689  __ bind(&miss);
5690  GenerateMiss(masm);
5691 }
5692 
5693 
5694 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
5695  ASSERT(state_ == CompareIC::STRINGS);
5696  Label miss;
5697 
5698  bool equality = Token::IsEqualityOp(op_);
5699 
5700  // Registers containing left and right operands respectively.
5701  Register left = rdx;
5702  Register right = rax;
5703  Register tmp1 = rcx;
5704  Register tmp2 = rbx;
5705  Register tmp3 = rdi;
5706 
5707  // Check that both operands are heap objects.
5708  Condition cond = masm->CheckEitherSmi(left, right, tmp1);
5709  __ j(cond, &miss);
5710 
5711  // Check that both operands are strings. This leaves the instance
5712  // types loaded in tmp1 and tmp2.
5713  __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
5714  __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
5715  __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
5716  __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
5717  __ movq(tmp3, tmp1);
5719  __ or_(tmp3, tmp2);
5720  __ testb(tmp3, Immediate(kIsNotStringMask));
5721  __ j(not_zero, &miss);
5722 
5723  // Fast check for identical strings.
5724  Label not_same;
5725  __ cmpq(left, right);
5726  __ j(not_equal, &not_same, Label::kNear);
5727  STATIC_ASSERT(EQUAL == 0);
5728  STATIC_ASSERT(kSmiTag == 0);
5729  __ Move(rax, Smi::FromInt(EQUAL));
5730  __ ret(0);
5731 
5732  // Handle not identical strings.
5733  __ bind(&not_same);
5734 
5735  // Check that both strings are symbols. If they are, we're done
5736  // because we already know they are not identical.
5737  if (equality) {
5738  Label do_compare;
5739  STATIC_ASSERT(kSymbolTag != 0);
5740  __ and_(tmp1, tmp2);
5741  __ testb(tmp1, Immediate(kIsSymbolMask));
5742  __ j(zero, &do_compare, Label::kNear);
5743  // Make sure rax is non-zero. At this point input operands are
5744  // guaranteed to be non-zero.
5745  ASSERT(right.is(rax));
5746  __ ret(0);
5747  __ bind(&do_compare);
5748  }
5749 
5750  // Check that both strings are sequential ASCII.
5751  Label runtime;
5752  __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
5753 
5754  // Compare flat ASCII strings. Returns when done.
5755  if (equality) {
5757  masm, left, right, tmp1, tmp2);
5758  } else {
5760  masm, left, right, tmp1, tmp2, tmp3, kScratchRegister);
5761  }
5762 
5763  // Handle more complex cases in runtime.
5764  __ bind(&runtime);
5765  __ pop(tmp1); // Return address.
5766  __ push(left);
5767  __ push(right);
5768  __ push(tmp1);
5769  if (equality) {
5770  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
5771  } else {
5772  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
5773  }
5774 
5775  __ bind(&miss);
5776  GenerateMiss(masm);
5777 }
5778 
5779 
5780 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
5781  ASSERT(state_ == CompareIC::OBJECTS);
5782  Label miss;
5783  Condition either_smi = masm->CheckEitherSmi(rdx, rax);
5784  __ j(either_smi, &miss, Label::kNear);
5785 
5786  __ CmpObjectType(rax, JS_OBJECT_TYPE, rcx);
5787  __ j(not_equal, &miss, Label::kNear);
5788  __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
5789  __ j(not_equal, &miss, Label::kNear);
5790 
5791  ASSERT(GetCondition() == equal);
5792  __ subq(rax, rdx);
5793  __ ret(0);
5794 
5795  __ bind(&miss);
5796  GenerateMiss(masm);
5797 }
5798 
5799 
5800 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
5801  Label miss;
5802  Condition either_smi = masm->CheckEitherSmi(rdx, rax);
5803  __ j(either_smi, &miss, Label::kNear);
5804 
5807  __ Cmp(rcx, known_map_);
5808  __ j(not_equal, &miss, Label::kNear);
5809  __ Cmp(rbx, known_map_);
5810  __ j(not_equal, &miss, Label::kNear);
5811 
5812  __ subq(rax, rdx);
5813  __ ret(0);
5814 
5815  __ bind(&miss);
5816  GenerateMiss(masm);
5817 }
5818 
5819 
5820 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
5821  {
5822  // Call the runtime system in a fresh internal frame.
5823  ExternalReference miss =
5824  ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
5825 
5826  FrameScope scope(masm, StackFrame::INTERNAL);
5827  __ push(rdx);
5828  __ push(rax);
5829  __ push(rdx);
5830  __ push(rax);
5831  __ Push(Smi::FromInt(op_));
5832  __ CallExternalReference(miss, 3);
5833 
5834  // Compute the entry point of the rewritten stub.
5836  __ pop(rax);
5837  __ pop(rdx);
5838  }
5839 
5840  // Do a tail call to the rewritten stub.
5841  __ jmp(rdi);
5842 }
5843 
5844 
5845 void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
5846  Label* miss,
5847  Label* done,
5848  Register properties,
5849  Handle<String> name,
5850  Register r0) {
5851  // If the names in the slots probed for the hash value (probes 1 to
5852  // kProbes - 1) all differ from the name, and the kProbes-th slot is unused
5853  // (its name is the undefined value), the hash table is guaranteed not to
5854  // contain the property. This holds even if some slots hold deleted
5855  // properties (their names are the hole value).
5856  for (int i = 0; i < kInlinedProbes; i++) {
5857  // r0 points to properties hash.
5858  // Compute the masked index: (hash + i + i * i) & mask.
5859  Register index = r0;
5860  // Capacity is smi 2^n.
5861  __ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
5862  __ decl(index);
5863  __ and_(index,
5864  Immediate(name->Hash() + StringDictionary::GetProbeOffset(i)));
5865 
5866  // Scale the index by multiplying by the entry size.
5868  __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
5869 
5870  Register entity_name = r0;
5871  // Having undefined at this place means the name is not contained.
5872  ASSERT_EQ(kSmiTagSize, 1);
5873  __ movq(entity_name, Operand(properties,
5874  index,
5876  kElementsStartOffset - kHeapObjectTag));
5877  __ Cmp(entity_name, masm->isolate()->factory()->undefined_value());
5878  __ j(equal, done);
5879 
5880  // Stop if found the property.
5881  __ Cmp(entity_name, Handle<String>(name));
5882  __ j(equal, miss);
5883 
5884  Label the_hole;
5885  // Check for the hole and skip.
5886  __ CompareRoot(entity_name, Heap::kTheHoleValueRootIndex);
5887  __ j(equal, &the_hole, Label::kNear);
5888 
5889  // Check if the entry name is not a symbol.
5890  __ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
5891  __ testb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
5892  Immediate(kIsSymbolMask));
5893  __ j(zero, miss);
5894 
5895  __ bind(&the_hole);
5896  }
5897 
5898  StringDictionaryLookupStub stub(properties,
5899  r0,
5900  r0,
5902  __ Push(Handle<Object>(name));
5903  __ push(Immediate(name->Hash()));
5904  __ CallStub(&stub);
5905  __ testq(r0, r0);
5906  __ j(not_zero, miss);
5907  __ jmp(done);
5908 }
5909 
5910 
5911 // Probe the string dictionary in the |elements| register. Jump to the
5912 // |done| label if a property with the given name is found, leaving the
5913 // index into the dictionary in |r1|. Jump to the |miss| label
5914 // otherwise.
5915 void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
5916  Label* miss,
5917  Label* done,
5918  Register elements,
5919  Register name,
5920  Register r0,
5921  Register r1) {
5922  ASSERT(!elements.is(r0));
5923  ASSERT(!elements.is(r1));
5924  ASSERT(!name.is(r0));
5925  ASSERT(!name.is(r1));
5926 
5927  __ AssertString(name);
5928 
5929  __ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset));
5930  __ decl(r0);
5931 
5932  for (int i = 0; i < kInlinedProbes; i++) {
5933  // Compute the masked index: (hash + i + i * i) & mask.
5934  __ movl(r1, FieldOperand(name, String::kHashFieldOffset));
5935  __ shrl(r1, Immediate(String::kHashShift));
5936  if (i > 0) {
5937  __ addl(r1, Immediate(StringDictionary::GetProbeOffset(i)));
5938  }
5939  __ and_(r1, r0);
5940 
5941  // Scale the index by multiplying by the entry size.
5943  __ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
5944 
5945  // Check if the key is identical to the name.
5946  __ cmpq(name, Operand(elements, r1, times_pointer_size,
5947  kElementsStartOffset - kHeapObjectTag));
5948  __ j(equal, done);
5949  }
5950 
5951  StringDictionaryLookupStub stub(elements,
5952  r0,
5953  r1,
5954  POSITIVE_LOOKUP);
5955  __ push(name);
5956  __ movl(r0, FieldOperand(name, String::kHashFieldOffset));
5957  __ shrl(r0, Immediate(String::kHashShift));
5958  __ push(r0);
5959  __ CallStub(&stub);
5960 
5961  __ testq(r0, r0);
5962  __ j(zero, miss);
5963  __ jmp(done);
5964 }
5965 
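Both dictionary helpers above probe the same way: start from the name's hash, add the probe offset for the i-th attempt, mask by capacity - 1, scale by the three-word entry size and compare the stored key. A standalone C++ sketch of one probe sequence; the entry layout and probe_offset callback are simplified placeholders for StringDictionary's real definitions:

#include <cstddef>
#include <cstdint>

struct DictionaryEntry { const void* key; intptr_t value; intptr_t details; };

// Returns the entry index, or -1 when an undefined (empty) slot proves the key
// is absent or the probe budget is exhausted.
int FindEntry(const DictionaryEntry* elements, size_t capacity, uint32_t hash,
              const void* key, int max_probes, uint32_t (*probe_offset)(int)) {
  uint32_t mask = static_cast<uint32_t>(capacity - 1);  // capacity is 2^n
  for (int i = 0; i < max_probes; i++) {
    uint32_t index = (hash + (i > 0 ? probe_offset(i) : 0)) & mask;
    if (elements[index].key == key) return static_cast<int>(index);
    if (elements[index].key == nullptr) return -1;  // stands in for undefined
  }
  return -1;
}
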
5966 
5967 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
5968  // This stub overrides SometimesSetsUpAFrame() to return false. That means
5969  // we cannot call anything that could cause a GC from this stub.
5970  // Stack frame on entry:
5971  // rsp[0 * kPointerSize]: return address.
5972  // rsp[1 * kPointerSize]: key's hash.
5973  // rsp[2 * kPointerSize]: key.
5974  // Registers:
5975  // dictionary_: StringDictionary to probe.
5976  // result_: used as scratch.
5977  // index_: will hold the index of the entry if the lookup is successful.
5978  // might alias with result_.
5979  // Returns:
5980  // result_ is zero if the lookup failed, non-zero otherwise.
5981 
5982  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
5983 
5984  Register scratch = result_;
5985 
5986  __ SmiToInteger32(scratch, FieldOperand(dictionary_, kCapacityOffset));
5987  __ decl(scratch);
5988  __ push(scratch);
5989 
5990  // If the names in the slots probed for the hash value (probes 1 to
5991  // kProbes - 1) all differ from the name, and the kProbes-th slot is unused
5992  // (its name is the undefined value), the hash table is guaranteed not to
5993  // contain the property. This holds even if some slots hold deleted
5994  // properties (their names are the hole value).
5995  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
5996  // Compute the masked index: (hash + i + i * i) & mask.
5997  __ movq(scratch, Operand(rsp, 2 * kPointerSize));
5998  if (i > 0) {
5999  __ addl(scratch, Immediate(StringDictionary::GetProbeOffset(i)));
6000  }
6001  __ and_(scratch, Operand(rsp, 0));
6002 
6003  // Scale the index by multiplying by the entry size.
6005  __ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
6006 
6007  // Having undefined at this place means the name is not contained.
6008  __ movq(scratch, Operand(dictionary_,
6009  index_,
6011  kElementsStartOffset - kHeapObjectTag));
6012 
6013  __ Cmp(scratch, masm->isolate()->factory()->undefined_value());
6014  __ j(equal, &not_in_dictionary);
6015 
6016  // Stop if found the property.
6017  __ cmpq(scratch, Operand(rsp, 3 * kPointerSize));
6018  __ j(equal, &in_dictionary);
6019 
6020  if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
6021  // If we hit a non-symbol key during a negative lookup
6022  // we have to bail out, as this key might be equal to the
6023  // key we are looking for.
6024 
6025  // Check if the entry name is not a symbol.
6026  __ movq(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
6027  __ testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
6028  Immediate(kIsSymbolMask));
6029  __ j(zero, &maybe_in_dictionary);
6030  }
6031  }
6032 
6033  __ bind(&maybe_in_dictionary);
6034  // If we are doing a negative lookup then probing failure should be
6035  // treated as a lookup success. For a positive lookup, probing failure
6036  // should be treated as a lookup failure.
6037  if (mode_ == POSITIVE_LOOKUP) {
6038  __ movq(scratch, Immediate(0));
6039  __ Drop(1);
6040  __ ret(2 * kPointerSize);
6041  }
6042 
6043  __ bind(&in_dictionary);
6044  __ movq(scratch, Immediate(1));
6045  __ Drop(1);
6046  __ ret(2 * kPointerSize);
6047 
6048  __ bind(&not_in_dictionary);
6049  __ movq(scratch, Immediate(0));
6050  __ Drop(1);
6051  __ ret(2 * kPointerSize);
6052 }
6053 
6054 
6055 struct AheadOfTimeWriteBarrierStubList {
6056  Register object, value, address;
6057  RememberedSetAction action;
6058 };
6059 
6060 
6061 #define REG(Name) { kRegister_ ## Name ## _Code }
6062 
6063 struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
6064  // Used in RegExpExecStub.
6065  { REG(rbx), REG(rax), REG(rdi), EMIT_REMEMBERED_SET },
6066  // Used in CompileArrayPushCall.
6067  { REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
6068  // Used in CompileStoreGlobal.
6069  { REG(rbx), REG(rcx), REG(rdx), OMIT_REMEMBERED_SET },
6070  // Used in StoreStubCompiler::CompileStoreField and
6071  // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
6072  { REG(rdx), REG(rcx), REG(rbx), EMIT_REMEMBERED_SET },
6073  // GenerateStoreField calls the stub with two different permutations of
6074  // registers. This is the second.
6075  { REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
6076  // StoreIC::GenerateNormal via GenerateDictionaryStore.
6077  { REG(rbx), REG(r8), REG(r9), EMIT_REMEMBERED_SET },
6078  // KeyedStoreIC::GenerateGeneric.
6079  { REG(rbx), REG(rdx), REG(rcx), EMIT_REMEMBERED_SET},
6080  // KeyedStoreStubCompiler::GenerateStoreFastElement.
6081  { REG(rdi), REG(rbx), REG(rcx), EMIT_REMEMBERED_SET},
6082  { REG(rdx), REG(rdi), REG(rbx), EMIT_REMEMBERED_SET},
6083  // ElementsTransitionGenerator::GenerateMapChangeElementTransition
6084  // and ElementsTransitionGenerator::GenerateSmiToDouble
6085  // and ElementsTransitionGenerator::GenerateDoubleToObject
6086  { REG(rdx), REG(rbx), REG(rdi), EMIT_REMEMBERED_SET},
6087  { REG(rdx), REG(rbx), REG(rdi), OMIT_REMEMBERED_SET},
6088  // ElementsTransitionGenerator::GenerateSmiToDouble
6089  // and ElementsTransitionGenerator::GenerateDoubleToObject
6090  { REG(rdx), REG(r11), REG(r15), EMIT_REMEMBERED_SET},
6091  // ElementsTransitionGenerator::GenerateDoubleToObject
6092  { REG(r11), REG(rax), REG(r15), EMIT_REMEMBERED_SET},
6093  // StoreArrayLiteralElementStub::Generate
6094  { REG(rbx), REG(rax), REG(rcx), EMIT_REMEMBERED_SET},
6095  // FastNewClosureStub::Generate
6096  { REG(rcx), REG(rdx), REG(rbx), EMIT_REMEMBERED_SET},
6097  // Null termination.
6098  { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
6099 };
6100 
6101 #undef REG
6102 
6104  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
6105  !entry->object.is(no_reg);
6106  entry++) {
6107  if (object_.is(entry->object) &&
6108  value_.is(entry->value) &&
6109  address_.is(entry->address) &&
6110  remembered_set_action_ == entry->action &&
6111  save_fp_regs_mode_ == kDontSaveFPRegs) {
6112  return true;
6113  }
6114  }
6115  return false;
6116 }
6117 
6118 
6121  stub1.GetCode()->set_is_pregenerated(true);
6123  stub2.GetCode()->set_is_pregenerated(true);
6124 }
6125 
6126 
6128  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
6129  !entry->object.is(no_reg);
6130  entry++) {
6131  RecordWriteStub stub(entry->object,
6132  entry->value,
6133  entry->address,
6134  entry->action,
6135  kDontSaveFPRegs);
6136  stub.GetCode()->set_is_pregenerated(true);
6137  }
6138 }
6139 
6140 
6141 bool CodeStub::CanUseFPRegisters() {
6142  return true; // Always have SSE2 on x64.
6143 }
6144 
6145 
6146 // Takes the input in 3 registers: address_, value_ and object_. A pointer to
6147 // the value has just been written into the object; now this stub makes sure
6148 // we keep the GC informed. The word in the object where the value has been
6149 // written is in the address register.
6150 void RecordWriteStub::Generate(MacroAssembler* masm) {
6151  Label skip_to_incremental_noncompacting;
6152  Label skip_to_incremental_compacting;
6153 
6154  // The first two instructions are generated with labels so as to get the
6155  // offsets fixed up correctly by the bind(Label*) calls. We patch them back
6156  // and forth between compare instructions (nops in this position) and the
6157  // real branches when we start and stop incremental heap marking.
6158  // See RecordWriteStub::Patch for details.
6159  __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
6160  __ jmp(&skip_to_incremental_compacting, Label::kFar);
6161 
6162  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
6163  __ RememberedSetHelper(object_,
6164  address_,
6165  value_,
6166  save_fp_regs_mode_,
6168  } else {
6169  __ ret(0);
6170  }
6171 
6172  __ bind(&skip_to_incremental_noncompacting);
6173  GenerateIncremental(masm, INCREMENTAL);
6174 
6175  __ bind(&skip_to_incremental_compacting);
6176  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
6177 
6178  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
6179  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
6180  masm->set_byte_at(0, kTwoByteNopInstruction);
6181  masm->set_byte_at(2, kFiveByteNopInstruction);
6182 }
6183 
6184 
6185 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
6186  regs_.Save(masm);
6187 
6188  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
6189  Label dont_need_remembered_set;
6190 
6191  __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
6192  __ JumpIfNotInNewSpace(regs_.scratch0(),
6193  regs_.scratch0(),
6194  &dont_need_remembered_set);
6195 
6196  __ CheckPageFlag(regs_.object(),
6197  regs_.scratch0(),
6199  not_zero,
6200  &dont_need_remembered_set);
6201 
6202  // First notify the incremental marker if necessary, then update the
6203  // remembered set.
6204  CheckNeedsToInformIncrementalMarker(
6205  masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
6206  InformIncrementalMarker(masm, mode);
6207  regs_.Restore(masm);
6208  __ RememberedSetHelper(object_,
6209  address_,
6210  value_,
6211  save_fp_regs_mode_,
6213 
6214  __ bind(&dont_need_remembered_set);
6215  }
6216 
6217  CheckNeedsToInformIncrementalMarker(
6218  masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
6219  InformIncrementalMarker(masm, mode);
6220  regs_.Restore(masm);
6221  __ ret(0);
6222 }
6223 
6224 
6225 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
6226  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
6227 #ifdef _WIN64
6228  Register arg3 = r8;
6229  Register arg2 = rdx;
6230  Register arg1 = rcx;
6231 #else
6232  Register arg3 = rdx;
6233  Register arg2 = rsi;
6234  Register arg1 = rdi;
6235 #endif
6236  Register address =
6237  arg1.is(regs_.address()) ? kScratchRegister : regs_.address();
6238  ASSERT(!address.is(regs_.object()));
6239  ASSERT(!address.is(arg1));
6240  __ Move(address, regs_.address());
6241  __ Move(arg1, regs_.object());
6242  if (mode == INCREMENTAL_COMPACTION) {
6243  // TODO(gc) Can we just set address arg2 in the beginning?
6244  __ Move(arg2, address);
6245  } else {
6246  ASSERT(mode == INCREMENTAL);
6247  __ movq(arg2, Operand(address, 0));
6248  }
6249  __ LoadAddress(arg3, ExternalReference::isolate_address());
6250  int argument_count = 3;
6251 
6252  AllowExternalCallThatCantCauseGC scope(masm);
6253  __ PrepareCallCFunction(argument_count);
6254  if (mode == INCREMENTAL_COMPACTION) {
6255  __ CallCFunction(
6256  ExternalReference::incremental_evacuation_record_write_function(
6257  masm->isolate()),
6258  argument_count);
6259  } else {
6260  ASSERT(mode == INCREMENTAL);
6261  __ CallCFunction(
6262  ExternalReference::incremental_marking_record_write_function(
6263  masm->isolate()),
6264  argument_count);
6265  }
6266  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
6267 }
6268 
6269 
6270 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
6271  MacroAssembler* masm,
6272  OnNoNeedToInformIncrementalMarker on_no_need,
6273  Mode mode) {
6274  Label on_black;
6275  Label need_incremental;
6276  Label need_incremental_pop_object;
6277 
6278  __ movq(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
6279  __ and_(regs_.scratch0(), regs_.object());
6280  __ movq(regs_.scratch1(),
6281  Operand(regs_.scratch0(),
6283  __ subq(regs_.scratch1(), Immediate(1));
6284  __ movq(Operand(regs_.scratch0(),
6286  regs_.scratch1());
6287  __ j(negative, &need_incremental);
6288 
6289  // Let's look at the color of the object: If it is not black we don't have
6290  // to inform the incremental marker.
6291  __ JumpIfBlack(regs_.object(),
6292  regs_.scratch0(),
6293  regs_.scratch1(),
6294  &on_black,
6295  Label::kNear);
6296 
6297  regs_.Restore(masm);
6298  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
6299  __ RememberedSetHelper(object_,
6300  address_,
6301  value_,
6302  save_fp_regs_mode_,
6304  } else {
6305  __ ret(0);
6306  }
6307 
6308  __ bind(&on_black);
6309 
6310  // Get the value from the slot.
6311  __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
6312 
6313  if (mode == INCREMENTAL_COMPACTION) {
6314  Label ensure_not_white;
6315 
6316  __ CheckPageFlag(regs_.scratch0(), // Contains value.
6317  regs_.scratch1(), // Scratch.
6319  zero,
6320  &ensure_not_white,
6321  Label::kNear);
6322 
6323  __ CheckPageFlag(regs_.object(),
6324  regs_.scratch1(), // Scratch.
6326  zero,
6327  &need_incremental);
6328 
6329  __ bind(&ensure_not_white);
6330  }
6331 
6332  // We need an extra register for this, so we push the object register
6333  // temporarily.
6334  __ push(regs_.object());
6335  __ EnsureNotWhite(regs_.scratch0(), // The value.
6336  regs_.scratch1(), // Scratch.
6337  regs_.object(), // Scratch.
6338  &need_incremental_pop_object,
6339  Label::kNear);
6340  __ pop(regs_.object());
6341 
6342  regs_.Restore(masm);
6343  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
6344  __ RememberedSetHelper(object_,
6345  address_,
6346  value_,
6347  save_fp_regs_mode_,
6349  } else {
6350  __ ret(0);
6351  }
6352 
6353  __ bind(&need_incremental_pop_object);
6354  __ pop(regs_.object());
6355 
6356  __ bind(&need_incremental);
6357 
6358  // Fall through when we need to inform the incremental marker.
6359 }
6360 
6361 
6362 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
6363  // ----------- S t a t e -------------
6364  // -- rax : element value to store
6365  // -- rbx : array literal
6366  // -- rdi : map of array literal
6367  // -- rcx : element index as smi
6368  // -- rdx : array literal index in function
6369  // -- rsp[0] : return address
6370  // -----------------------------------
6371 
6372  Label element_done;
6373  Label double_elements;
6374  Label smi_element;
6375  Label slow_elements;
6376  Label fast_elements;
6377 
6378  __ CheckFastElements(rdi, &double_elements);
6379 
6380  // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
6381  __ JumpIfSmi(rax, &smi_element);
6382  __ CheckFastSmiElements(rdi, &fast_elements);
6383 
6384  // Storing into the array literal requires an elements transition. Call
6385  // into the runtime.
6386 
6387  __ bind(&slow_elements);
6388  __ pop(rdi); // Pop return address and remember to put back later for tail
6389  // call.
6390  __ push(rbx);
6391  __ push(rcx);
6392  __ push(rax);
6395  __ push(rdx);
6396  __ push(rdi); // Put the return address back so that the tail call returns
6397  // to the right place.
6398  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
6399 
6400  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
6401  __ bind(&fast_elements);
6402  __ SmiToInteger32(kScratchRegister, rcx);
6406  __ movq(Operand(rcx, 0), rax);
6407  // Update the write barrier for the array store.
6408  __ RecordWrite(rbx, rcx, rax,
6411  OMIT_SMI_CHECK);
6412  __ ret(0);
6413 
6414  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or
6415  // FAST_*_ELEMENTS, and value is Smi.
6416  __ bind(&smi_element);
6417  __ SmiToInteger32(kScratchRegister, rcx);
6421  __ ret(0);
6422 
6423  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
6424  __ bind(&double_elements);
6425 
6427  __ SmiToInteger32(r11, rcx);
6428  __ StoreNumberToDoubleElements(rax,
6429  r9,
6430  r11,
6431  xmm0,
6432  &slow_elements);
6433  __ ret(0);
6434 }
6435 
6436 
6437 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
6438  if (entry_hook_ != NULL) {
6439  ProfileEntryHookStub stub;
6440  masm->CallStub(&stub);
6441  }
6442 }
6443 
6444 
6445 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
6446  // Save volatile registers.
6447  // Live registers at this point are the same as at the start of any
6448  // JS function:
6449  // o rdi: the JS function object being called (i.e. ourselves)
6450  // o rsi: our context
6451  // o rbp: our caller's frame pointer
6452  // o rsp: stack pointer (pointing to return address)
6453  // o rcx: rcx is zero for method calls and non-zero for function calls.
6454 #ifdef _WIN64
6455  const int kNumSavedRegisters = 1;
6456 
6457  __ push(rcx);
6458 #else
6459  const int kNumSavedRegisters = 3;
6460 
6461  __ push(rcx);
6462  __ push(rdi);
6463  __ push(rsi);
6464 #endif
6465 
6466  // Calculate the original stack pointer and store it in the second arg.
6467 #ifdef _WIN64
6468  __ lea(rdx, Operand(rsp, kNumSavedRegisters * kPointerSize));
6469 #else
6470  __ lea(rsi, Operand(rsp, kNumSavedRegisters * kPointerSize));
6471 #endif
6472 
6473  // Calculate the function address and store it in the first arg.
6474 #ifdef _WIN64
6475  __ movq(rcx, Operand(rdx, 0));
6476  __ subq(rcx, Immediate(Assembler::kShortCallInstructionLength));
6477 #else
6478  __ movq(rdi, Operand(rsi, 0));
6479  __ subq(rdi, Immediate(Assembler::kShortCallInstructionLength));
6480 #endif
6481 
6482  // Call the entry hook function.
6483  __ movq(rax, &entry_hook_, RelocInfo::NONE);
6484  __ movq(rax, Operand(rax, 0));
6485 
6486  AllowExternalCallThatCantCauseGC scope(masm);
6487 
6488  const int kArgumentCount = 2;
6489  __ PrepareCallCFunction(kArgumentCount);
6490  __ CallCFunction(rax, kArgumentCount);
6491 
6492  // Restore volatile regs.
6493 #ifdef _WIN64
6494  __ pop(rcx);
6495 #else
6496  __ pop(rsi);
6497  __ pop(rdi);
6498  __ pop(rcx);
6499 #endif
6500 
6501  __ Ret();
6502 }
6503 
6504 #undef __
6505 
6506 } } // namespace v8::internal
6507 
6508 #endif // V8_TARGET_ARCH_X64
static const int kResourceDataOffset
Definition: objects.h:7747
const Register rdx
void GenerateFast(MacroAssembler *masm)
void GenerateSlow(MacroAssembler *masm, const RuntimeCallHelper &call_helper)
static const int kBitFieldOffset
Definition: objects.h:5160
void GenerateFast(MacroAssembler *masm)
STATIC_CHECK((kStringRepresentationMask|kStringEncodingMask)==Internals::kFullStringRepresentationMask)
static const int kMaxLength
Definition: objects.h:2366
const intptr_t kSmiTagMask
Definition: v8.h:4016
static const int kCodeOffset
Definition: objects.h:5796
static const int kEvacuationCandidateMask
Definition: spaces.h:411
#define CHECK_EQ(expected, value)
Definition: checks.h:219
void GenerateSlow(MacroAssembler *masm, const RuntimeCallHelper &call_helper)
static const int kCodeEntryOffset
Definition: objects.h:6182
const Register r14
static const int kMaxAsciiCharCode
Definition: objects.h:7327
static const int kPrototypeOrInitialMapOffset
Definition: objects.h:6183
#define COMPARE(asm_, compare_string)
static int SlotOffset(int index)
Definition: contexts.h:425
RecordWriteStub(Register object, Register value, Register address, RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode)
const Register r11
static const int kArgumentsObjectSize
Definition: heap.h:895
const XMMRegister xmm4
static void GenerateFixedRegStubsAheadOfTime()
const uint32_t kTwoByteStringTag
Definition: objects.h:469
const int kFailureTypeTagSize
Definition: objects.h:1081
static const char * GetName(TypeInfo type_info)
Definition: ic.cc:2241
static Failure * InternalError()
Definition: objects-inl.h:1019
static void GenerateCopyCharacters(MacroAssembler *masm, Register dest, Register src, Register count, Register scratch, bool ascii)
static void PerformGC(Object *result)
Definition: runtime.cc:13279
static const char * Name(Value tok)
Definition: token.h:196
static Smi * FromInt(int value)
Definition: objects-inl.h:981
const Register rbp
void Generate(MacroAssembler *masm)
static const byte kTwoByteNopInstruction
static const int kOptimizedCodeMapOffset
Definition: objects.h:5797
static const int kDataOffset
Definition: objects.h:6624
static const int kGlobalReceiverOffset
Definition: objects.h:6288
static const int kJSRegexpStaticOffsetsVectorSize
Definition: isolate.h:994
const int kSmiValueSize
Definition: v8.h:4061
void Generate(MacroAssembler *masm)
static Failure * OutOfMemoryException()
Definition: objects-inl.h:1029
const Register rsi
static const int kEmptyHashField
Definition: objects.h:7379
static void GenerateHashGetHash(MacroAssembler *masm, Register hash)
static const int kExponentBias
Definition: objects.h:1356
static const intptr_t kPageAlignmentMask
Definition: spaces.h:720
static Handle< Object > UninitializedSentinel(Isolate *isolate)
Definition: objects-inl.h:5339
static Failure * Exception()
Definition: objects-inl.h:1024
const int kMaxInt
Definition: globals.h:210
void Generate(MacroAssembler *masm)
virtual bool IsPregenerated()
void Generate(MacroAssembler *masm)
#define ASSERT(condition)
Definition: checks.h:270
const int kPointerSizeLog2
Definition: globals.h:232
static const int kShortCallInstructionLength
static const int kInstanceSizeOffset
Definition: objects.h:5147
static void GenerateCompareFlatAsciiStrings(MacroAssembler *masm, Register left, Register right, Register scratch1, Register scratch2, Register scratch3, Register scratch4)
static Handle< Object > MegamorphicSentinel(Isolate *isolate)
Definition: objects-inl.h:5344
static const char * GetName(TypeInfo type_info)
Definition: ic.cc:2306
const uint32_t kStringRepresentationMask
Definition: objects.h:474
static void GenerateOperation(MacroAssembler *masm, TranscendentalCache::Type type)
MemOperand GlobalObjectOperand()
static const int kSize
Definition: objects.h:8355
MemOperand ContextOperand(Register context, int index)
static const int kContextOffset
Definition: objects.h:6187
const uint32_t kAsciiDataHintTag
Definition: objects.h:498
const uint32_t kShortExternalStringMask
Definition: objects.h:502
static void GenerateNegativeLookup(MacroAssembler *masm, Label *miss, Label *done, Register receiver, Register properties, Handle< String > name, Register scratch0)
static const int kLastSubjectOffset
Definition: jsregexp.h:191
ProfileEntryHookStub()
Definition: code-stubs.h:1161
const int kIntSize
Definition: globals.h:217
static const int kZeroHash
Definition: objects.h:7017
void Generate(MacroAssembler *masm)
static const int kHashFieldOffset
Definition: objects.h:7319
static const int kSize
Definition: objects.h:8333
static const int kLastCaptureCountOffset
Definition: jsregexp.h:189
static const int kFirstOffset
Definition: objects.h:7653
static const int kMinLength
Definition: objects.h:7666
const uint32_t kNotStringTag
Definition: objects.h:457
static const int kParentOffset
Definition: objects.h:7705
static const int kLiteralsOffset
Definition: objects.h:6188
#define UNREACHABLE()
Definition: checks.h:50
static const int kArgumentsObjectSizeStrict
Definition: heap.h:898
static void GenerateCopyCharactersREP(MacroAssembler *masm, Register dest, Register src, Register count, Register scratch, bool ascii)
STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0)
static const int kLengthOffset
Definition: objects.h:7318
const uint32_t kIsSymbolMask
Definition: objects.h:462
const intptr_t kFailureTagMask
Definition: v8globals.h:64
static const int kValueOffset
Definition: objects.h:1342
const int kFailureTagSize
Definition: v8globals.h:63
static void GenerateFlatAsciiStringEquals(MacroAssembler *masm, Register left, Register right, Register scratch1, Register scratch2, Register scratch3)
const int kDoubleSize
Definition: globals.h:218
static const int kIrregexpCaptureCountOffset
Definition: objects.h:6670
static const int kInputOffset
Definition: objects.h:8354
static const size_t kWriteBarrierCounterOffset
Definition: spaces.h:504
static bool IsBitOp(Value op)
Definition: token.h:256
const XMMRegister xmm1
const uint32_t kIsIndirectStringMask
Definition: objects.h:481
void Generate(MacroAssembler *masm)
const Register r9
const int kPointerSize
Definition: globals.h:220
static const int kStringWrapperSafeForDefaultValueOf
Definition: objects.h:5177
static void MaybeCallEntryHook(MacroAssembler *masm)
Operand FieldOperand(Register object, int offset)
const int kHeapObjectTag
Definition: v8.h:4009
const Register rbx
const Register rsp
const uint32_t kAsciiDataHintMask
Definition: objects.h:497
#define __
const Register r12
Operand StackSpaceOperand(int index)
static const byte kFiveByteNopInstruction
void Generate(MacroAssembler *masm)
static const int kPropertiesOffset
Definition: objects.h:2171
const Register rax
static const int kMinLength
Definition: objects.h:7717
const uint32_t kShortExternalStringTag
Definition: objects.h:503
const Register r13
static void GenerateHashAddCharacter(MacroAssembler *masm, Register hash, Register character)
static void Generate(MacroAssembler *masm, Register string, Register index, Register result, Label *call_runtime)
const Register rdi
static const int kHeaderSize
Definition: objects.h:7517
static const int kNextFunctionLinkOffset
Definition: objects.h:6190
const int kBitsPerByte
Definition: globals.h:237
static int SizeFor(int length)
Definition: objects.h:2434
const Register r0
static const int kElementsOffset
Definition: objects.h:2172
bool IsPowerOf2(T x)
Definition: utils.h:50
const int kRootRegisterBias
const uint32_t kStringTag
Definition: objects.h:456
static bool IsEqualityOp(Value op)
Definition: token.h:222
static const int kOffsetOffset
Definition: objects.h:7706
void Generate(MacroAssembler *masm)
static const int kLengthOffset
Definition: objects.h:8332
static int SizeFor(int length)
Definition: objects.h:2353
void GenerateSlow(MacroAssembler *masm, const RuntimeCallHelper &call_helper)
virtual void Generate(MacroAssembler *masm)
static const int kLastMatchOverhead
Definition: jsregexp.h:186
const XMMRegister xmm3
static const int kHeaderSize
Definition: objects.h:2296
void Generate(MacroAssembler *masm)
static const int kMapOffset
Definition: objects.h:1261
bool is(Register reg) const
static const int kSkipEvacuationSlotsRecordingMask
Definition: spaces.h:414
const uint32_t kIsNotStringMask
Definition: objects.h:455
const Register r1
const uint32_t kSlicedNotConsMask
Definition: objects.h:492
static const int kLengthOffset
Definition: objects.h:2295
static const int kSize
Definition: objects.h:1350
void Generate(MacroAssembler *masm)
void Generate(MacroAssembler *masm)
static const int kSecondOffset
Definition: objects.h:7654
static void GeneratePositiveLookup(MacroAssembler *masm, Label *miss, Label *done, Register elements, Register name, Register r0, Register r1)
static const int kArgumentsLengthIndex
Definition: heap.h:901
const Register kScratchRegister
static const int kFirstCaptureOffset
Definition: jsregexp.h:195
#define UNIMPLEMENTED()
Definition: checks.h:48
static const uint32_t kHashBitMask
Definition: objects.h:7345
uint16_t uc16
Definition: globals.h:259
static const int kLastInputOffset
Definition: jsregexp.h:193
v8::Handle< v8::Value > Load(const v8::Arguments &args)
Definition: shell.cc:159
const int kSmiTagSize
Definition: v8.h:4015
static const int kHeaderSize
Definition: objects.h:4549
const Register r8
void GenerateBody(MacroAssembler *masm, bool is_construct)
static const int kDataAsciiCodeOffset
Definition: objects.h:6666
const Register rcx
Condition NegateCondition(Condition cond)
#define ASSERT_EQ(v1, v2)
Definition: checks.h:271
static void GenerateAheadOfTime()
Print usage message
Definition: flags.cc:495
static const int kArgumentsCalleeIndex
Definition: heap.h:903
const int kSmiTag
Definition: v8.h:4014
static const int kIsUndetectable
Definition: objects.h:5171
static const int kHeaderSize
Definition: objects.h:2173
void Generate(MacroAssembler *masm)
static const int kEntryLength
Definition: objects.h:5403
void GenerateFast(MacroAssembler *masm)
const int kFailureTag
Definition: v8globals.h:62
static void GenerateLookupNumberStringCache(MacroAssembler *masm, Register object, Register result, Register scratch1, Register scratch2, Register scratch3, bool object_is_smi, Label *not_found)
static const int kDataTagOffset
Definition: objects.h:6664
static const int kPrototypeOffset
Definition: objects.h:5126
static const int kSize
Definition: objects.h:6191
#define RUNTIME_ENTRY(name, nargs, ressize)
const Register no_reg
static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler *masm, Register c1, Register c2, Register scratch1, Register scratch2, Register scratch3, Register scratch4, Register scratch5, Label *not_found)
static const int kMaxLength
Definition: objects.h:7386
static const int kValueOffset
Definition: objects.h:6385
bool Contains(Type type) const
Definition: code-stubs.h:1055
const uint32_t kSymbolTag
Definition: objects.h:464
static const int kNativeContextOffset
Definition: objects.h:6286
const uint32_t kAsciiStringTag
Definition: objects.h:470
static const int kConstructStubOffset
Definition: objects.h:5799
static const int kExponentBits
Definition: objects.h:1355
static const int kHashShift
Definition: objects.h:7341
const XMMRegister xmm2
static const int kSharedFunctionInfoOffset
Definition: objects.h:6185
#define FUNCTION_ADDR(f)
Definition: globals.h:293
static const int kMaxValue
Definition: objects.h:1050
void Generate(MacroAssembler *masm)
static const int kBitField2Offset
Definition: objects.h:5161
static const int kMantissaBits
Definition: objects.h:1354
void Generate(MacroAssembler *masm)
CEntryStub(int result_size, SaveFPRegsMode save_doubles=kDontSaveFPRegs)
Definition: code-stubs.h:630
void check(i::Vector< const char > string)
const Register r15
static const int kDataUC16CodeOffset
Definition: objects.h:6668
void Generate(MacroAssembler *masm)
StoreBufferOverflowStub(SaveFPRegsMode save_fp)
static void GenerateHashInit(MacroAssembler *masm, Register hash, Register character)
static bool IsOrderedRelationalCompareOp(Value op)
Definition: token.h:218
const uint32_t kStringEncodingMask
Definition: objects.h:468
static const int kInstanceTypeOffset
Definition: objects.h:5158
static const int kIndexOffset
Definition: objects.h:8353
void Generate(MacroAssembler *masm)
const XMMRegister xmm0
void Generate(MacroAssembler *masm)