v8 3.11.10 (node 0.8.26)
V8 is Google's open source JavaScript engine.
code-stubs-x64.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if defined(V8_TARGET_ARCH_X64)
31 
32 #include "bootstrapper.h"
33 #include "code-stubs.h"
34 #include "regexp-macro-assembler.h"
35 
36 namespace v8 {
37 namespace internal {
38 
39 #define __ ACCESS_MASM(masm)
40 
41 void ToNumberStub::Generate(MacroAssembler* masm) {
42  // The ToNumber stub takes one argument in rax.
43  Label check_heap_number, call_builtin;
44  __ SmiTest(rax);
45  __ j(not_zero, &check_heap_number, Label::kNear);
46  __ Ret();
47 
48  __ bind(&check_heap_number);
49  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
50  Heap::kHeapNumberMapRootIndex);
51  __ j(not_equal, &call_builtin, Label::kNear);
52  __ Ret();
53 
54  __ bind(&call_builtin);
55  __ pop(rcx); // Pop return address.
56  __ push(rax);
57  __ push(rcx); // Push return address.
58  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
59 }
60 
61 
62 void FastNewClosureStub::Generate(MacroAssembler* masm) {
63  // Create a new closure from the given function info in new
64  // space. Set the context to the current context in rsi.
65  Label gc;
66  __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
67 
68  // Get the function info from the stack.
69  __ movq(rdx, Operand(rsp, 1 * kPointerSize));
70 
71  int map_index = (language_mode_ == CLASSIC_MODE)
72  ? Context::FUNCTION_MAP_INDEX
73  : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
74 
75  // Compute the function map in the current global context and set that
76  // as the map of the allocated object.
77  __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
78  __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
79  __ movq(rcx, Operand(rcx, Context::SlotOffset(map_index)));
80  __ movq(FieldOperand(rax, JSObject::kMapOffset), rcx);
81 
82  // Initialize the rest of the function. We don't have to update the
83  // write barrier because the allocated object is in new space.
84  __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
85  __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
86  __ LoadRoot(rdi, Heap::kUndefinedValueRootIndex);
87  __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
88  __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
89  __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), rcx);
90  __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
91  __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
92  __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
93  __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset), rdi);
94 
95  // Initialize the code pointer in the function to be the one
96  // found in the shared function info object.
97  __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
98  __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
99  __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
100 
101 
102  // Return and remove the on-stack parameter.
103  __ ret(1 * kPointerSize);
104 
105  // Create a new closure through the slower runtime call.
106  __ bind(&gc);
107  __ pop(rcx); // Temporarily remove return address.
108  __ pop(rdx);
109  __ push(rsi);
110  __ push(rdx);
111  __ PushRoot(Heap::kFalseValueRootIndex);
112  __ push(rcx); // Restore return address.
113  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
114 }
115 
116 
117 void FastNewContextStub::Generate(MacroAssembler* masm) {
118  // Try to allocate the context in new space.
119  Label gc;
120  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
121  __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
122  rax, rbx, rcx, &gc, TAG_OBJECT);
123 
124  // Get the function from the stack.
125  __ movq(rcx, Operand(rsp, 1 * kPointerSize));
126 
127  // Set up the object header.
128  __ LoadRoot(kScratchRegister, Heap::kFunctionContextMapRootIndex);
129  __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
130  __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
131 
132  // Set up the fixed slots.
133  __ Set(rbx, 0); // Set to NULL.
134  __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
135  __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rsi);
136  __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
137 
138  // Copy the global object from the previous context.
139  __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
140  __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx);
141 
142  // Initialize the rest of the slots to undefined.
143  __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
144  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
145  __ movq(Operand(rax, Context::SlotOffset(i)), rbx);
146  }
147 
148  // Return and remove the on-stack parameter.
149  __ movq(rsi, rax);
150  __ ret(1 * kPointerSize);
151 
152  // Need to collect. Call into runtime system.
153  __ bind(&gc);
154  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
155 }
156 
157 
158 void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
159  // Stack layout on entry:
160  //
161  // [rsp + (1 * kPointerSize)]: function
162  // [rsp + (2 * kPointerSize)]: serialized scope info
163 
164  // Try to allocate the context in new space.
165  Label gc;
166  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
167  __ AllocateInNewSpace(FixedArray::SizeFor(length),
168  rax, rbx, rcx, &gc, TAG_OBJECT);
169 
170  // Get the function from the stack.
171  __ movq(rcx, Operand(rsp, 1 * kPointerSize));
172 
173  // Get the serialized scope info from the stack.
174  __ movq(rbx, Operand(rsp, 2 * kPointerSize));
175 
176  // Set up the object header.
177  __ LoadRoot(kScratchRegister, Heap::kBlockContextMapRootIndex);
178  __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
179  __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
180 
181  // If this block context is nested in the global context we get a smi
182  // sentinel instead of a function. The block context should get the
183  // canonical empty function of the global context as its closure which
184  // we still have to look up.
185  Label after_sentinel;
186  __ JumpIfNotSmi(rcx, &after_sentinel, Label::kNear);
187  if (FLAG_debug_code) {
188  const char* message = "Expected 0 as a Smi sentinel";
189  __ cmpq(rcx, Immediate(0));
190  __ Assert(equal, message);
191  }
192  __ movq(rcx, GlobalObjectOperand());
193  __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
194  __ movq(rcx, ContextOperand(rcx, Context::CLOSURE_INDEX));
195  __ bind(&after_sentinel);
196 
197  // Set up the fixed slots.
198  __ movq(ContextOperand(rax, Context::CLOSURE_INDEX), rcx);
199  __ movq(ContextOperand(rax, Context::PREVIOUS_INDEX), rsi);
200  __ movq(ContextOperand(rax, Context::EXTENSION_INDEX), rbx);
201 
202  // Copy the global object from the previous context.
203  __ movq(rbx, ContextOperand(rsi, Context::GLOBAL_INDEX));
204  __ movq(ContextOperand(rax, Context::GLOBAL_INDEX), rbx);
205 
206  // Initialize the rest of the slots to the hole value.
207  __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
208  for (int i = 0; i < slots_; i++) {
209  __ movq(ContextOperand(rax, i + Context::MIN_CONTEXT_SLOTS), rbx);
210  }
211 
212  // Return and remove the on-stack parameter.
213  __ movq(rsi, rax);
214  __ ret(2 * kPointerSize);
215 
216  // Need to collect. Call into runtime system.
217  __ bind(&gc);
218  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
219 }
220 
221 
222 static void GenerateFastCloneShallowArrayCommon(
223  MacroAssembler* masm,
224  int length,
225  FastCloneShallowArrayStub::Mode mode,
226  Label* fail) {
227  // Registers on entry:
228  //
229  // rcx: boilerplate literal array.
230  ASSERT(mode != CLONE_ANY_ELEMENTS);
231 
232  // All sizes here are multiples of kPointerSize.
233  int elements_size = 0;
234  if (length > 0) {
235  elements_size = (mode == CLONE_DOUBLE_ELEMENTS)
236  ? FixedDoubleArray::SizeFor(length)
237  : FixedArray::SizeFor(length);
238  }
239  int size = JSArray::kSize + elements_size;
240 
241  // Allocate both the JS array and the elements array in one big
242  // allocation. This avoids multiple limit checks.
243  __ AllocateInNewSpace(size, rax, rbx, rdx, fail, TAG_OBJECT);
244 
245  // Copy the JS array part.
246  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
247  if ((i != JSArray::kElementsOffset) || (length == 0)) {
248  __ movq(rbx, FieldOperand(rcx, i));
249  __ movq(FieldOperand(rax, i), rbx);
250  }
251  }
252 
253  if (length > 0) {
254  // Get hold of the elements array of the boilerplate and setup the
255  // elements pointer in the resulting object.
256  __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
257  __ lea(rdx, Operand(rax, JSArray::kSize));
258  __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
259 
260  // Copy the elements array.
261  if (mode == CLONE_ELEMENTS) {
262  for (int i = 0; i < elements_size; i += kPointerSize) {
263  __ movq(rbx, FieldOperand(rcx, i));
264  __ movq(FieldOperand(rdx, i), rbx);
265  }
266  } else {
267  ASSERT(mode == CLONE_DOUBLE_ELEMENTS);
268  int i;
269  for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
270  __ movq(rbx, FieldOperand(rcx, i));
271  __ movq(FieldOperand(rdx, i), rbx);
272  }
273  while (i < elements_size) {
274  __ movsd(xmm0, FieldOperand(rcx, i));
275  __ movsd(FieldOperand(rdx, i), xmm0);
276  i += kDoubleSize;
277  }
278  ASSERT(i == elements_size);
279  }
280  }
281 }
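// Note on the layout produced above: the clone is one new-space allocation of
// JSArray::kSize + elements_size bytes, with the JSArray header first and its
// elements array placed immediately after it, which is why the clone's
// elements pointer can be computed as rax + JSArray::kSize.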
282 
283 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
284  // Stack layout on entry:
285  //
286  // [rsp + kPointerSize]: constant elements.
287  // [rsp + (2 * kPointerSize)]: literal index.
288  // [rsp + (3 * kPointerSize)]: literals array.
289 
290  // Load boilerplate object into rcx and check if we need to create a
291  // boilerplate.
292  __ movq(rcx, Operand(rsp, 3 * kPointerSize));
293  __ movq(rax, Operand(rsp, 2 * kPointerSize));
294  SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
295  __ movq(rcx,
296  FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
297  __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
298  Label slow_case;
299  __ j(equal, &slow_case);
300 
301  FastCloneShallowArrayStub::Mode mode = mode_;
302  // rcx is boilerplate object.
303  Factory* factory = masm->isolate()->factory();
304  if (mode == CLONE_ANY_ELEMENTS) {
305  Label double_elements, check_fast_elements;
306  __ movq(rbx, FieldOperand(rcx, JSObject::kElementsOffset));
307  __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
308  factory->fixed_cow_array_map());
309  __ j(not_equal, &check_fast_elements);
310  GenerateFastCloneShallowArrayCommon(masm, 0,
311  COPY_ON_WRITE_ELEMENTS, &slow_case);
312  __ ret(3 * kPointerSize);
313 
314  __ bind(&check_fast_elements);
315  __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
316  factory->fixed_array_map());
317  __ j(not_equal, &double_elements);
318  GenerateFastCloneShallowArrayCommon(masm, length_,
319  CLONE_ELEMENTS, &slow_case);
320  __ ret(3 * kPointerSize);
321 
322  __ bind(&double_elements);
323  mode = CLONE_DOUBLE_ELEMENTS;
324  // Fall through to generate the code to handle double elements.
325  }
326 
327  if (FLAG_debug_code) {
328  const char* message;
329  Heap::RootListIndex expected_map_index;
330  if (mode == CLONE_ELEMENTS) {
331  message = "Expected (writable) fixed array";
332  expected_map_index = Heap::kFixedArrayMapRootIndex;
333  } else if (mode == CLONE_DOUBLE_ELEMENTS) {
334  message = "Expected (writable) fixed double array";
335  expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
336  } else {
337  ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
338  message = "Expected copy-on-write fixed array";
339  expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
340  }
341  __ push(rcx);
342  __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
343  __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
344  expected_map_index);
345  __ Assert(equal, message);
346  __ pop(rcx);
347  }
348 
349  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
350  __ ret(3 * kPointerSize);
351 
352  __ bind(&slow_case);
353  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
354 }
355 
356 
357 void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
358  // Stack layout on entry:
359  //
360  // [rsp + kPointerSize]: object literal flags.
361  // [rsp + (2 * kPointerSize)]: constant properties.
362  // [rsp + (3 * kPointerSize)]: literal index.
363  // [rsp + (4 * kPointerSize)]: literals array.
364 
365  // Load boilerplate object into rcx and check if we need to create a
366  // boilerplate.
367  Label slow_case;
368  __ movq(rcx, Operand(rsp, 4 * kPointerSize));
369  __ movq(rax, Operand(rsp, 3 * kPointerSize));
370  SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
371  __ movq(rcx,
372  FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
373  __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
374  __ j(equal, &slow_case);
375 
376  // Check that the boilerplate contains only fast properties and we can
377  // statically determine the instance size.
378  int size = JSObject::kHeaderSize + length_ * kPointerSize;
379  __ movq(rax, FieldOperand(rcx, HeapObject::kMapOffset));
380  __ movzxbq(rax, FieldOperand(rax, Map::kInstanceSizeOffset));
381  __ cmpq(rax, Immediate(size >> kPointerSizeLog2));
382  __ j(not_equal, &slow_case);
383 
384  // Allocate the JS object and copy header together with all in-object
385  // properties from the boilerplate.
386  __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
387  for (int i = 0; i < size; i += kPointerSize) {
388  __ movq(rbx, FieldOperand(rcx, i));
389  __ movq(FieldOperand(rax, i), rbx);
390  }
391 
392  // Return and remove the on-stack parameters.
393  __ ret(4 * kPointerSize);
394 
395  __ bind(&slow_case);
396  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
397 }
398 
399 
400 // The stub expects its argument on the stack and returns its result in tos_:
401 // zero for false, and a non-zero value for true.
402 void ToBooleanStub::Generate(MacroAssembler* masm) {
403  // This stub overrides SometimesSetsUpAFrame() to return false. That means
404  // we cannot call anything that could cause a GC from this stub.
405  Label patch;
406  const Register argument = rax;
407  const Register map = rdx;
408 
409  if (!types_.IsEmpty()) {
410  __ movq(argument, Operand(rsp, 1 * kPointerSize));
411  }
412 
413  // undefined -> false
414  CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
415 
416  // Boolean -> its value
417  CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
418  CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
419 
420  // 'null' -> false.
421  CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
422 
423  if (types_.Contains(SMI)) {
424  // Smis: 0 -> false, all other -> true
425  Label not_smi;
426  __ JumpIfNotSmi(argument, &not_smi, Label::kNear);
427  // argument contains the correct return value already
428  if (!tos_.is(argument)) {
429  __ movq(tos_, argument);
430  }
431  __ ret(1 * kPointerSize);
432  __ bind(&not_smi);
433  } else if (types_.NeedsMap()) {
434  // If we need a map later and have a Smi -> patch.
435  __ JumpIfSmi(argument, &patch, Label::kNear);
436  }
437 
438  if (types_.NeedsMap()) {
439  __ movq(map, FieldOperand(argument, HeapObject::kMapOffset));
440 
441  if (types_.CanBeUndetectable()) {
442  __ testb(FieldOperand(map, Map::kBitFieldOffset),
443  Immediate(1 << Map::kIsUndetectable));
444  // Undetectable -> false.
445  Label not_undetectable;
446  __ j(zero, &not_undetectable, Label::kNear);
447  __ Set(tos_, 0);
448  __ ret(1 * kPointerSize);
449  __ bind(&not_undetectable);
450  }
451  }
452 
453  if (types_.Contains(SPEC_OBJECT)) {
454  // spec object -> true.
455  Label not_js_object;
456  __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
457  __ j(below, &not_js_object, Label::kNear);
458  // argument contains the correct return value already.
459  if (!tos_.is(argument)) {
460  __ Set(tos_, 1);
461  }
462  __ ret(1 * kPointerSize);
463  __ bind(&not_js_object);
464  }
465 
466  if (types_.Contains(STRING)) {
467  // String value -> false iff empty.
468  Label not_string;
469  __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
470  __ j(above_equal, &not_string, Label::kNear);
471  __ movq(tos_, FieldOperand(argument, String::kLengthOffset));
472  __ ret(1 * kPointerSize); // the string length is OK as the return value
473  __ bind(&not_string);
474  }
475 
476  if (types_.Contains(HEAP_NUMBER)) {
477  // heap number -> false iff +0, -0, or NaN.
478  Label not_heap_number, false_result;
479  __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
480  __ j(not_equal, &not_heap_number, Label::kNear);
481  __ xorps(xmm0, xmm0);
482  __ ucomisd(xmm0, FieldOperand(argument, HeapNumber::kValueOffset));
483  __ j(zero, &false_result, Label::kNear);
484  // argument contains the correct return value already.
485  if (!tos_.is(argument)) {
486  __ Set(tos_, 1);
487  }
488  __ ret(1 * kPointerSize);
489  __ bind(&false_result);
490  __ Set(tos_, 0);
491  __ ret(1 * kPointerSize);
492  __ bind(&not_heap_number);
493  }
494 
495  __ bind(&patch);
496  GenerateTypeTransition(masm);
497 }
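// For reference, the mapping implemented by the checks above (ES5 ToBoolean):
// undefined, null, false, Smi zero, the empty string, +0, -0 and NaN yield a
// zero tos_ (false); every other value, including any spec object that is not
// undetectable, yields a non-zero tos_ (true).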
498 
499 
500 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
501  __ PushCallerSaved(save_doubles_);
502  const int argument_count = 1;
503  __ PrepareCallCFunction(argument_count);
504 #ifdef _WIN64
505  __ LoadAddress(rcx, ExternalReference::isolate_address());
506 #else
507  __ LoadAddress(rdi, ExternalReference::isolate_address());
508 #endif
509 
510  AllowExternalCallThatCantCauseGC scope(masm);
511  __ CallCFunction(
512  ExternalReference::store_buffer_overflow_function(masm->isolate()),
513  argument_count);
514  __ PopCallerSaved(save_doubles_);
515  __ ret(0);
516 }
517 
518 
519 void ToBooleanStub::CheckOddball(MacroAssembler* masm,
520  Type type,
521  Heap::RootListIndex value,
522  bool result) {
523  const Register argument = rax;
524  if (types_.Contains(type)) {
525  // If we see an expected oddball, return its ToBoolean value tos_.
526  Label different_value;
527  __ CompareRoot(argument, value);
528  __ j(not_equal, &different_value, Label::kNear);
529  if (!result) {
530  // If we have to return zero, there is no way around clearing tos_.
531  __ Set(tos_, 0);
532  } else if (!tos_.is(argument)) {
533  // If we have to return non-zero, we can re-use the argument if it is the
534  // same register as the result, because we never see Smi-zero here.
535  __ Set(tos_, 1);
536  }
537  __ ret(1 * kPointerSize);
538  __ bind(&different_value);
539  }
540 }
541 
542 
543 void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
544  __ pop(rcx); // Get return address, operand is now on top of stack.
545  __ Push(Smi::FromInt(tos_.code()));
546  __ Push(Smi::FromInt(types_.ToByte()));
547  __ push(rcx); // Push return address.
548  // Patch the caller to an appropriate specialized stub and return the
549  // operation result to the caller of the stub.
550  __ TailCallExternalReference(
551  ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
552  3,
553  1);
554 }
555 
556 
557 class FloatingPointHelper : public AllStatic {
558  public:
559  // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
560  // If the operands are not both numbers, jump to not_numbers.
561  // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
562  // NumberOperands assumes both are smis or heap numbers.
563  static void LoadSSE2SmiOperands(MacroAssembler* masm);
564  static void LoadSSE2NumberOperands(MacroAssembler* masm);
565  static void LoadSSE2UnknownOperands(MacroAssembler* masm,
566  Label* not_numbers);
567 
568  // Takes the operands in rdx and rax and loads them as integers in rax
569  // and rcx.
570  static void LoadAsIntegers(MacroAssembler* masm,
571  Label* operand_conversion_failure,
572  Register heap_number_map);
573  // As above, but we know the operands to be numbers. In that case,
574  // conversion can't fail.
575  static void LoadNumbersAsIntegers(MacroAssembler* masm);
576 
577  // Tries to convert two values to smis losslessly.
578  // This fails if either argument is not a Smi nor a HeapNumber,
579  // or if it's a HeapNumber with a value that can't be converted
580  // losslessly to a Smi. In that case, control transitions to the
581  // on_not_smis label.
582  // On success, either control goes to the on_success label (if one is
583  // provided), or it falls through at the end of the code (if on_success
584  // is NULL).
585  // On success, both first and second holds Smi tagged values.
586  // One of first or second must be non-Smi when entering.
587  static void NumbersToSmis(MacroAssembler* masm,
588  Register first,
589  Register second,
590  Register scratch1,
591  Register scratch2,
592  Register scratch3,
593  Label* on_success,
594  Label* on_not_smis);
595 };
596 
597 
598 // Get the integer part of a heap number.
599 // Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
600 void IntegerConvert(MacroAssembler* masm,
601  Register result,
602  Register source) {
603  // Result may be rcx. If result and source are the same register, source will
604  // be overwritten.
605  ASSERT(!result.is(rdi) && !result.is(rbx));
606  // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
607  // cvttsd2si (32-bit version) directly.
608  Register double_exponent = rbx;
609  Register double_value = rdi;
610  Label done, exponent_63_plus;
611  // Get double and extract exponent.
612  __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
613  // Clear result preemptively, in case we need to return zero.
614  __ xorl(result, result);
615  __ movq(xmm0, double_value); // Save copy in xmm0 in case we need it there.
616  // Double the value to remove the sign bit, shift the exponent down to the least significant bits,
617  // and subtract bias to get the unshifted, unbiased exponent.
618  __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
619  __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
620  __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
621  // Check whether the exponent is too big for a 63 bit unsigned integer.
622  __ cmpl(double_exponent, Immediate(63));
623  __ j(above_equal, &exponent_63_plus, Label::kNear);
624  // Handle exponent range 0..62.
625  __ cvttsd2siq(result, xmm0);
626  __ jmp(&done, Label::kNear);
627 
628  __ bind(&exponent_63_plus);
629  // Exponent negative or 63+.
630  __ cmpl(double_exponent, Immediate(83));
631  // If exponent negative or above 83, number contains no significant bits in
632  // the range 0..2^31, so result is zero, and rcx already holds zero.
633  __ j(above, &done, Label::kNear);
634 
635  // Exponent in range 63..83.
636  // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
637  // the least significant exponent-52 bits.
638 
639  // Negate low bits of mantissa if value is negative.
640  __ addq(double_value, double_value); // Move sign bit to carry.
641  __ sbbl(result, result); // And convert carry to -1 in result register.
642  // If double_value is negative, do (double_value-1)^-1, otherwise (double_value-0)^0.
643  __ addl(double_value, result);
644  // Do xor in opposite directions depending on where we want the result
645  // (depending on whether result is rcx or not).
646 
647  if (result.is(rcx)) {
648  __ xorl(double_value, result);
649  // Left shift mantissa by (exponent - mantissabits - 1) to save the
650  // bits that have positional values below 2^32 (the extra -1 comes from the
651  // doubling done above to move the sign bit into the carry flag).
652  __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
653  __ shll_cl(double_value);
654  __ movl(result, double_value);
655  } else {
656  // As the then-branch, but move double-value to result before shifting.
657  __ xorl(result, double_value);
658  __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
659  __ shll_cl(result);
660  }
661 
662  __ bind(&done);
663 }
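// For intuition, the conversion above behaves roughly like the following C++
// sketch (illustrative only; the generated code works on raw registers and
// folds the sign handling into carry-flag tricks):
//   uint64_t bits = bit_cast<uint64_t>(value);
//   int exponent =
//       static_cast<int>((bits >> 52) & 0x7ff) - HeapNumber::kExponentBias;
//   if (exponent < 0 || exponent > 83) return 0;  // No bits below 2^32.
//   if (exponent < 63) return static_cast<uint32_t>(static_cast<int64_t>(value));
//   uint32_t low = static_cast<uint32_t>(bits) << (exponent - 52);
//   return (bits >> 63) ? -low : low;  // Reapply the sign.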
664 
665 
666 void UnaryOpStub::Generate(MacroAssembler* masm) {
667  switch (operand_type_) {
668  case UnaryOpIC::UNINITIALIZED:
669  GenerateTypeTransition(masm);
670  break;
671  case UnaryOpIC::SMI:
672  GenerateSmiStub(masm);
673  break;
674  case UnaryOpIC::HEAP_NUMBER:
675  GenerateHeapNumberStub(masm);
676  break;
677  case UnaryOpIC::GENERIC:
678  GenerateGenericStub(masm);
679  break;
680  }
681 }
682 
683 
684 void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
685  __ pop(rcx); // Save return address.
686 
687  __ push(rax); // the operand
688  __ Push(Smi::FromInt(op_));
689  __ Push(Smi::FromInt(mode_));
690  __ Push(Smi::FromInt(operand_type_));
691 
692  __ push(rcx); // Push return address.
693 
694  // Patch the caller to an appropriate specialized stub and return the
695  // operation result to the caller of the stub.
696  __ TailCallExternalReference(
697  ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
698 }
699 
700 
701 // TODO(svenpanne): Use virtual functions instead of switch.
702 void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
703  switch (op_) {
704  case Token::SUB:
705  GenerateSmiStubSub(masm);
706  break;
707  case Token::BIT_NOT:
708  GenerateSmiStubBitNot(masm);
709  break;
710  default:
711  UNREACHABLE();
712  }
713 }
714 
715 
716 void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
717  Label slow;
718  GenerateSmiCodeSub(masm, &slow, &slow, Label::kNear, Label::kNear);
719  __ bind(&slow);
720  GenerateTypeTransition(masm);
721 }
722 
723 
724 void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
725  Label non_smi;
726  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
727  __ bind(&non_smi);
728  GenerateTypeTransition(masm);
729 }
730 
731 
732 void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
733  Label* non_smi,
734  Label* slow,
735  Label::Distance non_smi_near,
736  Label::Distance slow_near) {
737  Label done;
738  __ JumpIfNotSmi(rax, non_smi, non_smi_near);
739  __ SmiNeg(rax, rax, &done, Label::kNear);
740  __ jmp(slow, slow_near);
741  __ bind(&done);
742  __ ret(0);
743 }
744 
745 
746 void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
747  Label* non_smi,
748  Label::Distance non_smi_near) {
749  __ JumpIfNotSmi(rax, non_smi, non_smi_near);
750  __ SmiNot(rax, rax);
751  __ ret(0);
752 }
753 
754 
755 // TODO(svenpanne): Use virtual functions instead of switch.
756 void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
757  switch (op_) {
758  case Token::SUB:
759  GenerateHeapNumberStubSub(masm);
760  break;
761  case Token::BIT_NOT:
762  GenerateHeapNumberStubBitNot(masm);
763  break;
764  default:
765  UNREACHABLE();
766  }
767 }
768 
769 
770 void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
771  Label non_smi, slow, call_builtin;
772  GenerateSmiCodeSub(masm, &non_smi, &call_builtin, Label::kNear);
773  __ bind(&non_smi);
774  GenerateHeapNumberCodeSub(masm, &slow);
775  __ bind(&slow);
776  GenerateTypeTransition(masm);
777  __ bind(&call_builtin);
778  GenerateGenericCodeFallback(masm);
779 }
780 
781 
782 void UnaryOpStub::GenerateHeapNumberStubBitNot(
783  MacroAssembler* masm) {
784  Label non_smi, slow;
785  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
786  __ bind(&non_smi);
787  GenerateHeapNumberCodeBitNot(masm, &slow);
788  __ bind(&slow);
789  GenerateTypeTransition(masm);
790 }
791 
792 
793 void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
794  Label* slow) {
795  // Check if the operand is a heap number.
796  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
797  Heap::kHeapNumberMapRootIndex);
798  __ j(not_equal, slow);
799 
800  // Operand is a float, negate its value by flipping the sign bit.
801  if (mode_ == UNARY_OVERWRITE) {
802  __ Set(kScratchRegister, 0x01);
803  __ shl(kScratchRegister, Immediate(63));
804  __ xor_(FieldOperand(rax, HeapNumber::kValueOffset), kScratchRegister);
805  } else {
806  // Allocate a heap number before calculating the answer,
807  // so we don't have an untagged double around during GC.
808  Label slow_allocate_heapnumber, heapnumber_allocated;
809  __ AllocateHeapNumber(rcx, rbx, &slow_allocate_heapnumber);
810  __ jmp(&heapnumber_allocated);
811 
812  __ bind(&slow_allocate_heapnumber);
813  {
814  FrameScope scope(masm, StackFrame::INTERNAL);
815  __ push(rax);
816  __ CallRuntime(Runtime::kNumberAlloc, 0);
817  __ movq(rcx, rax);
818  __ pop(rax);
819  }
820  __ bind(&heapnumber_allocated);
821  // rcx: allocated 'empty' number
822 
823  // Copy the double value to the new heap number, flipping the sign.
824  __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
825  __ Set(kScratchRegister, 0x01);
826  __ shl(kScratchRegister, Immediate(63));
827  __ xor_(rdx, kScratchRegister); // Flip sign.
828  __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
829  __ movq(rax, rcx);
830  }
831  __ ret(0);
832 }
833 
834 
835 void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
836  Label* slow) {
837  // Check if the operand is a heap number.
838  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
839  Heap::kHeapNumberMapRootIndex);
840  __ j(not_equal, slow);
841 
842  // Convert the heap number in rax to an untagged integer in rcx.
843  IntegerConvert(masm, rax, rax);
844 
845  // Do the bitwise operation and smi tag the result.
846  __ notl(rax);
847  __ Integer32ToSmi(rax, rax);
848  __ ret(0);
849 }
850 
851 
852 // TODO(svenpanne): Use virtual functions instead of switch.
853 void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
854  switch (op_) {
855  case Token::SUB:
856  GenerateGenericStubSub(masm);
857  break;
858  case Token::BIT_NOT:
859  GenerateGenericStubBitNot(masm);
860  break;
861  default:
862  UNREACHABLE();
863  }
864 }
865 
866 
867 void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
868  Label non_smi, slow;
869  GenerateSmiCodeSub(masm, &non_smi, &slow, Label::kNear);
870  __ bind(&non_smi);
871  GenerateHeapNumberCodeSub(masm, &slow);
872  __ bind(&slow);
873  GenerateGenericCodeFallback(masm);
874 }
875 
876 
877 void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
878  Label non_smi, slow;
879  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
880  __ bind(&non_smi);
881  GenerateHeapNumberCodeBitNot(masm, &slow);
882  __ bind(&slow);
883  GenerateGenericCodeFallback(masm);
884 }
885 
886 
887 void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
888  // Handle the slow case by jumping to the JavaScript builtin.
889  __ pop(rcx); // pop return address
890  __ push(rax);
891  __ push(rcx); // push return address
892  switch (op_) {
893  case Token::SUB:
894  __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
895  break;
896  case Token::BIT_NOT:
897  __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
898  break;
899  default:
900  UNREACHABLE();
901  }
902 }
903 
904 
905 void UnaryOpStub::PrintName(StringStream* stream) {
906  const char* op_name = Token::Name(op_);
907  const char* overwrite_name = NULL; // Make g++ happy.
908  switch (mode_) {
909  case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
910  case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
911  }
912  stream->Add("UnaryOpStub_%s_%s_%s",
913  op_name,
914  overwrite_name,
915  UnaryOpIC::GetName(operand_type_));
916 }
917 
918 
919 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
920  __ pop(rcx); // Save return address.
921  __ push(rdx);
922  __ push(rax);
923  // Left and right arguments are now on top.
924  // Push this stub's key. Although the operation and the type info are
925  // encoded into the key, the encoding is opaque, so push them too.
926  __ Push(Smi::FromInt(MinorKey()));
927  __ Push(Smi::FromInt(op_));
928  __ Push(Smi::FromInt(operands_type_));
929 
930  __ push(rcx); // Push return address.
931 
932  // Patch the caller to an appropriate specialized stub and return the
933  // operation result to the caller of the stub.
934  __ TailCallExternalReference(
935  ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
936  masm->isolate()),
937  5,
938  1);
939 }
940 
941 
942 void BinaryOpStub::Generate(MacroAssembler* masm) {
943  // Explicitly allow generation of nested stubs. It is safe here because
944  // generation code does not use any raw pointers.
945  AllowStubCallsScope allow_stub_calls(masm, true);
946 
947  switch (operands_type_) {
948  case BinaryOpIC::UNINITIALIZED:
949  GenerateTypeTransition(masm);
950  break;
951  case BinaryOpIC::SMI:
952  GenerateSmiStub(masm);
953  break;
954  case BinaryOpIC::INT32:
955  UNREACHABLE();
956  // The int32 case is identical to the Smi case. We avoid creating this
957  // ic state on x64.
958  break;
959  case BinaryOpIC::HEAP_NUMBER:
960  GenerateHeapNumberStub(masm);
961  break;
962  case BinaryOpIC::ODDBALL:
963  GenerateOddballStub(masm);
964  break;
965  case BinaryOpIC::BOTH_STRING:
966  GenerateBothStringStub(masm);
967  break;
968  case BinaryOpIC::STRING:
969  GenerateStringStub(masm);
970  break;
971  case BinaryOpIC::GENERIC:
972  GenerateGeneric(masm);
973  break;
974  default:
975  UNREACHABLE();
976  }
977 }
978 
979 
980 void BinaryOpStub::PrintName(StringStream* stream) {
981  const char* op_name = Token::Name(op_);
982  const char* overwrite_name;
983  switch (mode_) {
984  case NO_OVERWRITE: overwrite_name = "Alloc"; break;
985  case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
986  case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
987  default: overwrite_name = "UnknownOverwrite"; break;
988  }
989  stream->Add("BinaryOpStub_%s_%s_%s",
990  op_name,
991  overwrite_name,
992  BinaryOpIC::GetName(operands_type_));
993 }
994 
995 
996 void BinaryOpStub::GenerateSmiCode(
997  MacroAssembler* masm,
998  Label* slow,
999  SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
1000 
1001  // Arguments to BinaryOpStub are in rdx and rax.
1002  Register left = rdx;
1003  Register right = rax;
1004 
1005  // We only generate heapnumber answers for overflowing calculations
1006  // for the four basic arithmetic operations and logical right shift by 0.
1007  bool generate_inline_heapnumber_results =
1008  (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) &&
1009  (op_ == Token::ADD || op_ == Token::SUB ||
1010  op_ == Token::MUL || op_ == Token::DIV || op_ == Token::SHR);
1011 
1012  // Smi check of both operands. If op is BIT_OR, the check is delayed
1013  // until after the OR operation.
1014  Label not_smis;
1015  Label use_fp_on_smis;
1016  Label fail;
1017 
1018  if (op_ != Token::BIT_OR) {
1019  Comment smi_check_comment(masm, "-- Smi check arguments");
1020  __ JumpIfNotBothSmi(left, right, &not_smis);
1021  }
1022 
1023  Label smi_values;
1024  __ bind(&smi_values);
1025  // Perform the operation.
1026  Comment perform_smi(masm, "-- Perform smi operation");
1027  switch (op_) {
1028  case Token::ADD:
1029  ASSERT(right.is(rax));
1030  __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
1031  break;
1032 
1033  case Token::SUB:
1034  __ SmiSub(left, left, right, &use_fp_on_smis);
1035  __ movq(rax, left);
1036  break;
1037 
1038  case Token::MUL:
1039  ASSERT(right.is(rax));
1040  __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
1041  break;
1042 
1043  case Token::DIV:
1044  // SmiDiv will not accept left in rdx or right in rax.
1045  left = rcx;
1046  right = rbx;
1047  __ movq(rbx, rax);
1048  __ movq(rcx, rdx);
1049  __ SmiDiv(rax, left, right, &use_fp_on_smis);
1050  break;
1051 
1052  case Token::MOD:
1053  // SmiMod will not accept left in rdx or right in rax.
1054  left = rcx;
1055  right = rbx;
1056  __ movq(rbx, rax);
1057  __ movq(rcx, rdx);
1058  __ SmiMod(rax, left, right, &use_fp_on_smis);
1059  break;
1060 
1061  case Token::BIT_OR: {
1062  ASSERT(right.is(rax));
1063  __ SmiOrIfSmis(right, right, left, &not_smis); // BIT_OR is commutative.
1064  break;
1065  }
1066  case Token::BIT_XOR:
1067  ASSERT(right.is(rax));
1068  __ SmiXor(right, right, left); // BIT_XOR is commutative.
1069  break;
1070 
1071  case Token::BIT_AND:
1072  ASSERT(right.is(rax));
1073  __ SmiAnd(right, right, left); // BIT_AND is commutative.
1074  break;
1075 
1076  case Token::SHL:
1077  __ SmiShiftLeft(left, left, right);
1078  __ movq(rax, left);
1079  break;
1080 
1081  case Token::SAR:
1082  __ SmiShiftArithmeticRight(left, left, right);
1083  __ movq(rax, left);
1084  break;
1085 
1086  case Token::SHR:
1087  __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis);
1088  __ movq(rax, left);
1089  break;
1090 
1091  default:
1092  UNREACHABLE();
1093  }
1094 
1095  // 5. Emit return of result in rax. Some operations have registers pushed.
1096  __ ret(0);
1097 
1098  if (use_fp_on_smis.is_linked()) {
1099  // 6. For some operations emit inline code to perform floating point
1100  // operations on known smis (e.g., if the result of the operation
1101  // overflowed the smi range).
1102  __ bind(&use_fp_on_smis);
1103  if (op_ == Token::DIV || op_ == Token::MOD) {
1104  // Restore left and right to rdx and rax.
1105  __ movq(rdx, rcx);
1106  __ movq(rax, rbx);
1107  }
1108 
1109  if (generate_inline_heapnumber_results) {
1110  __ AllocateHeapNumber(rcx, rbx, slow);
1111  Comment perform_float(masm, "-- Perform float operation on smis");
1112  if (op_ == Token::SHR) {
1113  __ SmiToInteger32(left, left);
1114  __ cvtqsi2sd(xmm0, left);
1115  } else {
1116  FloatingPointHelper::LoadSSE2SmiOperands(masm);
1117  switch (op_) {
1118  case Token::ADD: __ addsd(xmm0, xmm1); break;
1119  case Token::SUB: __ subsd(xmm0, xmm1); break;
1120  case Token::MUL: __ mulsd(xmm0, xmm1); break;
1121  case Token::DIV: __ divsd(xmm0, xmm1); break;
1122  default: UNREACHABLE();
1123  }
1124  }
1125  __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
1126  __ movq(rax, rcx);
1127  __ ret(0);
1128  } else {
1129  __ jmp(&fail);
1130  }
1131  }
1132 
1133  // 7. Non-smi operands reach the end of the code generated by
1134  // GenerateSmiCode, and fall through to subsequent code,
1135  // with the operands in rdx and rax.
1136  // But first we check if non-smi values are HeapNumbers holding
1137  // values that could be smi.
1138  __ bind(&not_smis);
1139  Comment done_comment(masm, "-- Enter non-smi code");
1140  FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx,
1141  &smi_values, &fail);
1142  __ jmp(&smi_values);
1143  __ bind(&fail);
1144 }
1145 
1146 
1147 void BinaryOpStub::GenerateFloatingPointCode(MacroAssembler* masm,
1148  Label* allocation_failure,
1149  Label* non_numeric_failure) {
1150  switch (op_) {
1151  case Token::ADD:
1152  case Token::SUB:
1153  case Token::MUL:
1154  case Token::DIV: {
1155  FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure);
1156 
1157  switch (op_) {
1158  case Token::ADD: __ addsd(xmm0, xmm1); break;
1159  case Token::SUB: __ subsd(xmm0, xmm1); break;
1160  case Token::MUL: __ mulsd(xmm0, xmm1); break;
1161  case Token::DIV: __ divsd(xmm0, xmm1); break;
1162  default: UNREACHABLE();
1163  }
1164  GenerateHeapResultAllocation(masm, allocation_failure);
1165  __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
1166  __ ret(0);
1167  break;
1168  }
1169  case Token::MOD: {
1170  // For MOD we jump to the allocation_failure label, to call runtime.
1171  __ jmp(allocation_failure);
1172  break;
1173  }
1174  case Token::BIT_OR:
1175  case Token::BIT_AND:
1176  case Token::BIT_XOR:
1177  case Token::SAR:
1178  case Token::SHL:
1179  case Token::SHR: {
1180  Label non_smi_shr_result;
1181  Register heap_number_map = r9;
1182  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
1183  FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
1184  heap_number_map);
1185  switch (op_) {
1186  case Token::BIT_OR: __ orl(rax, rcx); break;
1187  case Token::BIT_AND: __ andl(rax, rcx); break;
1188  case Token::BIT_XOR: __ xorl(rax, rcx); break;
1189  case Token::SAR: __ sarl_cl(rax); break;
1190  case Token::SHL: __ shll_cl(rax); break;
1191  case Token::SHR: {
1192  __ shrl_cl(rax);
1193  // Check if result is negative. This can only happen for a shift
1194  // by zero.
1195  __ testl(rax, rax);
1196  __ j(negative, &non_smi_shr_result);
1197  break;
1198  }
1199  default: UNREACHABLE();
1200  }
1202  // Tag smi result and return.
1203  __ Integer32ToSmi(rax, rax);
1204  __ Ret();
1205 
1206  // Logical shift right can produce an unsigned int32 that is not
1207  // an int32, and so is not in the smi range. Allocate a heap number
1208  // in that case.
1209  if (op_ == Token::SHR) {
1210  __ bind(&non_smi_shr_result);
1211  Label allocation_failed;
1212  __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
1213  // Allocate heap number in new space.
1214  // Not using AllocateHeapNumber macro in order to reuse
1215  // already loaded heap_number_map.
1216  __ AllocateInNewSpace(HeapNumber::kSize,
1217  rax,
1218  rdx,
1219  no_reg,
1220  &allocation_failed,
1221  TAG_OBJECT);
1222  // Set the map.
1223  if (FLAG_debug_code) {
1224  __ AbortIfNotRootValue(heap_number_map,
1225  Heap::kHeapNumberMapRootIndex,
1226  "HeapNumberMap register clobbered.");
1227  }
1228  __ movq(FieldOperand(rax, HeapObject::kMapOffset),
1229  heap_number_map);
1230  __ cvtqsi2sd(xmm0, rbx);
1231  __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
1232  __ Ret();
1233 
1234  __ bind(&allocation_failed);
1235  // We need tagged values in rdx and rax for the following code,
1236  // not int32 in rax and rcx.
1237  __ Integer32ToSmi(rax, rcx);
1238  __ Integer32ToSmi(rdx, rbx);
1239  __ jmp(allocation_failure);
1240  }
1241  break;
1242  }
1243  default: UNREACHABLE(); break;
1244  }
1245  // No fall-through from this generated code.
1246  if (FLAG_debug_code) {
1247  __ Abort("Unexpected fall-through in "
1248  "BinaryStub::GenerateFloatingPointCode.");
1249  }
1250 }
1251 
1252 
1253 void BinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
1254  ASSERT(op_ == Token::ADD);
1255  Label left_not_string, call_runtime;
1256 
1257  // Registers containing left and right operands respectively.
1258  Register left = rdx;
1259  Register right = rax;
1260 
1261  // Test if left operand is a string.
1262  __ JumpIfSmi(left, &left_not_string, Label::kNear);
1263  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
1264  __ j(above_equal, &left_not_string, Label::kNear);
1265  StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
1266  GenerateRegisterArgsPush(masm);
1267  __ TailCallStub(&string_add_left_stub);
1268 
1269  // Left operand is not a string, test right.
1270  __ bind(&left_not_string);
1271  __ JumpIfSmi(right, &call_runtime, Label::kNear);
1272  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
1273  __ j(above_equal, &call_runtime, Label::kNear);
1274 
1275  StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
1276  GenerateRegisterArgsPush(masm);
1277  __ TailCallStub(&string_add_right_stub);
1278 
1279  // Neither argument is a string.
1280  __ bind(&call_runtime);
1281 }
1282 
1283 
1284 void BinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) {
1285  GenerateRegisterArgsPush(masm);
1286  switch (op_) {
1287  case Token::ADD:
1288  __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
1289  break;
1290  case Token::SUB:
1291  __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
1292  break;
1293  case Token::MUL:
1294  __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
1295  break;
1296  case Token::DIV:
1297  __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
1298  break;
1299  case Token::MOD:
1300  __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
1301  break;
1302  case Token::BIT_OR:
1303  __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
1304  break;
1305  case Token::BIT_AND:
1306  __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
1307  break;
1308  case Token::BIT_XOR:
1309  __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
1310  break;
1311  case Token::SAR:
1312  __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
1313  break;
1314  case Token::SHL:
1315  __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
1316  break;
1317  case Token::SHR:
1318  __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
1319  break;
1320  default:
1321  UNREACHABLE();
1322  }
1323 }
1324 
1325 
1326 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
1327  Label call_runtime;
1328  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
1329  result_type_ == BinaryOpIC::SMI) {
1330  // Only allow smi results.
1331  GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
1332  } else {
1333  // Allow heap number result and don't make a transition if a heap number
1334  // cannot be allocated.
1335  GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
1336  }
1337 
1338  // Code falls through if the result is not returned as either a smi or heap
1339  // number.
1340  GenerateTypeTransition(masm);
1341 
1342  if (call_runtime.is_linked()) {
1343  __ bind(&call_runtime);
1344  GenerateCallRuntimeCode(masm);
1345  }
1346 }
1347 
1348 
1349 void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
1350  ASSERT(operands_type_ == BinaryOpIC::STRING);
1351  ASSERT(op_ == Token::ADD);
1352  GenerateStringAddCode(masm);
1353  // Try to add arguments as strings, otherwise, transition to the generic
1354  // BinaryOpIC type.
1355  GenerateTypeTransition(masm);
1356 }
1357 
1358 
1359 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
1360  Label call_runtime;
1361  ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
1362  ASSERT(op_ == Token::ADD);
1363  // If both arguments are strings, call the string add stub.
1364  // Otherwise, do a transition.
1365 
1366  // Registers containing left and right operands respectively.
1367  Register left = rdx;
1368  Register right = rax;
1369 
1370  // Test if left operand is a string.
1371  __ JumpIfSmi(left, &call_runtime);
1372  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
1373  __ j(above_equal, &call_runtime);
1374 
1375  // Test if right operand is a string.
1376  __ JumpIfSmi(right, &call_runtime);
1377  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
1378  __ j(above_equal, &call_runtime);
1379 
1380  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
1381  GenerateRegisterArgsPush(masm);
1382  __ TailCallStub(&string_add_stub);
1383 
1384  __ bind(&call_runtime);
1385  GenerateTypeTransition(masm);
1386 }
1387 
1388 
1389 void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
1390  Label call_runtime;
1391 
1392  if (op_ == Token::ADD) {
1393  // Handle string addition here, because it is the only operation
1394  // that does not do a ToNumber conversion on the operands.
1395  GenerateStringAddCode(masm);
1396  }
1397 
1398  // Convert oddball arguments to numbers.
1399  Label check, done;
1400  __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
1401  __ j(not_equal, &check, Label::kNear);
1402  if (Token::IsBitOp(op_)) {
1403  __ xor_(rdx, rdx);
1404  } else {
1405  __ LoadRoot(rdx, Heap::kNanValueRootIndex);
1406  }
1407  __ jmp(&done, Label::kNear);
1408  __ bind(&check);
1409  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
1410  __ j(not_equal, &done, Label::kNear);
1411  if (Token::IsBitOp(op_)) {
1412  __ xor_(rax, rax);
1413  } else {
1414  __ LoadRoot(rax, Heap::kNanValueRootIndex);
1415  }
1416  __ bind(&done);
1417 
1418  GenerateHeapNumberStub(masm);
1419 }
1420 
1421 
1422 void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
1423  Label gc_required, not_number;
1424  GenerateFloatingPointCode(masm, &gc_required, &not_number);
1425 
1426  __ bind(&not_number);
1427  GenerateTypeTransition(masm);
1428 
1429  __ bind(&gc_required);
1430  GenerateCallRuntimeCode(masm);
1431 }
1432 
1433 
1434 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
1435  Label call_runtime, call_string_add_or_runtime;
1436 
1437  GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
1438 
1439  GenerateFloatingPointCode(masm, &call_runtime, &call_string_add_or_runtime);
1440 
1441  __ bind(&call_string_add_or_runtime);
1442  if (op_ == Token::ADD) {
1443  GenerateStringAddCode(masm);
1444  }
1445 
1446  __ bind(&call_runtime);
1447  GenerateCallRuntimeCode(masm);
1448 }
1449 
1450 
1451 void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
1452  Label* alloc_failure) {
1453  Label skip_allocation;
1454  OverwriteMode mode = mode_;
1455  switch (mode) {
1456  case OVERWRITE_LEFT: {
1457  // If the argument in rdx is already an object, we skip the
1458  // allocation of a heap number.
1459  __ JumpIfNotSmi(rdx, &skip_allocation);
1460  // Allocate a heap number for the result. Keep eax and edx intact
1461  // for the possible runtime call.
1462  __ AllocateHeapNumber(rbx, rcx, alloc_failure);
1463  // Now rdx can be overwritten losing one of the arguments as we are
1464  // now done and will not need it any more.
1465  __ movq(rdx, rbx);
1466  __ bind(&skip_allocation);
1467  // Use object in rdx as a result holder
1468  __ movq(rax, rdx);
1469  break;
1470  }
1471  case OVERWRITE_RIGHT:
1472  // If the argument in rax is already an object, we skip the
1473  // allocation of a heap number.
1474  __ JumpIfNotSmi(rax, &skip_allocation);
1475  // Fall through!
1476  case NO_OVERWRITE:
1477  // Allocate a heap number for the result. Keep rax and rdx intact
1478  // for the possible runtime call.
1479  __ AllocateHeapNumber(rbx, rcx, alloc_failure);
1480  // Now rax can be overwritten losing one of the arguments as we are
1481  // now done and will not need it any more.
1482  __ movq(rax, rbx);
1483  __ bind(&skip_allocation);
1484  break;
1485  default: UNREACHABLE();
1486  }
1487 }
1488 
1489 
1490 void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
1491  __ pop(rcx);  // Save the return address.
1492  __ push(rdx);  // Push the left operand.
1493  __ push(rax);  // Push the right operand.
1494  __ push(rcx);  // Put the return address back on top of the arguments.
1495 }
1496 
1497 
1498 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
1499  // TAGGED case:
1500  // Input:
1501  // rsp[8]: argument (should be number).
1502  // rsp[0]: return address.
1503  // Output:
1504  // rax: tagged double result.
1505  // UNTAGGED case:
1506  // Input:
1507  // rsp[0]: return address.
1508  // xmm1: untagged double input argument
1509  // Output:
1510  // xmm1: untagged double result.
1511 
1512  Label runtime_call;
1513  Label runtime_call_clear_stack;
1514  Label skip_cache;
1515  const bool tagged = (argument_type_ == TAGGED);
1516  if (tagged) {
1517  Label input_not_smi, loaded;
1518  // Test that rax is a number.
1519  __ movq(rax, Operand(rsp, kPointerSize));
1520  __ JumpIfNotSmi(rax, &input_not_smi, Label::kNear);
1521  // Input is a smi. Untag and load it onto the FPU stack.
1522  // Then load the bits of the double into rbx.
1523  __ SmiToInteger32(rax, rax);
1524  __ subq(rsp, Immediate(kDoubleSize));
1525  __ cvtlsi2sd(xmm1, rax);
1526  __ movsd(Operand(rsp, 0), xmm1);
1527  __ movq(rbx, xmm1);
1528  __ movq(rdx, xmm1);
1529  __ fld_d(Operand(rsp, 0));
1530  __ addq(rsp, Immediate(kDoubleSize));
1531  __ jmp(&loaded, Label::kNear);
1532 
1533  __ bind(&input_not_smi);
1534  // Check if input is a HeapNumber.
1535  __ LoadRoot(rbx, Heap::kHeapNumberMapRootIndex);
1536  __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
1537  __ j(not_equal, &runtime_call);
1538  // Input is a HeapNumber. Push it on the FPU stack and load its
1539  // bits into rbx.
1540  __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
1541  __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
1542  __ movq(rdx, rbx);
1543 
1544  __ bind(&loaded);
1545  } else { // UNTAGGED.
1546  __ movq(rbx, xmm1);
1547  __ movq(rdx, xmm1);
1548  }
1549 
1550  // ST[0] == double value, if TAGGED.
1551  // rbx = bits of double value.
1552  // rdx = also bits of double value.
1553  // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
1554  // h = h0 = bits ^ (bits >> 32);
1555  // h ^= h >> 16;
1556  // h ^= h >> 8;
1557  // h = h & (cacheSize - 1);
1558  // or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1)
1559  __ sar(rdx, Immediate(32));
1560  __ xorl(rdx, rbx);
1561  __ movl(rcx, rdx);
1562  __ movl(rax, rdx);
1563  __ movl(rdi, rdx);
1564  __ sarl(rdx, Immediate(8));
1565  __ sarl(rcx, Immediate(16));
1566  __ sarl(rax, Immediate(24));
1567  __ xorl(rcx, rdx);
1568  __ xorl(rax, rdi);
1569  __ xorl(rcx, rax);
1570  ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
1571  __ andl(rcx, Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
1572 
1573  // ST[0] == double value.
1574  // rbx = bits of double value.
1575  // rcx = TranscendentalCache::hash(double value).
1576  ExternalReference cache_array =
1577  ExternalReference::transcendental_cache_array_address(masm->isolate());
1578  __ movq(rax, cache_array);
1579  int cache_array_index =
1580  type_ * sizeof(Isolate::Current()->transcendental_cache()->caches_[0]);
1581  __ movq(rax, Operand(rax, cache_array_index));
1582  // rax points to the cache for the type type_.
1583  // If NULL, the cache hasn't been initialized yet, so go through runtime.
1584  __ testq(rax, rax);
1585  __ j(zero, &runtime_call_clear_stack); // Only clears stack if TAGGED.
1586 #ifdef DEBUG
1587  // Check that the layout of cache elements match expectations.
1588  { // NOLINT - doesn't like a single brace on a line.
1589  TranscendentalCache::SubCache::Element test_elem[2];
1590  char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
1591  char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
1592  char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
1593  char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
1594  char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
1595  // Two uint32_t's and a pointer per element.
1596  CHECK_EQ(16, static_cast<int>(elem2_start - elem_start));
1597  CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start));
1598  CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start));
1599  CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start));
1600  }
1601 #endif
1602  // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16].
1603  __ addl(rcx, rcx);
1604  __ lea(rcx, Operand(rax, rcx, times_8, 0));
1605  // Check if cache matches: Double value is stored in uint32_t[2] array.
1606  Label cache_miss;
1607  __ cmpq(rbx, Operand(rcx, 0));
1608  __ j(not_equal, &cache_miss, Label::kNear);
1609  // Cache hit!
1610  Counters* counters = masm->isolate()->counters();
1611  __ IncrementCounter(counters->transcendental_cache_hit(), 1);
1612  __ movq(rax, Operand(rcx, 2 * kIntSize));
1613  if (tagged) {
1614  __ fstp(0); // Clear FPU stack.
1615  __ ret(kPointerSize);
1616  } else { // UNTAGGED.
1617  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
1618  __ Ret();
1619  }
1620 
1621  __ bind(&cache_miss);
1622  __ IncrementCounter(counters->transcendental_cache_miss(), 1);
1623  // Update cache with new value.
1624  if (tagged) {
1625  __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
1626  } else { // UNTAGGED.
1627  __ AllocateHeapNumber(rax, rdi, &skip_cache);
1628  __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
1629  __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
1630  }
1631  GenerateOperation(masm, type_);
1632  __ movq(Operand(rcx, 0), rbx);
1633  __ movq(Operand(rcx, 2 * kIntSize), rax);
1634  __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
1635  if (tagged) {
1636  __ ret(kPointerSize);
1637  } else { // UNTAGGED.
1638  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
1639  __ Ret();
1640 
1641  // Skip cache and return answer directly, only in untagged case.
1642  __ bind(&skip_cache);
1643  __ subq(rsp, Immediate(kDoubleSize));
1644  __ movsd(Operand(rsp, 0), xmm1);
1645  __ fld_d(Operand(rsp, 0));
1646  GenerateOperation(masm, type_);
1647  __ fstp_d(Operand(rsp, 0));
1648  __ movsd(xmm1, Operand(rsp, 0));
1649  __ addq(rsp, Immediate(kDoubleSize));
1650  // We return the value in xmm1 without adding it to the cache, but
1651  // we cause a scavenging GC so that future allocations will succeed.
1652  {
1653  FrameScope scope(masm, StackFrame::INTERNAL);
1654  // Allocate an unused object bigger than a HeapNumber.
1655  __ Push(Smi::FromInt(2 * kDoubleSize));
1656  __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
1657  }
1658  __ Ret();
1659  }
1660 
1661  // Call runtime, doing whatever allocation and cleanup is necessary.
1662  if (tagged) {
1663  __ bind(&runtime_call_clear_stack);
1664  __ fstp(0);
1665  __ bind(&runtime_call);
1666  __ TailCallExternalReference(
1667  ExternalReference(RuntimeFunction(), masm->isolate()), 1, 1);
1668  } else { // UNTAGGED.
1669  __ bind(&runtime_call_clear_stack);
1670  __ bind(&runtime_call);
1671  __ AllocateHeapNumber(rax, rdi, &skip_cache);
1672  __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
1673  {
1674  FrameScope scope(masm, StackFrame::INTERNAL);
1675  __ push(rax);
1676  __ CallRuntime(RuntimeFunction(), 1);
1677  }
1678  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
1679  __ Ret();
1680  }
1681 }
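// Note on the cache probed above: as the DEBUG block checks, each SubCache
// element is 16 bytes, two uint32 words holding the bit pattern of the input
// double followed by a pointer to the cached HeapNumber result. Entry i
// therefore starts at cache_base + i * 16, and a hit is detected by comparing
// all 64 input bits (rbx) against the stored key.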
1682 
1683 
1684 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
1685  switch (type_) {
1686  // Add more cases when necessary.
1687  case TranscendentalCache::SIN: return Runtime::kMath_sin;
1688  case TranscendentalCache::COS: return Runtime::kMath_cos;
1689  case TranscendentalCache::TAN: return Runtime::kMath_tan;
1690  case TranscendentalCache::LOG: return Runtime::kMath_log;
1691  default:
1692  UNIMPLEMENTED();
1693  return Runtime::kAbort;
1694  }
1695 }
1696 
1697 
1698 void TranscendentalCacheStub::GenerateOperation(
1699  MacroAssembler* masm, TranscendentalCache::Type type) {
1700  // Registers:
1701  // rax: Newly allocated HeapNumber, which must be preserved.
1702  // rbx: Bits of input double. Must be preserved.
1703  // rcx: Pointer to cache entry. Must be preserved.
1704  // st(0): Input double
1705  Label done;
1706  if (type == TranscendentalCache::SIN ||
1707  type == TranscendentalCache::COS ||
1708  type == TranscendentalCache::TAN) {
1709  // Both fsin and fcos require arguments in the range +/-2^63 and
1710  // return NaN for infinities and NaN. They can share all code except
1711  // the actual fsin/fcos operation.
1712  Label in_range;
1713  // If argument is outside the range -2^63..2^63, fsin/cos doesn't
1714  // work. We must reduce it to the appropriate range.
1715  __ movq(rdi, rbx);
1716  // Move exponent and sign bits to low bits.
1717  __ shr(rdi, Immediate(HeapNumber::kMantissaBits));
1718  // Remove sign bit.
1719  __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
1720  int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
1721  __ cmpl(rdi, Immediate(supported_exponent_limit));
1722  __ j(below, &in_range);
1723  // Check for infinity and NaN. Both return NaN for sin.
1724  __ cmpl(rdi, Immediate(0x7ff));
1725  Label non_nan_result;
1726  __ j(not_equal, &non_nan_result, Label::kNear);
1727  // Input is +/-Infinity or NaN. Result is NaN.
1728  __ fstp(0);
1729  // NaN is represented by 0x7ff8000000000000.
1730  __ subq(rsp, Immediate(kPointerSize));
1731  __ movl(Operand(rsp, 4), Immediate(0x7ff80000));
1732  __ movl(Operand(rsp, 0), Immediate(0x00000000));
1733  __ fld_d(Operand(rsp, 0));
1734  __ addq(rsp, Immediate(kPointerSize));
1735  __ jmp(&done);
1736 
1737  __ bind(&non_nan_result);
1738 
1739  // Use fprem1 to restrict the argument to the range +/-2*PI.
1740  __ movq(rdi, rax); // Save rax before using fnstsw_ax.
1741  __ fldpi();
1742  __ fadd(0);
1743  __ fld(1);
1744  // FPU Stack: input, 2*pi, input.
1745  {
1746  Label no_exceptions;
1747  __ fwait();
1748  __ fnstsw_ax();
1749  // Clear the exception flags if Invalid Operation or Zero Divide are set.
1750  __ testl(rax, Immediate(5)); // #IO and #ZD flags of FPU status word.
1751  __ j(zero, &no_exceptions);
1752  __ fnclex();
1753  __ bind(&no_exceptions);
1754  }
1755 
1756  // Compute st(0) % st(1)
1757  {
1758  Label partial_remainder_loop;
1759  __ bind(&partial_remainder_loop);
1760  __ fprem1();
1761  __ fwait();
1762  __ fnstsw_ax();
1763  __ testl(rax, Immediate(0x400)); // Check C2 bit of FPU status word.
1764  // If C2 is set, computation only has partial result. Loop to
1765  // continue computation.
1766  __ j(not_zero, &partial_remainder_loop);
1767  }
1768  // FPU Stack: input, 2*pi, input % 2*pi
1769  __ fstp(2);
1770  // FPU Stack: input % 2*pi, 2*pi,
1771  __ fstp(0);
1772  // FPU Stack: input % 2*pi
1773  __ movq(rax, rdi); // Restore rax, pointer to the new HeapNumber.
1774  __ bind(&in_range);
1775  switch (type) {
1776  case TranscendentalCache::SIN:
1777  __ fsin();
1778  break;
1779  case TranscendentalCache::COS:
1780  __ fcos();
1781  break;
1782  case TranscendentalCache::TAN:
1783  // FPTAN calculates tangent onto st(0) and pushes 1.0 onto the
1784  // FP register stack.
1785  __ fptan();
1786  __ fstp(0); // Pop FP register stack.
1787  break;
1788  default:
1789  UNREACHABLE();
1790  }
1791  __ bind(&done);
1792  } else {
1793  ASSERT(type == TranscendentalCache::LOG);
1794  __ fldln2();
1795  __ fxch();
1796  __ fyl2x();
1797  }
1798 }
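Editorial note: the range check at the top of GenerateOperation is easier to follow as plain bit manipulation on the IEEE-754 representation. The following stand-alone C++ sketch is an illustration only, not part of this file; the constants 52, 0x7FF and 1023 stand in for HeapNumber::kMantissaBits, the exponent mask and HeapNumber::kExponentBias used above, and the helper name is invented for the example.

#include <cstdint>
#include <cstring>

// Returns true when |x| < 2^63, i.e. when fsin/fcos/fptan may be used
// directly. Larger finite inputs are first reduced with fprem1 above, and a
// biased exponent of 0x7FF (Infinity or NaN) produces a NaN result instead.
static bool FitsFpuTrigRange(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  int biased_exponent = static_cast<int>((bits >> 52) & 0x7FF);  // drop sign
  return biased_exponent < 63 + 1023;
}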
1799 
1800 
1801 // Input: rdx, rax are the left and right objects of a bit op.
1802 // Output: rax, rcx are left and right integers for a bit op.
1803 void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
1804  // Check float operands.
1805  Label done;
1806  Label rax_is_smi;
1807  Label rax_is_object;
1808  Label rdx_is_object;
1809 
1810  __ JumpIfNotSmi(rdx, &rdx_is_object);
1811  __ SmiToInteger32(rdx, rdx);
1812  __ JumpIfSmi(rax, &rax_is_smi);
1813 
1814  __ bind(&rax_is_object);
1815  IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx.
1816  __ jmp(&done);
1817 
1818  __ bind(&rdx_is_object);
1819  IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx.
1820  __ JumpIfNotSmi(rax, &rax_is_object);
1821  __ bind(&rax_is_smi);
1822  __ SmiToInteger32(rcx, rax);
1823 
1824  __ bind(&done);
1825  __ movl(rax, rdx);
1826 }
1827 
1828 
1829 // Input: rdx, rax are the left and right objects of a bit op.
1830 // Output: rax, rcx are left and right integers for a bit op.
1831 // Jump to conversion_failure: rdx and rax are unchanged.
1832 void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
1833  Label* conversion_failure,
1834  Register heap_number_map) {
1835  // Check float operands.
1836  Label arg1_is_object, check_undefined_arg1;
1837  Label arg2_is_object, check_undefined_arg2;
1838  Label load_arg2, done;
1839 
1840  __ JumpIfNotSmi(rdx, &arg1_is_object);
1841  __ SmiToInteger32(r8, rdx);
1842  __ jmp(&load_arg2);
1843 
1844  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
1845  __ bind(&check_undefined_arg1);
1846  __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
1847  __ j(not_equal, conversion_failure);
1848  __ Set(r8, 0);
1849  __ jmp(&load_arg2);
1850 
1851  __ bind(&arg1_is_object);
1852  __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
1853  __ j(not_equal, &check_undefined_arg1);
1854  // Get the untagged integer version of the rdx heap number in r8.
1855  IntegerConvert(masm, r8, rdx);
1856 
1857  // Here r8 has the untagged integer, rax has a Smi or a heap number.
1858  __ bind(&load_arg2);
1859  // Test if arg2 is a Smi.
1860  __ JumpIfNotSmi(rax, &arg2_is_object);
1861  __ SmiToInteger32(rcx, rax);
1862  __ jmp(&done);
1863 
1864  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
1865  __ bind(&check_undefined_arg2);
1866  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
1867  __ j(not_equal, conversion_failure);
1868  __ Set(rcx, 0);
1869  __ jmp(&done);
1870 
1871  __ bind(&arg2_is_object);
1872  __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
1873  __ j(not_equal, &check_undefined_arg2);
1874  // Get the untagged integer version of the rax heap number in rcx.
1875  IntegerConvert(masm, rcx, rax);
1876  __ bind(&done);
1877  __ movl(rax, r8);
1878 }
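Editorial note: both helpers above funnel heap numbers through IntegerConvert, which performs the ECMA-262 ToInt32 truncation required by the bitwise operators (with undefined converting to zero, as the comments note). A rough scalar equivalent is sketched below for illustration only; it is not the actual IntegerConvert code and the function name is invented here.

#include <cmath>
#include <cstdint>

// ECMA-262 (section 9.5) ToInt32: truncate toward zero, wrap modulo 2^32 and
// reinterpret the result as a signed 32-bit integer.
static int32_t ToInt32(double value) {
  if (!std::isfinite(value)) return 0;                    // NaN, +/-Infinity
  double truncated = std::trunc(value);
  double wrapped = std::fmod(truncated, 4294967296.0);    // modulo 2^32
  if (wrapped < 0) wrapped += 4294967296.0;               // into [0, 2^32)
  return static_cast<int32_t>(static_cast<uint32_t>(wrapped));
}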
1879 
1880 
1881 void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
1882  __ SmiToInteger32(kScratchRegister, rdx);
1883  __ cvtlsi2sd(xmm0, kScratchRegister);
1884  __ SmiToInteger32(kScratchRegister, rax);
1885  __ cvtlsi2sd(xmm1, kScratchRegister);
1886 }
1887 
1888 
1889 void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
1890  Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
1891  // Load operand in rdx into xmm0.
1892  __ JumpIfSmi(rdx, &load_smi_rdx);
1893  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
1894  // Load operand in rax into xmm1.
1895  __ JumpIfSmi(rax, &load_smi_rax);
1896  __ bind(&load_nonsmi_rax);
1897  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
1898  __ jmp(&done);
1899 
1900  __ bind(&load_smi_rdx);
1901  __ SmiToInteger32(kScratchRegister, rdx);
1902  __ cvtlsi2sd(xmm0, kScratchRegister);
1903  __ JumpIfNotSmi(rax, &load_nonsmi_rax);
1904 
1905  __ bind(&load_smi_rax);
1906  __ SmiToInteger32(kScratchRegister, rax);
1907  __ cvtlsi2sd(xmm1, kScratchRegister);
1908 
1909  __ bind(&done);
1910 }
1911 
1912 
1913 void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
1914  Label* not_numbers) {
1915  Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
1916  // Load operand in rdx into xmm0, or branch to not_numbers.
1917  __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
1918  __ JumpIfSmi(rdx, &load_smi_rdx);
1919  __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
1920  __ j(not_equal, not_numbers); // Argument in rdx is not a number.
1921  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
1922  // Load operand in rax into xmm1, or branch to not_numbers.
1923  __ JumpIfSmi(rax, &load_smi_rax);
1924 
1925  __ bind(&load_nonsmi_rax);
1926  __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
1927  __ j(not_equal, not_numbers);
1928  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
1929  __ jmp(&done);
1930 
1931  __ bind(&load_smi_rdx);
1932  __ SmiToInteger32(kScratchRegister, rdx);
1933  __ cvtlsi2sd(xmm0, kScratchRegister);
1934  __ JumpIfNotSmi(rax, &load_nonsmi_rax);
1935 
1936  __ bind(&load_smi_rax);
1937  __ SmiToInteger32(kScratchRegister, rax);
1938  __ cvtlsi2sd(xmm1, kScratchRegister);
1939  __ bind(&done);
1940 }
1941 
1942 
1943 void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
1944  Register first,
1945  Register second,
1946  Register scratch1,
1947  Register scratch2,
1948  Register scratch3,
1949  Label* on_success,
1950  Label* on_not_smis) {
1951  Register heap_number_map = scratch3;
1952  Register smi_result = scratch1;
1953  Label done;
1954 
1955  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
1956 
1957  Label first_smi;
1958  __ JumpIfSmi(first, &first_smi, Label::kNear);
1959  __ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
1960  __ j(not_equal, on_not_smis);
1961  // Convert HeapNumber to smi if possible.
1962  __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
1963  __ movq(scratch2, xmm0);
1964  __ cvttsd2siq(smi_result, xmm0);
1965  // Check if conversion was successful by converting back and
1966  // comparing to the original double's bits.
1967  __ cvtlsi2sd(xmm1, smi_result);
1968  __ movq(kScratchRegister, xmm1);
1969  __ cmpq(scratch2, kScratchRegister);
1970  __ j(not_equal, on_not_smis);
1971  __ Integer32ToSmi(first, smi_result);
1972 
1973  __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
1974  __ bind(&first_smi);
1975  if (FLAG_debug_code) {
1976  // Second should be non-smi if we get here.
1977  __ AbortIfSmi(second);
1978  }
1979  __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
1980  __ j(not_equal, on_not_smis);
1981  // Convert second to smi, if possible.
1982  __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
1983  __ movq(scratch2, xmm0);
1984  __ cvttsd2siq(smi_result, xmm0);
1985  __ cvtlsi2sd(xmm1, smi_result);
1986  __ movq(kScratchRegister, xmm1);
1987  __ cmpq(scratch2, kScratchRegister);
1988  __ j(not_equal, on_not_smis);
1989  __ Integer32ToSmi(second, smi_result);
1990  if (on_success != NULL) {
1991  __ jmp(on_success);
1992  } else {
1993  __ bind(&done);
1994  }
1995 }
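Editorial note: NumbersToSmis decides whether a heap number can be replaced by a smi with a round-trip test: truncate with cvttsd2siq, convert back, and compare the bit patterns. Below is a simplified 32-bit sketch of the same idea, not V8 code; the range guard only keeps the C++ casts well defined and the helper name is invented.

#include <cstdint>
#include <cstring>

// Accept |value| as an exact 32-bit integer only if truncating and converting
// back reproduces the identical bit pattern. NaN and -0.0 fail the bit
// comparison, just as they fail in the stub above.
static bool DoubleIsExactInt32(double value, int32_t* out) {
  if (!(value > -2147483649.0 && value < 2147483648.0)) return false;
  int32_t as_int = static_cast<int32_t>(value);     // truncates toward zero
  double round_trip = static_cast<double>(as_int);
  uint64_t a, b;
  std::memcpy(&a, &value, sizeof a);
  std::memcpy(&b, &round_trip, sizeof b);
  if (a != b) return false;
  *out = as_int;
  return true;
}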
1996 
1997 
1998 void MathPowStub::Generate(MacroAssembler* masm) {
1999  // Choose register conforming to calling convention (when bailing out).
2000 #ifdef _WIN64
2001  const Register exponent = rdx;
2002 #else
2003  const Register exponent = rdi;
2004 #endif
2005  const Register base = rax;
2006  const Register scratch = rcx;
2007  const XMMRegister double_result = xmm3;
2008  const XMMRegister double_base = xmm2;
2009  const XMMRegister double_exponent = xmm1;
2010  const XMMRegister double_scratch = xmm4;
2011 
2012  Label call_runtime, done, exponent_not_smi, int_exponent;
2013 
2014  // Save 1 in double_result - we need this several times later on.
2015  __ movq(scratch, Immediate(1));
2016  __ cvtlsi2sd(double_result, scratch);
2017 
2018  if (exponent_type_ == ON_STACK) {
2019  Label base_is_smi, unpack_exponent;
2020  // The exponent and base are supplied as arguments on the stack.
2021  // This can only happen if the stub is called from non-optimized code.
2022  // Load input parameters from stack.
2023  __ movq(base, Operand(rsp, 2 * kPointerSize));
2024  __ movq(exponent, Operand(rsp, 1 * kPointerSize));
2025  __ JumpIfSmi(base, &base_is_smi, Label::kNear);
2026  __ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
2027  Heap::kHeapNumberMapRootIndex);
2028  __ j(not_equal, &call_runtime);
2029 
2030  __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
2031  __ jmp(&unpack_exponent, Label::kNear);
2032 
2033  __ bind(&base_is_smi);
2034  __ SmiToInteger32(base, base);
2035  __ cvtlsi2sd(double_base, base);
2036  __ bind(&unpack_exponent);
2037 
2038  __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
2039  __ SmiToInteger32(exponent, exponent);
2040  __ jmp(&int_exponent);
2041 
2042  __ bind(&exponent_not_smi);
2043  __ CompareRoot(FieldOperand(exponent, HeapObject::kMapOffset),
2044  Heap::kHeapNumberMapRootIndex);
2045  __ j(not_equal, &call_runtime);
2046  __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
2047  } else if (exponent_type_ == TAGGED) {
2048  __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
2049  __ SmiToInteger32(exponent, exponent);
2050  __ jmp(&int_exponent);
2051 
2052  __ bind(&exponent_not_smi);
2053  __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
2054  }
2055 
2056  if (exponent_type_ != INTEGER) {
2057  Label fast_power;
2058  // Detect integer exponents stored as double.
2059  __ cvttsd2si(exponent, double_exponent);
2060  // Skip to runtime if possibly NaN (indicated by the indefinite integer).
2061  __ cmpl(exponent, Immediate(0x80000000u));
2062  __ j(equal, &call_runtime);
2063  __ cvtlsi2sd(double_scratch, exponent);
2064  // Already ruled out NaNs for exponent.
2065  __ ucomisd(double_exponent, double_scratch);
2066  __ j(equal, &int_exponent);
2067 
2068  if (exponent_type_ == ON_STACK) {
2069  // Detect square root case. Crankshaft detects constant +/-0.5 at
2070  // compile time and uses DoMathPowHalf instead. We then skip this check
2071  // for non-constant cases of +/-0.5 as these hardly occur.
2072  Label continue_sqrt, continue_rsqrt, not_plus_half;
2073  // Test for 0.5.
2074  // Load double_scratch with 0.5.
2075  __ movq(scratch, V8_UINT64_C(0x3FE0000000000000), RelocInfo::NONE);
2076  __ movq(double_scratch, scratch);
2077  // Already ruled out NaNs for exponent.
2078  __ ucomisd(double_scratch, double_exponent);
2079  __ j(not_equal, &not_plus_half, Label::kNear);
2080 
2081  // Calculates square root of base. Check for the special case of
2082  // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
2083  // According to IEEE-754, double-precision -Infinity has the highest
2084  // 12 bits set and the lowest 52 bits cleared.
2085  __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE);
2086  __ movq(double_scratch, scratch);
2087  __ ucomisd(double_scratch, double_base);
2088  // Comparing -Infinity with NaN results in "unordered", which sets the
2089  // zero flag as if both were equal. However, it also sets the carry flag.
2090  __ j(not_equal, &continue_sqrt, Label::kNear);
2091  __ j(carry, &continue_sqrt, Label::kNear);
2092 
2093  // Set result to Infinity in the special case.
2094  __ xorps(double_result, double_result);
2095  __ subsd(double_result, double_scratch);
2096  __ jmp(&done);
2097 
2098  __ bind(&continue_sqrt);
2099  // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
2100  __ xorps(double_scratch, double_scratch);
2101  __ addsd(double_scratch, double_base); // Convert -0 to 0.
2102  __ sqrtsd(double_result, double_scratch);
2103  __ jmp(&done);
2104 
2105  // Test for -0.5.
2106  __ bind(&not_plus_half);
2107  // Load double_scratch with -0.5 by subtracting 1.
2108  __ subsd(double_scratch, double_result);
2109  // Already ruled out NaNs for exponent.
2110  __ ucomisd(double_scratch, double_exponent);
2111  __ j(not_equal, &fast_power, Label::kNear);
2112 
2113  // Calculates reciprocal of square root of base. Check for the special
2114  // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
2115  // According to IEEE-754, double-precision -Infinity has the highest
2116  // 12 bits set and the lowest 52 bits cleared.
2117  __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE);
2118  __ movq(double_scratch, scratch);
2119  __ ucomisd(double_scratch, double_base);
2120  // Comparing -Infinity with NaN results in "unordered", which sets the
2121  // zero flag as if both were equal. However, it also sets the carry flag.
2122  __ j(not_equal, &continue_rsqrt, Label::kNear);
2123  __ j(carry, &continue_rsqrt, Label::kNear);
2124 
2125  // Set result to 0 in the special case.
2126  __ xorps(double_result, double_result);
2127  __ jmp(&done);
2128 
2129  __ bind(&continue_rsqrt);
2130  // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
2131  __ xorps(double_exponent, double_exponent);
2132  __ addsd(double_exponent, double_base); // Convert -0 to +0.
2133  __ sqrtsd(double_exponent, double_exponent);
2134  __ divsd(double_result, double_exponent);
2135  __ jmp(&done);
2136  }
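Editorial note: the two special-case blocks above implement corner cases from ECMA-262 15.8.2.13 that sqrtsd alone would get wrong: Math.pow(-Infinity, 0.5) is +Infinity, Math.pow(-Infinity, -0.5) is +0, and a -0 base must first be flushed to +0. The same behavior summarized as plain C++, purely as an illustration (not the stub's code):

#include <cmath>
#include <limits>

static double PowHalf(double base) {               // exponent == +0.5
  if (base == -std::numeric_limits<double>::infinity()) {
    return std::numeric_limits<double>::infinity();
  }
  return std::sqrt(base + 0.0);                     // +0.0 turns -0 into +0
}

static double PowMinusHalf(double base) {           // exponent == -0.5
  if (base == -std::numeric_limits<double>::infinity()) return 0.0;
  return 1.0 / std::sqrt(base + 0.0);
}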
2137 
2138  // Using FPU instructions to calculate power.
2139  Label fast_power_failed;
2140  __ bind(&fast_power);
2141  __ fnclex(); // Clear flags to catch exceptions later.
2142  // Transfer (B)ase and (E)xponent onto the FPU register stack.
2143  __ subq(rsp, Immediate(kDoubleSize));
2144  __ movsd(Operand(rsp, 0), double_exponent);
2145  __ fld_d(Operand(rsp, 0)); // E
2146  __ movsd(Operand(rsp, 0), double_base);
2147  __ fld_d(Operand(rsp, 0)); // B, E
2148 
2149  // Exponent is in st(1) and base is in st(0)
2150  // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
2151  // FYL2X calculates st(1) * log2(st(0))
2152  __ fyl2x(); // X
2153  __ fld(0); // X, X
2154  __ frndint(); // rnd(X), X
2155  __ fsub(1); // rnd(X), X-rnd(X)
2156  __ fxch(1); // X - rnd(X), rnd(X)
2157  // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
2158  __ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
2159  __ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
2160  __ faddp(1); // 1, 2^(X-rnd(X)), rnd(X)
2161  // FSCALE calculates st(0) * 2^st(1)
2162  __ fscale(); // 2^X, rnd(X)
2163  __ fstp(1);
2164  // Bail out to runtime in case of exceptions in the status word.
2165  __ fnstsw_ax();
2166  __ testb(rax, Immediate(0x5F)); // Check for all but precision exception.
2167  __ j(not_zero, &fast_power_failed, Label::kNear);
2168  __ fstp_d(Operand(rsp, 0));
2169  __ movsd(double_result, Operand(rsp, 0));
2170  __ addq(rsp, Immediate(kDoubleSize));
2171  __ jmp(&done);
2172 
2173  __ bind(&fast_power_failed);
2174  __ fninit();
2175  __ addq(rsp, Immediate(kDoubleSize));
2176  __ jmp(&call_runtime);
2177  }
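Editorial note: the FPU sequence above evaluates the power through the identity B^E = 2^(E * log2(B)), splitting that product into an integral part (consumed by FSCALE) and a fractional part in (-1, 1), the valid input range of F2XM1. The sketch below is a scalar illustration of the same identity, valid for positive finite bases; it is not the stub's actual code and the function name is invented.

#include <cmath>

static double PowViaExp2Log2(double base, double exponent) {
  double x = exponent * std::log2(base);      // FYL2X: st(1) * log2(st(0))
  double r = std::nearbyint(x);               // FRNDINT (round to nearest)
  double fraction = x - r;                    // in (-1, 1), valid F2XM1 input
  return std::exp2(fraction) * std::exp2(r);  // F2XM1 (+1) followed by FSCALE
}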
2178 
2179  // Calculate power with integer exponent.
2180  __ bind(&int_exponent);
2181  const XMMRegister double_scratch2 = double_exponent;
2182  // Back up exponent as we need to check if exponent is negative later.
2183  __ movq(scratch, exponent); // Back up exponent.
2184  __ movsd(double_scratch, double_base); // Back up base.
2185  __ movsd(double_scratch2, double_result); // Load double_exponent with 1.
2186 
2187  // Get absolute value of exponent.
2188  Label no_neg, while_true, no_multiply;
2189  __ testl(scratch, scratch);
2190  __ j(positive, &no_neg, Label::kNear);
2191  __ negl(scratch);
2192  __ bind(&no_neg);
2193 
2194  __ bind(&while_true);
2195  __ shrl(scratch, Immediate(1));
2196  __ j(not_carry, &no_multiply, Label::kNear);
2197  __ mulsd(double_result, double_scratch);
2198  __ bind(&no_multiply);
2199 
2200  __ mulsd(double_scratch, double_scratch);
2201  __ j(not_zero, &while_true);
2202 
2203  // If the exponent is negative, return 1/result.
2204  __ testl(exponent, exponent);
2205  __ j(greater, &done);
2206  __ divsd(double_scratch2, double_result);
2207  __ movsd(double_result, double_scratch2);
2208  // Test whether result is zero. Bail out to check for subnormal result.
2209  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
2210  __ xorps(double_scratch2, double_scratch2);
2211  __ ucomisd(double_scratch2, double_result);
2212  // double_exponent aliased as double_scratch2 has already been overwritten
2213  // and may not have contained the exponent value in the first place when the
2214  // input was a smi. We reset it with exponent value before bailing out.
2215  __ j(not_equal, &done);
2216  __ cvtlsi2sd(double_exponent, exponent);
2217 
2218  // Returning or bailing out.
2219  Counters* counters = masm->isolate()->counters();
2220  if (exponent_type_ == ON_STACK) {
2221  // The arguments are still on the stack.
2222  __ bind(&call_runtime);
2223  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
2224 
2225  // The stub is called from non-optimized code, which expects the result
2226  // as a heap number in rax.
2227  __ bind(&done);
2228  __ AllocateHeapNumber(rax, rcx, &call_runtime);
2229  __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result);
2230  __ IncrementCounter(counters->math_pow(), 1);
2231  __ ret(2 * kPointerSize);
2232  } else {
2233  __ bind(&call_runtime);
2234  // Move base to the correct argument register. Exponent is already in xmm1.
2235  __ movsd(xmm0, double_base);
2236  ASSERT(double_exponent.is(xmm1));
2237  {
2238  AllowExternalCallThatCantCauseGC scope(masm);
2239  __ PrepareCallCFunction(2);
2240  __ CallCFunction(
2241  ExternalReference::power_double_double_function(masm->isolate()), 2);
2242  }
2243  // Return value is in xmm0.
2244  __ movsd(double_result, xmm0);
2245  // Restore context register.
2246  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
2247 
2248  __ bind(&done);
2249  __ IncrementCounter(counters->math_pow(), 1);
2250  __ ret(0);
2251  }
2252 }
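Editorial note: the loop that starts at the int_exponent label is classic square-and-multiply (binary exponentiation): every right shift consumes one exponent bit and every iteration squares the running base. An equivalent scalar version follows, for illustration only, with an invented function name.

// As the stub's comment notes, (1/x)^n can underflow to zero where x^-n would
// not (subnormal results), which is why the stub re-checks a zero result and
// falls back to the C function.
static double PowIntegerExponent(double base, int exponent) {
  unsigned n = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                            : static_cast<unsigned>(exponent);
  double result = 1.0;
  double b = base;
  while (n != 0) {
    if (n & 1u) result *= b;  // low bit set: multiply into the result
    b *= b;                   // square the base for the next bit
    n >>= 1;
  }
  return exponent < 0 ? 1.0 / result : result;
}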
2253 
2254 
2255 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
2256  // The key is in rdx and the parameter count is in rax.
2257 
2258  // The displacement is used for skipping the frame pointer on the
2259  // stack. It is the offset of the last parameter (if any) relative
2260  // to the frame pointer.
2261  static const int kDisplacement = 1 * kPointerSize;
2262 
2263  // Check that the key is a smi.
2264  Label slow;
2265  __ JumpIfNotSmi(rdx, &slow);
2266 
2267  // Check if the calling frame is an arguments adaptor frame. We look at the
2268  // context offset, and if the frame is not a regular one, then we find a
2269  // Smi instead of the context. We can't use SmiCompare here, because that
2270  // only works for comparing two smis.
2271  Label adaptor;
2275  __ j(equal, &adaptor);
2276 
2277  // Check index against formal parameters count limit passed in
2278  // through register rax. Use unsigned comparison to get negative
2279  // check for free.
2280  __ cmpq(rdx, rax);
2281  __ j(above_equal, &slow);
2282 
2283  // Read the argument from the stack and return it.
2284  SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
2285  __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
2286  index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
2287  __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
2288  __ Ret();
2289 
2290  // Arguments adaptor case: Check index against actual arguments
2291  // limit found in the arguments adaptor frame. Use unsigned
2292  // comparison to get negative check for free.
2293  __ bind(&adaptor);
2295  __ cmpq(rdx, rcx);
2296  __ j(above_equal, &slow);
2297 
2298  // Read the argument from the stack and return it.
2299  index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
2300  __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
2301  index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
2302  __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
2303  __ Ret();
2304 
2305  // Slow-case: Handle non-smi or out-of-bounds access to arguments
2306  // by calling the runtime system.
2307  __ bind(&slow);
2308  __ pop(rbx); // Return address.
2309  __ push(rdx);
2310  __ push(rbx);
2311  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
2312 }
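Editorial note: GenerateReadElement folds the scaled, untagged count and key directly into x64 addressing modes. Written out as ordinary integer arithmetic, the slot it reads is the following byte offset from rbp (illustration only; the helper name is invented):

static int ArgumentByteOffsetFromFp(int key, int parameter_count) {
  const int kPointerSize = 8;                  // x64
  const int kDisplacement = 1 * kPointerSize;  // mirrors the constant above
  return parameter_count * kPointerSize - key * kPointerSize + kDisplacement;
}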
2313 
2314 
2315 void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
2316  // Stack layout:
2317  // rsp[0] : return address
2318  // rsp[8] : number of parameters (tagged)
2319  // rsp[16] : receiver displacement
2320  // rsp[24] : function
2321  // Registers used over the whole function:
2322  // rbx: the mapped parameter count (untagged)
2323  // rax: the allocated object (tagged).
2324 
2325  Factory* factory = masm->isolate()->factory();
2326 
2327  __ SmiToInteger64(rbx, Operand(rsp, 1 * kPointerSize));
2328  // rbx = parameter count (untagged)
2329 
2330  // Check if the calling frame is an arguments adaptor frame.
2331  Label runtime;
2332  Label adaptor_frame, try_allocate;
2336  __ j(equal, &adaptor_frame);
2337 
2338  // No adaptor, parameter count = argument count.
2339  __ movq(rcx, rbx);
2340  __ jmp(&try_allocate, Label::kNear);
2341 
2342  // We have an adaptor frame. Patch the parameters pointer.
2343  __ bind(&adaptor_frame);
2344  __ SmiToInteger64(rcx,
2345  Operand(rdx,
2347  __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
2349  __ movq(Operand(rsp, 2 * kPointerSize), rdx);
2350 
2351  // rbx = parameter count (untagged)
2352  // rcx = argument count (untagged)
2353  // Compute the mapped parameter count = min(rbx, rcx) in rbx.
2354  __ cmpq(rbx, rcx);
2355  __ j(less_equal, &try_allocate, Label::kNear);
2356  __ movq(rbx, rcx);
2357 
2358  __ bind(&try_allocate);
2359 
2360  // Compute the sizes of backing store, parameter map, and arguments object.
2361  // 1. Parameter map, has 2 extra words containing context and backing store.
2362  const int kParameterMapHeaderSize =
2364  Label no_parameter_map;
2365  __ xor_(r8, r8);
2366  __ testq(rbx, rbx);
2367  __ j(zero, &no_parameter_map, Label::kNear);
2368  __ lea(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
2369  __ bind(&no_parameter_map);
2370 
2371  // 2. Backing store.
2373 
2374  // 3. Arguments object.
2375  __ addq(r8, Immediate(Heap::kArgumentsObjectSize));
2376 
2377  // Do the allocation of all three objects in one go.
2378  __ AllocateInNewSpace(r8, rax, rdx, rdi, &runtime, TAG_OBJECT);
2379 
2380  // rax = address of new object(s) (tagged)
2381  // rcx = argument count (untagged)
2382  // Get the arguments boilerplate from the current (global) context into rdi.
2383  Label has_mapped_parameters, copy;
2386  __ testq(rbx, rbx);
2387  __ j(not_zero, &has_mapped_parameters, Label::kNear);
2388 
2389  const int kIndex = Context::ARGUMENTS_BOILERPLATE_INDEX;
2390  __ movq(rdi, Operand(rdi, Context::SlotOffset(kIndex)));
2391  __ jmp(&copy, Label::kNear);
2392 
2393  const int kAliasedIndex = Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX;
2394  __ bind(&has_mapped_parameters);
2395  __ movq(rdi, Operand(rdi, Context::SlotOffset(kAliasedIndex)));
2396  __ bind(&copy);
2397 
2398  // rax = address of new object (tagged)
2399  // rbx = mapped parameter count (untagged)
2400  // rcx = argument count (untagged)
2401  // rdi = address of boilerplate object (tagged)
2402  // Copy the JS object part.
2403  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
2404  __ movq(rdx, FieldOperand(rdi, i));
2405  __ movq(FieldOperand(rax, i), rdx);
2406  }
2407 
2408  // Set up the callee in-object property.
2410  __ movq(rdx, Operand(rsp, 3 * kPointerSize));
2411  __ movq(FieldOperand(rax, JSObject::kHeaderSize +
2412  Heap::kArgumentsCalleeIndex * kPointerSize),
2413  rdx);
2414 
2415  // Use the length (smi tagged) and set that as an in-object property too.
2416  // Note: rcx is tagged from here on.
2418  __ Integer32ToSmi(rcx, rcx);
2419  __ movq(FieldOperand(rax, JSObject::kHeaderSize +
2420  Heap::kArgumentsLengthIndex * kPointerSize),
2421  rcx);
2422 
2423  // Set up the elements pointer in the allocated arguments object.
2424  // If we allocated a parameter map, rdi will point there, otherwise to the
2425  // backing store.
2426  __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
2428 
2429  // rax = address of new object (tagged)
2430  // rbx = mapped parameter count (untagged)
2431  // rcx = argument count (tagged)
2432  // rdi = address of parameter map or backing store (tagged)
2433 
2434  // Initialize parameter map. If there are no mapped arguments, we're done.
2435  Label skip_parameter_map;
2436  __ testq(rbx, rbx);
2437  __ j(zero, &skip_parameter_map);
2438 
2439  __ LoadRoot(kScratchRegister, Heap::kNonStrictArgumentsElementsMapRootIndex);
2440  // rbx contains the untagged mapped parameter count. Add 2 and tag to write.
2442  __ Integer64PlusConstantToSmi(r9, rbx, 2);
2444  __ movq(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
2445  __ lea(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
2446  __ movq(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);
2447 
2448  // Copy the parameter slots and the holes in the arguments.
2449  // We need to fill in mapped_parameter_count slots. They index the context,
2450  // where parameters are stored in reverse order, at
2451  // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
2452  // The mapped parameters thus need to get indices
2453  // MIN_CONTEXT_SLOTS+parameter_count-1 ..
2454  // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
2455  // We loop from right to left.
2456  Label parameters_loop, parameters_test;
2457 
2458  // Load tagged parameter count into r9.
2459  __ Integer32ToSmi(r9, rbx);
2460  __ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
2461  __ addq(r8, Operand(rsp, 1 * kPointerSize));
2462  __ subq(r8, r9);
2463  __ Move(r11, factory->the_hole_value());
2464  __ movq(rdx, rdi);
2465  __ lea(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
2466  // r9 = loop variable (tagged)
2467  // r8 = mapping index (tagged)
2468  // r11 = the hole value
2469  // rdx = address of parameter map (tagged)
2470  // rdi = address of backing store (tagged)
2471  __ jmp(&parameters_test, Label::kNear);
2472 
2473  __ bind(&parameters_loop);
2474  __ SmiSubConstant(r9, r9, Smi::FromInt(1));
2475  __ SmiToInteger64(kScratchRegister, r9);
2478  kParameterMapHeaderSize),
2479  r8);
2483  r11);
2484  __ SmiAddConstant(r8, r8, Smi::FromInt(1));
2485  __ bind(&parameters_test);
2486  __ SmiTest(r9);
2487  __ j(not_zero, &parameters_loop, Label::kNear);
2488 
2489  __ bind(&skip_parameter_map);
2490 
2491  // rcx = argument count (tagged)
2492  // rdi = address of backing store (tagged)
2493  // Copy arguments header and remaining slots (if there are any).
2495  factory->fixed_array_map());
2497 
2498  Label arguments_loop, arguments_test;
2499  __ movq(r8, rbx);
2500  __ movq(rdx, Operand(rsp, 2 * kPointerSize));
2501  // Untag rcx for the loop below.
2502  __ SmiToInteger64(rcx, rcx);
2503  __ lea(kScratchRegister, Operand(r8, times_pointer_size, 0));
2504  __ subq(rdx, kScratchRegister);
2505  __ jmp(&arguments_test, Label::kNear);
2506 
2507  __ bind(&arguments_loop);
2508  __ subq(rdx, Immediate(kPointerSize));
2509  __ movq(r9, Operand(rdx, 0));
2510  __ movq(FieldOperand(rdi, r8,
2513  r9);
2514  __ addq(r8, Immediate(1));
2515 
2516  __ bind(&arguments_test);
2517  __ cmpq(r8, rcx);
2518  __ j(less, &arguments_loop, Label::kNear);
2519 
2520  // Return and remove the on-stack parameters.
2521  __ ret(3 * kPointerSize);
2522 
2523  // Do the runtime call to allocate the arguments object.
2524  // rcx = argument count (untagged)
2525  __ bind(&runtime);
2526  __ Integer32ToSmi(rcx, rcx);
2527  __ movq(Operand(rsp, 1 * kPointerSize), rcx); // Patch argument count.
2528  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
2529 }
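Editorial note: steps 1-3 above compute the combined allocation size for the parameter map, the backing store and the arguments object before a single AllocateInNewSpace call. The sketch below restates that computation with the header and object sizes passed in as parameters instead of the real Heap/FixedArray constants; it is an illustration, not V8 code.

static int NewNonStrictArgumentsSize(int mapped_count, int argument_count,
                                     int fixed_array_header_size,
                                     int arguments_object_size) {
  const int kPointerSize = 8;  // x64
  int size = 0;
  if (mapped_count > 0) {
    // 1. Parameter map: header plus two extra words (context, backing store).
    size += fixed_array_header_size + (mapped_count + 2) * kPointerSize;
  }
  // 2. Backing store for the elements.
  size += fixed_array_header_size + argument_count * kPointerSize;
  // 3. The arguments object itself.
  size += arguments_object_size;
  return size;
}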
2530 
2531 
2532 void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
2533  // rsp[0] : return address
2534  // rsp[8] : number of parameters
2535  // rsp[16] : receiver displacement
2536  // rsp[24] : function
2537 
2538  // Check if the calling frame is an arguments adaptor frame.
2539  Label runtime;
2543  __ j(not_equal, &runtime);
2544 
2545  // Patch the arguments.length and the parameters pointer.
2547  __ movq(Operand(rsp, 1 * kPointerSize), rcx);
2548  __ SmiToInteger64(rcx, rcx);
2549  __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
2551  __ movq(Operand(rsp, 2 * kPointerSize), rdx);
2552 
2553  __ bind(&runtime);
2554  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
2555 }
2556 
2557 
2558 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
2559  // rsp[0] : return address
2560  // rsp[8] : number of parameters
2561  // rsp[16] : receiver displacement
2562  // rsp[24] : function
2563 
2564  // Check if the calling frame is an arguments adaptor frame.
2565  Label adaptor_frame, try_allocate, runtime;
2569  __ j(equal, &adaptor_frame);
2570 
2571  // Get the length from the frame.
2572  __ movq(rcx, Operand(rsp, 1 * kPointerSize));
2573  __ SmiToInteger64(rcx, rcx);
2574  __ jmp(&try_allocate);
2575 
2576  // Patch the arguments.length and the parameters pointer.
2577  __ bind(&adaptor_frame);
2579  __ movq(Operand(rsp, 1 * kPointerSize), rcx);
2580  __ SmiToInteger64(rcx, rcx);
2581  __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
2583  __ movq(Operand(rsp, 2 * kPointerSize), rdx);
2584 
2585  // Try the new space allocation. Start out with computing the size of
2586  // the arguments object and the elements array.
2587  Label add_arguments_object;
2588  __ bind(&try_allocate);
2589  __ testq(rcx, rcx);
2590  __ j(zero, &add_arguments_object, Label::kNear);
2592  __ bind(&add_arguments_object);
2593  __ addq(rcx, Immediate(Heap::kArgumentsObjectSizeStrict));
2594 
2595  // Do the allocation of both objects in one go.
2596  __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
2597 
2598  // Get the arguments boilerplate from the current (global) context.
2601  const int offset =
2603  __ movq(rdi, Operand(rdi, offset));
2604 
2605  // Copy the JS object part.
2606  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
2607  __ movq(rbx, FieldOperand(rdi, i));
2608  __ movq(FieldOperand(rax, i), rbx);
2609  }
2610 
2611  // Get the length (smi tagged) and set that as an in-object property too.
2613  __ movq(rcx, Operand(rsp, 1 * kPointerSize));
2614  __ movq(FieldOperand(rax, JSObject::kHeaderSize +
2615  Heap::kArgumentsLengthIndex * kPointerSize),
2616  rcx);
2617 
2618  // If there are no actual arguments, we're done.
2619  Label done;
2620  __ testq(rcx, rcx);
2621  __ j(zero, &done);
2622 
2623  // Get the parameters pointer from the stack.
2624  __ movq(rdx, Operand(rsp, 2 * kPointerSize));
2625 
2626  // Set up the elements pointer in the allocated arguments object and
2627  // initialize the header in the elements fixed array.
2628  __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSizeStrict));
2630  __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
2632 
2633 
2635  // Untag the length for the loop below.
2636  __ SmiToInteger64(rcx, rcx);
2637 
2638  // Copy the fixed array slots.
2639  Label loop;
2640  __ bind(&loop);
2641  __ movq(rbx, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
2643  __ addq(rdi, Immediate(kPointerSize));
2644  __ subq(rdx, Immediate(kPointerSize));
2645  __ decq(rcx);
2646  __ j(not_zero, &loop);
2647 
2648  // Return and remove the on-stack parameters.
2649  __ bind(&done);
2650  __ ret(3 * kPointerSize);
2651 
2652  // Do the runtime call to allocate the arguments object.
2653  __ bind(&runtime);
2654  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
2655 }
2656 
2657 
2658 void RegExpExecStub::Generate(MacroAssembler* masm) {
2659  // Just jump directly to the runtime if native RegExp is not selected at
2660  // compile time, or if the regexp entry in generated code has been turned off
2661  // by a runtime switch or at compilation.
2662 #ifdef V8_INTERPRETED_REGEXP
2663  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
2664 #else // V8_INTERPRETED_REGEXP
2665 
2666  // Stack frame on entry.
2667  // rsp[0]: return address
2668  // rsp[8]: last_match_info (expected JSArray)
2669  // rsp[16]: previous index
2670  // rsp[24]: subject string
2671  // rsp[32]: JSRegExp object
2672 
2673  static const int kLastMatchInfoOffset = 1 * kPointerSize;
2674  static const int kPreviousIndexOffset = 2 * kPointerSize;
2675  static const int kSubjectOffset = 3 * kPointerSize;
2676  static const int kJSRegExpOffset = 4 * kPointerSize;
2677 
2678  Label runtime;
2679  // Ensure that a RegExp stack is allocated.
2680  Isolate* isolate = masm->isolate();
2681  ExternalReference address_of_regexp_stack_memory_address =
2682  ExternalReference::address_of_regexp_stack_memory_address(isolate);
2683  ExternalReference address_of_regexp_stack_memory_size =
2684  ExternalReference::address_of_regexp_stack_memory_size(isolate);
2685  __ Load(kScratchRegister, address_of_regexp_stack_memory_size);
2687  __ j(zero, &runtime);
2688 
2689  // Check that the first argument is a JSRegExp object.
2690  __ movq(rax, Operand(rsp, kJSRegExpOffset));
2691  __ JumpIfSmi(rax, &runtime);
2692  __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
2693  __ j(not_equal, &runtime);
2694  // Check that the RegExp has been compiled (data contains a fixed array).
2696  if (FLAG_debug_code) {
2697  Condition is_smi = masm->CheckSmi(rax);
2698  __ Check(NegateCondition(is_smi),
2699  "Unexpected type for RegExp data, FixedArray expected");
2700  __ CmpObjectType(rax, FIXED_ARRAY_TYPE, kScratchRegister);
2701  __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
2702  }
2703 
2704  // rax: RegExp data (FixedArray)
2705  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2706  __ SmiToInteger32(rbx, FieldOperand(rax, JSRegExp::kDataTagOffset));
2707  __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
2708  __ j(not_equal, &runtime);
2709 
2710  // rax: RegExp data (FixedArray)
2711  // Check that the number of captures fits in the static offsets vector buffer.
2712  __ SmiToInteger32(rdx,
2714  // Calculate number of capture registers (number_of_captures + 1) * 2.
2715  __ leal(rdx, Operand(rdx, rdx, times_1, 2));
2716  // Check that the static offsets vector buffer is large enough.
2718  __ j(above, &runtime);
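Editorial note: the single LEA above computes the capture-register count, two registers (start and end) per capture plus one pair for the match itself. As plain arithmetic (illustration only):

static int NumCaptureRegisters(int number_of_captures) {
  return number_of_captures * 2 + 2;  // lea rdx, [rdx + rdx*1 + 2]
}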
2719 
2720  // rax: RegExp data (FixedArray)
2721  // rdx: Number of capture registers
2722  // Check that the second argument is a string.
2723  __ movq(rdi, Operand(rsp, kSubjectOffset));
2724  __ JumpIfSmi(rdi, &runtime);
2725  Condition is_string = masm->IsObjectStringType(rdi, rbx, rbx);
2726  __ j(NegateCondition(is_string), &runtime);
2727 
2728  // rdi: Subject string.
2729  // rax: RegExp data (FixedArray).
2730  // rdx: Number of capture registers.
2731  // Check that the third argument is a positive smi less than the string
2732  // length. A negative value will be greater (unsigned comparison).
2733  __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
2734  __ JumpIfNotSmi(rbx, &runtime);
2735  __ SmiCompare(rbx, FieldOperand(rdi, String::kLengthOffset));
2736  __ j(above_equal, &runtime);
2737 
2738  // rax: RegExp data (FixedArray)
2739  // rdx: Number of capture registers
2740  // Check that the fourth object is a JSArray object.
2741  __ movq(rdi, Operand(rsp, kLastMatchInfoOffset));
2742  __ JumpIfSmi(rdi, &runtime);
2743  __ CmpObjectType(rdi, JS_ARRAY_TYPE, kScratchRegister);
2744  __ j(not_equal, &runtime);
2745  // Check that the JSArray is in fast case.
2748  __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
2749  Heap::kFixedArrayMapRootIndex);
2750  __ j(not_equal, &runtime);
2751  // Check that the last match info has space for the capture registers and the
2752  // additional information. Ensure no overflow in add.
2754  __ SmiToInteger32(rdi, FieldOperand(rbx, FixedArray::kLengthOffset));
2755  __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
2756  __ cmpl(rdx, rdi);
2757  __ j(greater, &runtime);
2758 
2759  // Reset offset for possibly sliced string.
2760  __ Set(r14, 0);
2761  // rax: RegExp data (FixedArray)
2762  // Check the representation and encoding of the subject string.
2763  Label seq_ascii_string, seq_two_byte_string, check_code;
2764  __ movq(rdi, Operand(rsp, kSubjectOffset));
2765  // Make a copy of the original subject string.
2766  __ movq(r15, rdi);
2769  // First check for flat two byte string.
2770  __ andb(rbx, Immediate(kIsNotStringMask |
2775  __ j(zero, &seq_two_byte_string, Label::kNear);
2776  // Any other flat string must be a flat ASCII string. None of the following
2777  // string type tests will succeed if subject is not a string or a short
2778  // external string.
2779  __ andb(rbx, Immediate(kIsNotStringMask |
2782  __ j(zero, &seq_ascii_string, Label::kNear);
2783 
2784  // rbx: whether subject is a string and if yes, its string representation
2785  // Check for flat cons string or sliced string.
2786  // A flat cons string is a cons string where the second part is the empty
2787  // string. In that case the subject string is just the first part of the cons
2788  // string. Also in this case the first part of the cons string is known to be
2789  // a sequential string or an external string.
2790  // In the case of a sliced string its offset has to be taken into account.
2791  Label cons_string, external_string, check_encoding;
2796  __ cmpq(rbx, Immediate(kExternalStringTag));
2797  __ j(less, &cons_string, Label::kNear);
2798  __ j(equal, &external_string);
2799 
2800  // Catch non-string subject or short external string.
2802  __ testb(rbx, Immediate(kIsNotStringMask | kShortExternalStringMask));
2803  __ j(not_zero, &runtime);
2804 
2805  // String is sliced.
2806  __ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset));
2808  // r14: slice offset
2809  // r15: original subject string
2810  // rdi: parent string
2811  __ jmp(&check_encoding, Label::kNear);
2812  // String is a cons string, check whether it is flat.
2813  __ bind(&cons_string);
2815  Heap::kEmptyStringRootIndex);
2816  __ j(not_equal, &runtime);
2818  // rdi: first part of cons string or parent of sliced string.
2819  // rbx: map of first part of cons string or map of parent of sliced string.
2820  // Is first part of cons or parent of slice a flat two byte string?
2821  __ bind(&check_encoding);
2826  __ j(zero, &seq_two_byte_string, Label::kNear);
2827  // Any other flat string must be sequential ASCII or external.
2829  Immediate(kStringRepresentationMask));
2830  __ j(not_zero, &external_string);
2831 
2832  __ bind(&seq_ascii_string);
2833  // rdi: subject string (sequential ASCII)
2834  // rax: RegExp data (FixedArray)
2836  __ Set(rcx, 1); // Type is ASCII.
2837  __ jmp(&check_code, Label::kNear);
2838 
2839  __ bind(&seq_two_byte_string);
2840  // rdi: subject string (flat two-byte)
2841  // rax: RegExp data (FixedArray)
2843  __ Set(rcx, 0); // Type is two byte.
2844 
2845  __ bind(&check_code);
2846  // Check that the irregexp code has been generated for the actual string
2847  // encoding. If it has, the field contains a code object; otherwise it contains
2848  // a smi (code flushing support).
2849  __ JumpIfSmi(r11, &runtime);
2850 
2851  // rdi: subject string
2852  // rcx: encoding of subject string (1 if ASCII, 0 if two_byte);
2853  // r11: code
2854  // Load used arguments before starting to push arguments for call to native
2855  // RegExp code to avoid handling changing stack height.
2856  __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
2857 
2858  // rdi: subject string
2859  // rbx: previous index
2860  // rcx: encoding of subject string (1 if ASCII 0 if two_byte);
2861  // r11: code
2862  // All checks done. Now push arguments for native regexp code.
2863  Counters* counters = masm->isolate()->counters();
2864  __ IncrementCounter(counters->regexp_entry_native(), 1);
2865 
2866  // Isolates: note we add an additional parameter here (isolate pointer).
2867  static const int kRegExpExecuteArguments = 9;
2868  int argument_slots_on_stack =
2869  masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
2870  __ EnterApiExitFrame(argument_slots_on_stack);
2871 
2872  // Argument 9: Pass current isolate address.
2873  // __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
2874  // Immediate(ExternalReference::isolate_address()));
2875  __ LoadAddress(kScratchRegister, ExternalReference::isolate_address());
2876  __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
2878 
2879  // Argument 8: Indicate that this is a direct call from JavaScript.
2880  __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize),
2881  Immediate(1));
2882 
2883  // Argument 7: Start (high end) of backtracking stack memory area.
2884  __ movq(kScratchRegister, address_of_regexp_stack_memory_address);
2885  __ movq(r9, Operand(kScratchRegister, 0));
2886  __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
2887  __ addq(r9, Operand(kScratchRegister, 0));
2888  __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r9);
2889 
2890  // Argument 6: Set the number of capture registers to zero to force global
2891  // regexps to behave as non-global. This does not affect non-global regexps.
2892  // Argument 6 is passed in r9 on Linux and on the stack on Windows.
2893 #ifdef _WIN64
2894  __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kPointerSize),
2895  Immediate(0));
2896 #else
2897  __ Set(r9, 0);
2898 #endif
2899 
2900  // Argument 5: static offsets vector buffer.
2901  __ LoadAddress(r8,
2902  ExternalReference::address_of_static_offsets_vector(isolate));
2903  // Argument 5 passed in r8 on Linux and on the stack on Windows.
2904 #ifdef _WIN64
2905  __ movq(Operand(rsp, (argument_slots_on_stack - 5) * kPointerSize), r8);
2906 #endif
2907 
2908  // First four arguments are passed in registers on both Linux and Windows.
2909 #ifdef _WIN64
2910  Register arg4 = r9;
2911  Register arg3 = r8;
2912  Register arg2 = rdx;
2913  Register arg1 = rcx;
2914 #else
2915  Register arg4 = rcx;
2916  Register arg3 = rdx;
2917  Register arg2 = rsi;
2918  Register arg1 = rdi;
2919 #endif
2920 
2921  // Keep track of aliasing between argX defined above and the registers used.
2922  // rdi: subject string
2923  // rbx: previous index
2924  // rcx: encoding of subject string (1 if ASCII 0 if two_byte);
2925  // r11: code
2926  // r14: slice offset
2927  // r15: original subject string
2928 
2929  // Argument 2: Previous index.
2930  __ movq(arg2, rbx);
2931 
2932  // Argument 4: End of string data
2933  // Argument 3: Start of string data
2934  Label setup_two_byte, setup_rest, got_length, length_not_from_slice;
2935  // Prepare start and end index of the input.
2936  // Load the length from the original sliced string if that is the case.
2937  __ addq(rbx, r14);
2938  __ SmiToInteger32(arg3, FieldOperand(r15, String::kLengthOffset));
2939  __ addq(r14, arg3); // Using arg3 as scratch.
2940 
2941  // rbx: start index of the input
2942  // r14: end index of the input
2943  // r15: original subject string
2944  __ testb(rcx, rcx); // Last use of rcx as encoding of subject string.
2945  __ j(zero, &setup_two_byte, Label::kNear);
2948  __ jmp(&setup_rest, Label::kNear);
2949  __ bind(&setup_two_byte);
2952  __ bind(&setup_rest);
2953 
2954  // Argument 1: Original subject string.
2955  // The original subject is in the previous stack frame. Therefore we have to
2956  // use rbp, which points exactly to one pointer size below the previous rsp.
2957  // (Because creating a new stack frame pushes the previous rbp onto the stack
2958  // and thereby moves up rsp by one kPointerSize.)
2959  __ movq(arg1, r15);
2960 
2961  // Locate the code entry and call it.
2962  __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
2963  __ call(r11);
2964 
2965  __ LeaveApiExitFrame();
2966 
2967  // Check the result.
2968  Label success;
2969  Label exception;
2970  __ cmpl(rax, Immediate(1));
2971  // We expect exactly one result since we force the called regexp to behave
2972  // as non-global.
2973  __ j(equal, &success, Label::kNear);
2974  __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
2975  __ j(equal, &exception);
2976  __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
2977  // If none of the above, it can only be retry.
2978  // Handle that in the runtime system.
2979  __ j(not_equal, &runtime);
2980 
2981  // For failure return null.
2982  __ LoadRoot(rax, Heap::kNullValueRootIndex);
2983  __ ret(4 * kPointerSize);
2984 
2985  // Load RegExp data.
2986  __ bind(&success);
2987  __ movq(rax, Operand(rsp, kJSRegExpOffset));
2989  __ SmiToInteger32(rax,
2991  // Calculate number of capture registers (number_of_captures + 1) * 2.
2992  __ leal(rdx, Operand(rax, rax, times_1, 2));
2993 
2994  // rdx: Number of capture registers
2995  // Load last_match_info which is still known to be a fast case JSArray.
2996  __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
2998 
2999  // rbx: last_match_info backing store (FixedArray)
3000  // rdx: number of capture registers
3001  // Store the capture count.
3002  __ Integer32ToSmi(kScratchRegister, rdx);
3005  // Store last subject and last input.
3006  __ movq(rax, Operand(rsp, kSubjectOffset));
3008  __ RecordWriteField(rbx,
3010  rax,
3011  rdi,
3012  kDontSaveFPRegs);
3013  __ movq(rax, Operand(rsp, kSubjectOffset));
3015  __ RecordWriteField(rbx,
3017  rax,
3018  rdi,
3019  kDontSaveFPRegs);
3020 
3021  // Get the static offsets vector filled by the native regexp code.
3022  __ LoadAddress(rcx,
3023  ExternalReference::address_of_static_offsets_vector(isolate));
3024 
3025  // rbx: last_match_info backing store (FixedArray)
3026  // rcx: offsets vector
3027  // rdx: number of capture registers
3028  Label next_capture, done;
3029  // Capture register counter starts from number of capture registers and
3030  // counts down until wrapping after zero.
3031  __ bind(&next_capture);
3032  __ subq(rdx, Immediate(1));
3033  __ j(negative, &done, Label::kNear);
3034  // Read the value from the static offsets vector buffer and make it a smi.
3035  __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
3036  __ Integer32ToSmi(rdi, rdi);
3037  // Store the smi value in the last match info.
3038  __ movq(FieldOperand(rbx,
3039  rdx,
3042  rdi);
3043  __ jmp(&next_capture);
3044  __ bind(&done);
3045 
3046  // Return last match info.
3047  __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
3048  __ ret(4 * kPointerSize);
3049 
3050  __ bind(&exception);
3051  // The result must now be an exception. If there is no pending exception, a
3052  // stack overflow (on the backtrack stack) was detected in RegExp code, but the
3053  // exception has not been created yet. Handle that in the runtime system.
3054  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
3055  ExternalReference pending_exception_address(
3056  Isolate::kPendingExceptionAddress, isolate);
3057  Operand pending_exception_operand =
3058  masm->ExternalOperand(pending_exception_address, rbx);
3059  __ movq(rax, pending_exception_operand);
3060  __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
3061  __ cmpq(rax, rdx);
3062  __ j(equal, &runtime);
3063  __ movq(pending_exception_operand, rdx);
3064 
3065  __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
3066  Label termination_exception;
3067  __ j(equal, &termination_exception, Label::kNear);
3068  __ Throw(rax);
3069 
3070  __ bind(&termination_exception);
3071  __ ThrowUncatchable(rax);
3072 
3073  // External string. Short external strings have already been ruled out.
3074  // rdi: subject string (expected to be external)
3075  // rbx: scratch
3076  __ bind(&external_string);
3079  if (FLAG_debug_code) {
3080  // Assert that we do not have a cons or slice (indirect strings) here.
3081  // Sequential strings have already been ruled out.
3082  __ testb(rbx, Immediate(kIsIndirectStringMask));
3083  __ Assert(zero, "external string expected, but not found");
3084  }
3086  // Move the pointer so that offset-wise, it looks like a sequential string.
3088  __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3090  __ testb(rbx, Immediate(kStringEncodingMask));
3091  __ j(not_zero, &seq_ascii_string);
3092  __ jmp(&seq_two_byte_string);
3093 
3094  // Do the runtime call to execute the regexp.
3095  __ bind(&runtime);
3096  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
3097 #endif // V8_INTERPRETED_REGEXP
3098 }
3099 
3100 
3101 void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
3102  const int kMaxInlineLength = 100;
3103  Label slowcase;
3104  Label done;
3105  __ movq(r8, Operand(rsp, kPointerSize * 3));
3106  __ JumpIfNotSmi(r8, &slowcase);
3107  __ SmiToInteger32(rbx, r8);
3108  __ cmpl(rbx, Immediate(kMaxInlineLength));
3109  __ j(above, &slowcase);
3110  // Smi-tagging is equivalent to multiplying by 2.
3111  STATIC_ASSERT(kSmiTag == 0);
3112  STATIC_ASSERT(kSmiTagSize == 1);
3113  // Allocate RegExpResult followed by FixedArray with size in rbx.
3114  // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
3115  // Elements: [Map][Length][..elements..]
3116  __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
3118  rbx, // In: Number of elements.
3119  rax, // Out: Start of allocation (tagged).
3120  rcx, // Out: End of allocation.
3121  rdx, // Scratch register
3122  &slowcase,
3123  TAG_OBJECT);
3124  // rax: Start of allocated area, object-tagged.
3125  // rbx: Number of array elements as int32.
3126  // r8: Number of array elements as smi.
3127 
3128  // Set JSArray map to global.regexp_result_map().
3133 
3134  // Set empty properties FixedArray.
3135  __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
3137 
3138  // Set elements to point to FixedArray allocated right after the JSArray.
3139  __ lea(rcx, Operand(rax, JSRegExpResult::kSize));
3141 
3142  // Set input, index and length fields from arguments.
3143  __ movq(r8, Operand(rsp, kPointerSize * 1));
3145  __ movq(r8, Operand(rsp, kPointerSize * 2));
3147  __ movq(r8, Operand(rsp, kPointerSize * 3));
3149 
3150  // Fill out the elements FixedArray.
3151  // rax: JSArray.
3152  // rcx: FixedArray.
3153  // rbx: Number of elements in array as int32.
3154 
3155  // Set map.
3156  __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
3158  // Set length.
3159  __ Integer32ToSmi(rdx, rbx);
3161  // Fill contents of fixed-array with the-hole.
3162  __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
3164  // Fill fixed array elements with hole.
3165  // rax: JSArray.
3166  // rbx: Number of elements in array that remains to be filled, as int32.
3167  // rcx: Start of elements in FixedArray.
3168  // rdx: the hole.
3169  Label loop;
3170  __ testl(rbx, rbx);
3171  __ bind(&loop);
3172  __ j(less_equal, &done); // Jump if rbx is negative or zero.
3173  __ subl(rbx, Immediate(1));
3174  __ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx);
3175  __ jmp(&loop);
3176 
3177  __ bind(&done);
3178  __ ret(3 * kPointerSize);
3179 
3180  __ bind(&slowcase);
3181  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
3182 }
3183 
3184 
3185 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
3186  Register object,
3187  Register result,
3188  Register scratch1,
3189  Register scratch2,
3190  bool object_is_smi,
3191  Label* not_found) {
3192  // Use of registers. Register result is used as a temporary.
3193  Register number_string_cache = result;
3194  Register mask = scratch1;
3195  Register scratch = scratch2;
3196 
3197  // Load the number string cache.
3198  __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
3199 
3200  // Make the hash mask from the length of the number string cache. It
3201  // contains two elements (number and string) for each cache entry.
3202  __ SmiToInteger32(
3203  mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
3204  __ shrl(mask, Immediate(1));
3205  __ subq(mask, Immediate(1)); // Make mask.
3206 
3207  // Calculate the entry in the number string cache. The hash value in the
3208  // number string cache for smis is just the smi value, and the hash for
3209  // doubles is the xor of the upper and lower words. See
3210  // Heap::GetNumberStringCache.
3211  Label is_smi;
3212  Label load_result_from_cache;
3213  Factory* factory = masm->isolate()->factory();
3214  if (!object_is_smi) {
3215  __ JumpIfSmi(object, &is_smi);
3216  __ CheckMap(object,
3217  factory->heap_number_map(),
3218  not_found,
3220 
3221  STATIC_ASSERT(8 == kDoubleSize);
3222  __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
3223  __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
3224  GenerateConvertHashCodeToIndex(masm, scratch, mask);
3225 
3226  Register index = scratch;
3227  Register probe = mask;
3228  __ movq(probe,
3229  FieldOperand(number_string_cache,
3230  index,
3231  times_1,
3233  __ JumpIfSmi(probe, not_found);
3234  __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
3235  __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
3236  __ ucomisd(xmm0, xmm1);
3237  __ j(parity_even, not_found); // Bail out if NaN is involved.
3238  __ j(not_equal, not_found); // The cache did not contain this value.
3239  __ jmp(&load_result_from_cache);
3240  }
3241 
3242  __ bind(&is_smi);
3243  __ SmiToInteger32(scratch, object);
3244  GenerateConvertHashCodeToIndex(masm, scratch, mask);
3245 
3246  Register index = scratch;
3247  // Check if the entry is the smi we are looking for.
3248  __ cmpq(object,
3249  FieldOperand(number_string_cache,
3250  index,
3251  times_1,
3253  __ j(not_equal, not_found);
3254 
3255  // Get the result from the cache.
3256  __ bind(&load_result_from_cache);
3257  __ movq(result,
3258  FieldOperand(number_string_cache,
3259  index,
3260  times_1,
3261  FixedArray::kHeaderSize + kPointerSize));
3262  Counters* counters = masm->isolate()->counters();
3263  __ IncrementCounter(counters->number_to_string_native(), 1);
3264 }
3265 
3266 
3267 void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
3268  Register hash,
3269  Register mask) {
3270  __ and_(hash, mask);
3271  // Each entry in the number string cache consists of two pointer-sized fields,
3272  // but the times_twice_pointer_size (multiplication by 16) scale factor
3273  // is not supported by the addressing modes on the x64 platform.
3274  // So we have to premultiply the entry index before the lookup.
3275  __ shl(hash, Immediate(kPointerSizeLog2 + 1));
3276 }
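Editorial note: the two helpers above implement the number-string-cache hash described in the comments: the mask comes from the cache length (two slots per entry), smis hash to their value, heap numbers hash to the xor of the two 32-bit halves of their IEEE-754 bit pattern, and the index is premultiplied by 16 because x64 addressing has no times_16 scale. The stand-alone sketch below is an illustration, not the actual cache code.

#include <cstdint>
#include <cstring>

// Returns the byte offset of the probed entry relative to the start of the
// cache elements. |cache_length_in_slots| is the cache FixedArray length.
static uint32_t NumberStringCacheOffset(bool is_smi, int32_t smi_value,
                                        double value,
                                        uint32_t cache_length_in_slots) {
  uint32_t mask = cache_length_in_slots / 2 - 1;
  uint32_t hash;
  if (is_smi) {
    hash = static_cast<uint32_t>(smi_value);
  } else {
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof bits);
    hash = static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
  }
  const uint32_t kPointerSizeLog2 = 3;              // 8-byte fields
  return (hash & mask) << (kPointerSizeLog2 + 1);   // two fields per entry
}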
3277 
3278 
3279 void NumberToStringStub::Generate(MacroAssembler* masm) {
3280  Label runtime;
3281 
3282  __ movq(rbx, Operand(rsp, kPointerSize));
3283 
3284  // Generate code to lookup number in the number string cache.
3285  GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime);
3286  __ ret(1 * kPointerSize);
3287 
3288  __ bind(&runtime);
3289  // Handle number to string in the runtime system if not found in the cache.
3290  __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
3291 }
3292 
3293 
3294 static int NegativeComparisonResult(Condition cc) {
3295  ASSERT(cc != equal);
3296  ASSERT((cc == less) || (cc == less_equal)
3297  || (cc == greater) || (cc == greater_equal));
3298  return (cc == greater || cc == greater_equal) ? LESS : GREATER;
3299 }
3300 
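NegativeComparisonResult picks whichever answer makes the relational test come out false, which is how comparisons against undefined (and, later, NaN) fail for every operator. A tiny self-contained check of that mapping, assuming the usual LESS = -1, EQUAL = 0, GREATER = 1 convention (the enum names below are illustrative, not the V8 declarations):

#include <cassert>

// Mirrors NegativeComparisonResult above; LESS/GREATER assumed to be -1/+1.
enum { LESS = -1, EQUAL = 0, GREATER = 1 };
enum RelationalOp { kLess, kLessEqual, kGreater, kGreaterEqual };

static int NegativeResult(RelationalOp cc) {
  return (cc == kGreater || cc == kGreaterEqual) ? LESS : GREATER;
}

int main() {
  assert(NegativeResult(kLess) == GREATER);       // "undefined < x" evaluates to false
  assert(NegativeResult(kGreaterEqual) == LESS);  // "undefined >= x" evaluates to false
  return 0;
}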
3301 
3302 void CompareStub::Generate(MacroAssembler* masm) {
3303  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
3304 
3305  Label check_unequal_objects, done;
3306  Factory* factory = masm->isolate()->factory();
3307 
3308  // Compare two smis if required.
3309  if (include_smi_compare_) {
3310  Label non_smi, smi_done;
3311  __ JumpIfNotBothSmi(rax, rdx, &non_smi);
3312  __ subq(rdx, rax);
3313  __ j(no_overflow, &smi_done);
3314  __ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
3315  __ bind(&smi_done);
3316  __ movq(rax, rdx);
3317  __ ret(0);
3318  __ bind(&non_smi);
3319  } else if (FLAG_debug_code) {
3320  Label ok;
3321  __ JumpIfNotSmi(rdx, &ok);
3322  __ JumpIfNotSmi(rax, &ok);
3323  __ Abort("CompareStub: smi operands");
3324  __ bind(&ok);
3325  }
3326 
3327  // The compare stub returns a positive, negative, or zero 64-bit integer
3328  // value in rax, corresponding to result of comparing the two inputs.
3329  // NOTICE! This code is only reached after a smi-fast-case check, so
3330  // it is certain that at least one operand isn't a smi.
3331 
3332  // Two identical objects are equal unless they are both NaN or undefined.
3333  {
3334  Label not_identical;
3335  __ cmpq(rax, rdx);
3336  __ j(not_equal, &not_identical, Label::kNear);
3337 
3338  if (cc_ != equal) {
3339  // Check for undefined. undefined OP undefined is false even though
3340  // undefined == undefined.
3341  Label check_for_nan;
3342  __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
3343  __ j(not_equal, &check_for_nan, Label::kNear);
3344  __ Set(rax, NegativeComparisonResult(cc_));
3345  __ ret(0);
3346  __ bind(&check_for_nan);
3347  }
3348 
3349  // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
3350  // so we do the second best thing - test it ourselves.
3351  // Note: if cc_ != equal, never_nan_nan_ is not used.
3352  // We cannot set rax to EQUAL until just before return because
3353  // rax must be unchanged on jump to not_identical.
3354  if (never_nan_nan_ && (cc_ == equal)) {
3355  __ Set(rax, EQUAL);
3356  __ ret(0);
3357  } else {
3358  Label heap_number;
3359  // If it's not a heap number, then return equal for the (in)equality operator.
3360  __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
3361  factory->heap_number_map());
3362  __ j(equal, &heap_number, Label::kNear);
3363  if (cc_ != equal) {
3364  // Call runtime on identical objects. Otherwise return equal.
3365  __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
3366  __ j(above_equal, &not_identical, Label::kNear);
3367  }
3368  __ Set(rax, EQUAL);
3369  __ ret(0);
3370 
3371  __ bind(&heap_number);
3372  // It is a heap number, so return equal if it's not NaN.
3373  // For NaN, return 1 for every condition except greater and
3374  // greater-equal. Return -1 for them, so the comparison yields
3375  // false for all conditions except not-equal.
3376  __ Set(rax, EQUAL);
3377  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
3378  __ ucomisd(xmm0, xmm0);
3379  __ setcc(parity_even, rax);
3380  // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
3381  if (cc_ == greater_equal || cc_ == greater) {
3382  __ neg(rax);
3383  }
3384  __ ret(0);
3385  }
3386 
3387  __ bind(&not_identical);
3388  }
3389 
3390  if (cc_ == equal) { // Both strict and non-strict.
3391  Label slow; // Fallthrough label.
3392 
3393  // If we're doing a strict equality comparison, we don't have to do
3394  // type conversion, so we generate code to do fast comparison for objects
3395  // and oddballs. Non-smi numbers and strings still go through the usual
3396  // slow-case code.
3397  if (strict_) {
3398  // If either is a Smi (we know that not both are), then they can only
3399  // be equal if the other is a HeapNumber. If so, use the slow case.
3400  {
3401  Label not_smis;
3402  __ SelectNonSmi(rbx, rax, rdx, &not_smis);
3403 
3404  // Check if the non-smi operand is a heap number.
3405  __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
3406  factory->heap_number_map());
3407  // If heap number, handle it in the slow case.
3408  __ j(equal, &slow);
3409  // Return non-equal. ebx (the lower half of rbx) is not zero.
3410  __ movq(rax, rbx);
3411  __ ret(0);
3412 
3413  __ bind(&not_smis);
3414  }
3415 
3416  // If either operand is a JSObject or an oddball value, then they are not
3417  // equal since their pointers are different
3418  // There is no test for undetectability in strict equality.
3419 
3420  // If the first object is a JS object, we have done pointer comparison.
3422  Label first_non_object;
3423  __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
3424  __ j(below, &first_non_object, Label::kNear);
3425  // Return non-zero (eax (not rax) is not zero)
3426  Label return_not_equal;
3428  __ bind(&return_not_equal);
3429  __ ret(0);
3430 
3431  __ bind(&first_non_object);
3432  // Check for oddballs: true, false, null, undefined.
3433  __ CmpInstanceType(rcx, ODDBALL_TYPE);
3434  __ j(equal, &return_not_equal);
3435 
3436  __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
3437  __ j(above_equal, &return_not_equal);
3438 
3439  // Check for oddballs: true, false, null, undefined.
3440  __ CmpInstanceType(rcx, ODDBALL_TYPE);
3441  __ j(equal, &return_not_equal);
3442 
3443  // Fall through to the general case.
3444  }
3445  __ bind(&slow);
3446  }
3447 
3448  // Generate the number comparison code.
3449  if (include_number_compare_) {
3450  Label non_number_comparison;
3451  Label unordered;
3452  FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
3453  __ xorl(rax, rax);
3454  __ xorl(rcx, rcx);
3455  __ ucomisd(xmm0, xmm1);
3456 
3457  // Don't base result on EFLAGS when a NaN is involved.
3458  __ j(parity_even, &unordered, Label::kNear);
3459  // Return a result of -1, 0, or 1, based on EFLAGS.
3460  __ setcc(above, rax);
3461  __ setcc(below, rcx);
3462  __ subq(rax, rcx);
3463  __ ret(0);
3464 
3465  // If one of the numbers was NaN, then the result is always false.
3466  // The cc is never not-equal.
3467  __ bind(&unordered);
3468  ASSERT(cc_ != not_equal);
3469  if (cc_ == less || cc_ == less_equal) {
3470  __ Set(rax, 1);
3471  } else {
3472  __ Set(rax, -1);
3473  }
3474  __ ret(0);
3475 
3476  // The number comparison code did not provide a valid result.
3477  __ bind(&non_number_comparison);
3478  }
3479 
3480  // Fast negative check for symbol-to-symbol equality.
3481  Label check_for_strings;
3482  if (cc_ == equal) {
3483  BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
3484  BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister);
3485 
3486  // We've already checked for object identity, so if both operands
3487  // are symbols they aren't equal. Register eax (not rax) already holds a
3488  // non-zero value, which indicates not equal, so just return.
3489  __ ret(0);
3490  }
3491 
3492  __ bind(&check_for_strings);
3493 
3494  __ JumpIfNotBothSequentialAsciiStrings(
3495  rdx, rax, rcx, rbx, &check_unequal_objects);
3496 
3497  // Inline comparison of ASCII strings.
3498  if (cc_ == equal) {
3499  StringCompareStub::GenerateFlatAsciiStringEquals(masm,
3500  rdx,
3501  rax,
3502  rcx,
3503  rbx);
3504  } else {
3505  StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
3506  rdx,
3507  rax,
3508  rcx,
3509  rbx,
3510  rdi,
3511  r8);
3512  }
3513 
3514 #ifdef DEBUG
3515  __ Abort("Unexpected fall-through from string comparison");
3516 #endif
3517 
3518  __ bind(&check_unequal_objects);
3519  if (cc_ == equal && !strict_) {
3520  // Not strict equality. Objects are unequal if
3521  // they are both JSObjects and not undetectable,
3522  // and their pointers are different.
3523  Label not_both_objects, return_unequal;
3524  // At most one is a smi, so we can test for smi by adding the two.
3525  // A smi plus a heap object has the low bit set, a heap object plus
3526  // a heap object has the low bit clear.
3527  STATIC_ASSERT(kSmiTag == 0);
3528  STATIC_ASSERT(kSmiTagMask == 1);
3529  __ lea(rcx, Operand(rax, rdx, times_1, 0));
3530  __ testb(rcx, Immediate(kSmiTagMask));
3531  __ j(not_zero, &not_both_objects, Label::kNear);
3532  __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
3533  __ j(below, &not_both_objects, Label::kNear);
3534  __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
3535  __ j(below, &not_both_objects, Label::kNear);
3536  __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
3537  Immediate(1 << Map::kIsUndetectable));
3538  __ j(zero, &return_unequal, Label::kNear);
3539  __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
3540  Immediate(1 << Map::kIsUndetectable));
3541  __ j(zero, &return_unequal, Label::kNear);
3542  // The objects are both undetectable, so they both compare as the value
3543  // undefined, and are equal.
3544  __ Set(rax, EQUAL);
3545  __ bind(&return_unequal);
3546  // Return non-equal by returning the non-zero object pointer in rax,
3547  // or return equal if we fell through to here.
3548  __ ret(0);
3549  __ bind(&not_both_objects);
3550  }
3551 
3552  // Push arguments below the return address to prepare jump to builtin.
3553  __ pop(rcx);
3554  __ push(rdx);
3555  __ push(rax);
3556 
3557  // Figure out which native to call and setup the arguments.
3558  Builtins::JavaScript builtin;
3559  if (cc_ == equal) {
3560  builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
3561  } else {
3562  builtin = Builtins::COMPARE;
3563  __ Push(Smi::FromInt(NegativeComparisonResult(cc_)));
3564  }
3565 
3566  // Restore return address on the stack.
3567  __ push(rcx);
3568 
3569  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
3570  // tagged as a small integer.
3571  __ InvokeBuiltin(builtin, JUMP_FUNCTION);
3572 }
3573 
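The smi fast path at the top of CompareStub::Generate leans on a small integer identity: the sign of the difference of the two tagged smis already orders them, and when the 64-bit subtraction overflows, flipping every bit of the wrapped difference restores the correct sign (and the value cannot be zero in that case). A self-contained check of that identity, using unsigned arithmetic for the wrap-around (an illustrative sketch, not the stub itself):

#include <cassert>
#include <cstdint>
#include <limits>

// Returns a value whose sign matches lhs <=> rhs, using the same
// wrap-around plus bitwise-NOT correction the stub emits.
static int64_t SmiCompareResult(int64_t lhs, int64_t rhs) {
  uint64_t diff = static_cast<uint64_t>(lhs) - static_cast<uint64_t>(rhs);
  // Signed overflow occurred iff the operands have different signs and the
  // result's sign differs from lhs's sign.
  bool overflow = ((lhs ^ rhs) & (lhs ^ static_cast<int64_t>(diff))) < 0;
  return overflow ? ~static_cast<int64_t>(diff) : static_cast<int64_t>(diff);
}

int main() {
  assert(SmiCompareResult(3, 7) < 0);
  assert(SmiCompareResult(7, 7) == 0);
  // Overflowing cases: the wrapped difference has the wrong sign; ~ fixes it.
  assert(SmiCompareResult(std::numeric_limits<int64_t>::min(), 1) < 0);
  assert(SmiCompareResult(1, std::numeric_limits<int64_t>::min()) > 0);
  return 0;
}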
3574 
3575 void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
3576  Label* label,
3577  Register object,
3578  Register scratch) {
3579  __ JumpIfSmi(object, label);
3580  __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
3581  __ movzxbq(scratch,
3582  FieldOperand(scratch, Map::kInstanceTypeOffset));
3583  // Ensure that no non-strings have the symbol bit set.
3585  STATIC_ASSERT(kSymbolTag != 0);
3586  __ testb(scratch, Immediate(kIsSymbolMask));
3587  __ j(zero, label);
3588 }
3589 
3590 
3591 void StackCheckStub::Generate(MacroAssembler* masm) {
3592  __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
3593 }
3594 
3595 
3596 void InterruptStub::Generate(MacroAssembler* masm) {
3597  __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
3598 }
3599 
3600 
3601 static void GenerateRecordCallTarget(MacroAssembler* masm) {
3602  // Cache the called function in a global property cell. Cache states
3603  // are uninitialized, monomorphic (indicated by a JSFunction), and
3604  // megamorphic.
3605  // rbx : cache cell for call target
3606  // rdi : the function to call
3607  Isolate* isolate = masm->isolate();
3608  Label initialize, done;
3609 
3610  // Load the cache state into rcx.
3611  __ movq(rcx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
3612 
3613  // A monomorphic cache hit or an already megamorphic state: invoke the
3614  // function without changing the state.
3615  __ cmpq(rcx, rdi);
3616  __ j(equal, &done, Label::kNear);
3617  __ Cmp(rcx, TypeFeedbackCells::MegamorphicSentinel(isolate));
3618  __ j(equal, &done, Label::kNear);
3619 
3620  // A monomorphic miss (i.e., the cache is not uninitialized) goes
3621  // megamorphic.
3622  __ Cmp(rcx, TypeFeedbackCells::UninitializedSentinel(isolate));
3623  __ j(equal, &initialize, Label::kNear);
3624  // MegamorphicSentinel is an immortal immovable object (undefined) so no
3625  // write-barrier is needed.
3626  __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
3627  TypeFeedbackCells::MegamorphicSentinel(isolate));
3628  __ jmp(&done, Label::kNear);
3629 
3630  // An uninitialized cache is patched with the function.
3631  __ bind(&initialize);
3632  __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rdi);
3633  // No need for a write barrier here - cells are rescanned.
3634 
3635  __ bind(&done);
3636 }
3637 
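The state machine that GenerateRecordCallTarget keeps in the call-target cell has only three states - uninitialized, monomorphic (a cached JSFunction), and megamorphic - and only ever moves forward. A compact sketch of the same transitions in ordinary C++ (CacheState, CallTargetCell and Record are illustrative names, not V8 declarations):

enum class CacheState { kUninitialized, kMonomorphic, kMegamorphic };

// Illustrative sketch of the transition logic encoded in the stub above.
struct CallTargetCell {
  CacheState state = CacheState::kUninitialized;
  const void* target = nullptr;  // the cached function when monomorphic

  void Record(const void* function) {
    if (state == CacheState::kMonomorphic && target == function) return;
    if (state == CacheState::kMegamorphic) return;
    if (state == CacheState::kUninitialized) {
      state = CacheState::kMonomorphic;   // first call: remember the function
      target = function;
    } else {
      state = CacheState::kMegamorphic;   // monomorphic miss: give up
      target = nullptr;
    }
  }
};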
3638 
3639 void CallFunctionStub::Generate(MacroAssembler* masm) {
3640  // rbx : cache cell for call target
3641  // rdi : the function to call
3642  Isolate* isolate = masm->isolate();
3643  Label slow, non_function;
3644 
3645  // The receiver might implicitly be the global object. This is
3646  // indicated by passing the hole as the receiver to the call
3647  // function stub.
3648  if (ReceiverMightBeImplicit()) {
3649  Label call;
3650  // Get the receiver from the stack.
3651  // +1 ~ return address
3652  __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
3653  // Call as function is indicated with the hole.
3654  __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
3655  __ j(not_equal, &call, Label::kNear);
3656  // Patch the receiver on the stack with the global receiver object.
3657  __ movq(rcx, GlobalObjectOperand());
3658  __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
3659  __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rcx);
3660  __ bind(&call);
3661  }
3662 
3663  // Check that the function really is a JavaScript function.
3664  __ JumpIfSmi(rdi, &non_function);
3665  // Goto slow case if we do not have a function.
3666  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
3667  __ j(not_equal, &slow);
3668 
3669  if (RecordCallTarget()) {
3670  GenerateRecordCallTarget(masm);
3671  }
3672 
3673  // Fast-case: Just invoke the function.
3674  ParameterCount actual(argc_);
3675 
3676  if (ReceiverMightBeImplicit()) {
3677  Label call_as_function;
3678  __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
3679  __ j(equal, &call_as_function);
3680  __ InvokeFunction(rdi,
3681  actual,
3682  JUMP_FUNCTION,
3683  NullCallWrapper(),
3684  CALL_AS_METHOD);
3685  __ bind(&call_as_function);
3686  }
3687  __ InvokeFunction(rdi,
3688  actual,
3689  JUMP_FUNCTION,
3690  NullCallWrapper(),
3691  CALL_AS_FUNCTION);
3692 
3693  // Slow-case: Non-function called.
3694  __ bind(&slow);
3695  if (RecordCallTarget()) {
3696  // If there is a call target cache, mark it megamorphic in the
3697  // non-function case. MegamorphicSentinel is an immortal immovable
3698  // object (undefined) so no write barrier is needed.
3699  __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
3700  TypeFeedbackCells::MegamorphicSentinel(isolate));
3701  }
3702  // Check for function proxy.
3703  __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
3704  __ j(not_equal, &non_function);
3705  __ pop(rcx);
3706  __ push(rdi); // put proxy as additional argument under return address
3707  __ push(rcx);
3708  __ Set(rax, argc_ + 1);
3709  __ Set(rbx, 0);
3710  __ SetCallKind(rcx, CALL_AS_METHOD);
3711  __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
3712  {
3713  Handle<Code> adaptor =
3714  masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
3715  __ jmp(adaptor, RelocInfo::CODE_TARGET);
3716  }
3717 
3718  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
3719  // of the original receiver from the call site).
3720  __ bind(&non_function);
3721  __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
3722  __ Set(rax, argc_);
3723  __ Set(rbx, 0);
3724  __ SetCallKind(rcx, CALL_AS_METHOD);
3725  __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
3726  Handle<Code> adaptor =
3727  Isolate::Current()->builtins()->ArgumentsAdaptorTrampoline();
3728  __ Jump(adaptor, RelocInfo::CODE_TARGET);
3729 }
3730 
3731 
3732 void CallConstructStub::Generate(MacroAssembler* masm) {
3733  // rax : number of arguments
3734  // rbx : cache cell for call target
3735  // rdi : constructor function
3736  Label slow, non_function_call;
3737 
3738  // Check that function is not a smi.
3739  __ JumpIfSmi(rdi, &non_function_call);
3740  // Check that function is a JSFunction.
3741  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
3742  __ j(not_equal, &slow);
3743 
3744  if (RecordCallTarget()) {
3745  GenerateRecordCallTarget(masm);
3746  }
3747 
3748  // Jump to the function-specific construct stub.
3749  __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
3750  __ movq(rbx, FieldOperand(rbx, SharedFunctionInfo::kConstructStubOffset));
3751  __ lea(rbx, FieldOperand(rbx, Code::kHeaderSize));
3752  __ jmp(rbx);
3753 
3754  // rdi: called object
3755  // rax: number of arguments
3756  // rcx: object map
3757  Label do_call;
3758  __ bind(&slow);
3759  __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
3760  __ j(not_equal, &non_function_call);
3761  __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
3762  __ jmp(&do_call);
3763 
3764  __ bind(&non_function_call);
3765  __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
3766  __ bind(&do_call);
3767  // Set expected number of arguments to zero (not changing rax).
3768  __ Set(rbx, 0);
3769  __ SetCallKind(rcx, CALL_AS_METHOD);
3770  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
3771  RelocInfo::CODE_TARGET);
3772 }
3773 
3774 
3775 bool CEntryStub::NeedsImmovableCode() {
3776  return false;
3777 }
3778 
3779 
3780 bool CEntryStub::IsPregenerated() {
3781 #ifdef _WIN64
3782  return result_size_ == 1;
3783 #else
3784  return true;
3785 #endif
3786 }
3787 
3788 
3789 void CodeStub::GenerateStubsAheadOfTime() {
3790  CEntryStub::GenerateAheadOfTime();
3791  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
3792  // It is important that the store buffer overflow stubs are generated first.
3793  RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
3794 }
3795 
3796 
3797 void CodeStub::GenerateFPStubs() {
3798 }
3799 
3800 
3801 void CEntryStub::GenerateAheadOfTime() {
3802  CEntryStub stub(1, kDontSaveFPRegs);
3803  stub.GetCode()->set_is_pregenerated(true);
3804  CEntryStub save_doubles(1, kSaveFPRegs);
3805  save_doubles.GetCode()->set_is_pregenerated(true);
3806 }
3807 
3808 
3809 void CEntryStub::GenerateCore(MacroAssembler* masm,
3810  Label* throw_normal_exception,
3811  Label* throw_termination_exception,
3812  Label* throw_out_of_memory_exception,
3813  bool do_gc,
3814  bool always_allocate_scope) {
3815  // rax: result parameter for PerformGC, if any.
3816  // rbx: pointer to C function (C callee-saved).
3817  // rbp: frame pointer (restored after C call).
3818  // rsp: stack pointer (restored after C call).
3819  // r14: number of arguments including receiver (C callee-saved).
3820  // r15: pointer to the first argument (C callee-saved).
3821  // This pointer is reused in LeaveExitFrame(), so it is stored in a
3822  // callee-saved register.
3823 
3824  // Simple results returned in rax (both AMD64 and Win64 calling conventions).
3825  // Complex results must be written to address passed as first argument.
3826  // AMD64 calling convention: a struct of two pointers in rax+rdx
3827 
3828  // Check stack alignment.
3829  if (FLAG_debug_code) {
3830  __ CheckStackAlignment();
3831  }
3832 
3833  if (do_gc) {
3834  // Pass failure code returned from last attempt as first argument to
3835  // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
3836  // stack is known to be aligned. This function takes one argument which is
3837  // passed in register.
3838 #ifdef _WIN64
3839  __ movq(rcx, rax);
3840 #else // _WIN64
3841  __ movq(rdi, rax);
3842 #endif
3843  __ movq(kScratchRegister,
3844  FUNCTION_ADDR(Runtime::PerformGC),
3845  RelocInfo::RUNTIME_ENTRY);
3846  __ call(kScratchRegister);
3847  }
3848 
3849  ExternalReference scope_depth =
3850  ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
3851  if (always_allocate_scope) {
3852  Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
3853  __ incl(scope_depth_operand);
3854  }
3855 
3856  // Call C function.
3857 #ifdef _WIN64
3858  // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
3859  // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
3860  __ movq(StackSpaceOperand(0), r14); // argc.
3861  __ movq(StackSpaceOperand(1), r15); // argv.
3862  if (result_size_ < 2) {
3863  // Pass a pointer to the Arguments object as the first argument.
3864  // Return result in single register (rax).
3865  __ lea(rcx, StackSpaceOperand(0));
3866  __ LoadAddress(rdx, ExternalReference::isolate_address());
3867  } else {
3868  ASSERT_EQ(2, result_size_);
3869  // Pass a pointer to the result location as the first argument.
3870  __ lea(rcx, StackSpaceOperand(2));
3871  // Pass a pointer to the Arguments object as the second argument.
3872  __ lea(rdx, StackSpaceOperand(0));
3873  __ LoadAddress(r8, ExternalReference::isolate_address());
3874  }
3875 
3876 #else // _WIN64
3877  // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
3878  __ movq(rdi, r14); // argc.
3879  __ movq(rsi, r15); // argv.
3880  __ movq(rdx, ExternalReference::isolate_address());
3881 #endif
3882  __ call(rbx);
3883  // Result is in rax - do not destroy this register!
3884 
3885  if (always_allocate_scope) {
3886  Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
3887  __ decl(scope_depth_operand);
3888  }
3889 
3890  // Check for failure result.
3891  Label failure_returned;
3892  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
3893 #ifdef _WIN64
3894  // If return value is on the stack, pop it to registers.
3895  if (result_size_ > 1) {
3896  ASSERT_EQ(2, result_size_);
3897  // Read result values stored on stack. Result is stored
3898  // above the four argument mirror slots and the two
3899  // Arguments object slots.
3900  __ movq(rax, Operand(rsp, 6 * kPointerSize));
3901  __ movq(rdx, Operand(rsp, 7 * kPointerSize));
3902  }
3903 #endif
3904  __ lea(rcx, Operand(rax, 1));
3905  // Lower 2 bits of rcx are 0 iff rax has failure tag.
3906  __ testl(rcx, Immediate(kFailureTagMask));
3907  __ j(zero, &failure_returned);
3908 
3909  // Exit the JavaScript to C++ exit frame.
3910  __ LeaveExitFrame(save_doubles_);
3911  __ ret(0);
3912 
3913  // Handling of failure.
3914  __ bind(&failure_returned);
3915 
3916  Label retry;
3917  // If the returned exception is RETRY_AFTER_GC continue at retry label
3919  __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
3920  __ j(zero, &retry, Label::kNear);
3921 
3922  // Special handling of out of memory exceptions.
3923  __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
3924  __ cmpq(rax, kScratchRegister);
3925  __ j(equal, throw_out_of_memory_exception);
3926 
3927  // Retrieve the pending exception and clear the variable.
3928  ExternalReference pending_exception_address(
3929  Isolate::kPendingExceptionAddress, masm->isolate());
3930  Operand pending_exception_operand =
3931  masm->ExternalOperand(pending_exception_address);
3932  __ movq(rax, pending_exception_operand);
3933  __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
3934  __ movq(pending_exception_operand, rdx);
3935 
3936  // Special handling of termination exceptions which are uncatchable
3937  // by JavaScript code.
3938  __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
3939  __ j(equal, throw_termination_exception);
3940 
3941  // Handle normal exception.
3942  __ jmp(throw_normal_exception);
3943 
3944  // Retry.
3945  __ bind(&retry);
3946 }
3947 
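The failure check right after the C call relies only on the STATIC_ASSERT above: failure pointers carry a tag whose value plus one is a multiple of the tag alignment, so adding 1 clears the low tag bits exactly when the returned word is a failure. A minimal sketch of that predicate (the tag constants are assumed to be 3, matching the assert, rather than quoted from the V8 headers):

#include <cstdint>

// Assumed values satisfying ((kFailureTag + 1) & kFailureTagMask) == 0,
// as the STATIC_ASSERT above requires; not quoted from V8 headers.
static const uintptr_t kFailureTag = 3;
static const uintptr_t kFailureTagMask = 3;

// True iff the word returned from the runtime call is a failure object.
static bool IsFailureResult(uintptr_t returned) {
  return ((returned + 1) & kFailureTagMask) == 0;
}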
3948 
3949 void CEntryStub::Generate(MacroAssembler* masm) {
3950  // rax: number of arguments including receiver
3951  // rbx: pointer to C function (C callee-saved)
3952  // rbp: frame pointer of calling JS frame (restored after C call)
3953  // rsp: stack pointer (restored after C call)
3954  // rsi: current context (restored)
3955 
3956  // NOTE: Invocations of builtins may return failure objects
3957  // instead of a proper result. The builtin entry handles
3958  // this by performing a garbage collection and retrying the
3959  // builtin once.
3960 
3961  // Enter the exit frame that transitions from JavaScript to C++.
3962 #ifdef _WIN64
3963  int arg_stack_space = (result_size_ < 2 ? 2 : 4);
3964 #else
3965  int arg_stack_space = 0;
3966 #endif
3967  __ EnterExitFrame(arg_stack_space, save_doubles_);
3968 
3969  // rax: Holds the context at this point, but should not be used.
3970  // On entry to code generated by GenerateCore, it must hold
3971  // a failure result if the collect_garbage argument to GenerateCore
3972  // is true. This failure result can be the result of code
3973  // generated by a previous call to GenerateCore. The value
3974  // of rax is then passed to Runtime::PerformGC.
3975  // rbx: pointer to builtin function (C callee-saved).
3976  // rbp: frame pointer of exit frame (restored after C call).
3977  // rsp: stack pointer (restored after C call).
3978  // r14: number of arguments including receiver (C callee-saved).
3979  // r15: argv pointer (C callee-saved).
3980 
3981  Label throw_normal_exception;
3982  Label throw_termination_exception;
3983  Label throw_out_of_memory_exception;
3984 
3985  // Call into the runtime system.
3986  GenerateCore(masm,
3987  &throw_normal_exception,
3988  &throw_termination_exception,
3989  &throw_out_of_memory_exception,
3990  false,
3991  false);
3992 
3993  // Do space-specific GC and retry runtime call.
3994  GenerateCore(masm,
3995  &throw_normal_exception,
3996  &throw_termination_exception,
3997  &throw_out_of_memory_exception,
3998  true,
3999  false);
4000 
4001  // Do full GC and retry runtime call one final time.
4002  Failure* failure = Failure::InternalError();
4003  __ movq(rax, failure, RelocInfo::NONE);
4004  GenerateCore(masm,
4005  &throw_normal_exception,
4006  &throw_termination_exception,
4007  &throw_out_of_memory_exception,
4008  true,
4009  true);
4010 
4011  __ bind(&throw_out_of_memory_exception);
4012  // Set external caught exception to false.
4013  Isolate* isolate = masm->isolate();
4014  ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
4015  isolate);
4016  __ Set(rax, static_cast<int64_t>(false));
4017  __ Store(external_caught, rax);
4018 
4019  // Set pending exception and rax to out of memory exception.
4020  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
4021  isolate);
4022  __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
4023  __ Store(pending_exception, rax);
4024  // Fall through to the next label.
4025 
4026  __ bind(&throw_termination_exception);
4027  __ ThrowUncatchable(rax);
4028 
4029  __ bind(&throw_normal_exception);
4030  __ Throw(rax);
4031 }
4032 
4033 
4034 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
4035  Label invoke, handler_entry, exit;
4036  Label not_outermost_js, not_outermost_js_2;
4037  { // NOLINT. Scope block confuses linter.
4038  MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
4039  // Set up frame.
4040  __ push(rbp);
4041  __ movq(rbp, rsp);
4042 
4043  // Push the stack frame type marker twice.
4044  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
4045  // Scratch register is neither callee-save, nor an argument register on any
4046  // platform. It's free to use at this point.
4047  // Cannot use smi-register for loading yet.
4048  __ movq(kScratchRegister,
4049  reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
4050  RelocInfo::NONE);
4051  __ push(kScratchRegister); // context slot
4052  __ push(kScratchRegister); // function slot
4053  // Save callee-saved registers (X64/Win64 calling conventions).
4054  __ push(r12);
4055  __ push(r13);
4056  __ push(r14);
4057  __ push(r15);
4058 #ifdef _WIN64
4059  __ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
4060  __ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
4061 #endif
4062  __ push(rbx);
4063  // TODO(X64): On Win64, if we ever use XMM6-XMM15, the low 64 bits are
4064  // callee save as well.
4065 
4066  // Set up the roots and smi constant registers.
4067  // Needs to be done before any further smi loads.
4068  __ InitializeSmiConstantRegister();
4069  __ InitializeRootRegister();
4070  }
4071 
4072  Isolate* isolate = masm->isolate();
4073 
4074  // Save copies of the top frame descriptor on the stack.
4075  ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
4076  {
4077  Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
4078  __ push(c_entry_fp_operand);
4079  }
4080 
4081  // If this is the outermost JS call, set js_entry_sp value.
4082  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
4083  __ Load(rax, js_entry_sp);
4084  __ testq(rax, rax);
4085  __ j(not_zero, &not_outermost_js);
4086  __ Push(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
4087  __ movq(rax, rbp);
4088  __ Store(js_entry_sp, rax);
4089  Label cont;
4090  __ jmp(&cont);
4091  __ bind(&not_outermost_js);
4092  __ Push(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
4093  __ bind(&cont);
4094 
4095  // Jump to a faked try block that does the invoke, with a faked catch
4096  // block that sets the pending exception.
4097  __ jmp(&invoke);
4098  __ bind(&handler_entry);
4099  handler_offset_ = handler_entry.pos();
4100  // Caught exception: Store result (exception) in the pending exception
4101  // field in the JSEnv and return a failure sentinel.
4102  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
4103  isolate);
4104  __ Store(pending_exception, rax);
4105  __ movq(rax, Failure::Exception(), RelocInfo::NONE);
4106  __ jmp(&exit);
4107 
4108  // Invoke: Link this frame into the handler chain. There's only one
4109  // handler block in this code object, so its index is 0.
4110  __ bind(&invoke);
4111  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
4112 
4113  // Clear any pending exceptions.
4114  __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
4115  __ Store(pending_exception, rax);
4116 
4117  // Fake a receiver (NULL).
4118  __ push(Immediate(0)); // receiver
4119 
4120  // Invoke the function by calling through JS entry trampoline builtin and
4121  // pop the faked function when we return. We load the address from an
4122  // external reference instead of inlining the call target address directly
4123  // in the code, because the builtin stubs may not have been generated yet
4124  // at the time this code is generated.
4125  if (is_construct) {
4126  ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
4127  isolate);
4128  __ Load(rax, construct_entry);
4129  } else {
4130  ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
4131  __ Load(rax, entry);
4132  }
4133  __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
4134  __ call(kScratchRegister);
4135 
4136  // Unlink this frame from the handler chain.
4137  __ PopTryHandler();
4138 
4139  __ bind(&exit);
4140  // Check if the current stack frame is marked as the outermost JS frame.
4141  __ pop(rbx);
4142  __ Cmp(rbx, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
4143  __ j(not_equal, &not_outermost_js_2);
4144  __ movq(kScratchRegister, js_entry_sp);
4145  __ movq(Operand(kScratchRegister, 0), Immediate(0));
4146  __ bind(&not_outermost_js_2);
4147 
4148  // Restore the top frame descriptor from the stack.
4149  { Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
4150  __ pop(c_entry_fp_operand);
4151  }
4152 
4153  // Restore callee-saved registers (X64 conventions).
4154  __ pop(rbx);
4155 #ifdef _WIN64
4156  // Callee-saved in Win64 ABI, arguments/volatile in AMD64 ABI.
4157  __ pop(rsi);
4158  __ pop(rdi);
4159 #endif
4160  __ pop(r15);
4161  __ pop(r14);
4162  __ pop(r13);
4163  __ pop(r12);
4164  __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
4165 
4166  // Restore frame pointer and return.
4167  __ pop(rbp);
4168  __ ret(0);
4169 }
4170 
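The js_entry_sp bookkeeping above simply records the frame pointer of the outermost JS entry and clears it again on the way out, so nested JSEntry frames leave it untouched. Roughly, as a C++ RAII sketch (EntryScope and its member names are illustrative, not part of V8):

// Illustrative sketch of the outermost-entry bookkeeping around a JS call.
struct EntryScope {
  static void* js_entry_sp;   // stand-in for the Isolate::kJSEntrySPAddress slot
  bool outermost;
  explicit EntryScope(void* frame_pointer) {
    outermost = (js_entry_sp == nullptr);
    if (outermost) js_entry_sp = frame_pointer;   // OUTERMOST_JSENTRY_FRAME
  }
  ~EntryScope() {
    if (outermost) js_entry_sp = nullptr;         // cleared on the way out
  }
};
void* EntryScope::js_entry_sp = nullptr;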
4171 
4172 void InstanceofStub::Generate(MacroAssembler* masm) {
4173  // Implements "value instanceof function" operator.
4174  // Expected input state with no inline cache:
4175  // rsp[0] : return address
4176  // rsp[1] : function pointer
4177  // rsp[2] : value
4178  // Expected input state with an inline one-element cache:
4179  // rsp[0] : return address
4180  // rsp[1] : offset from return address to location of inline cache
4181  // rsp[2] : function pointer
4182  // rsp[3] : value
4183  // Returns a bitwise zero to indicate that the value
4184  // is an instance of the function and anything else to
4185  // indicate that the value is not an instance.
4186 
4187  static const int kOffsetToMapCheckValue = 2;
4188  static const int kOffsetToResultValue = 18;
4189  // The last 4 bytes of the instruction sequence
4190  // movq(rdi, FieldOperand(rax, HeapObject::kMapOffset))
4191  // Move(kScratchRegister, FACTORY->the_hole_value())
4192  // in front of the hole value address.
4193  static const unsigned int kWordBeforeMapCheckValue = 0xBA49FF78;
4194  // The last 4 bytes of the instruction sequence
4195  // __ j(not_equal, &cache_miss);
4196  // __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
4197  // before the offset of the hole value in the root array.
4198  static const unsigned int kWordBeforeResultValue = 0x458B4909;
4199  // Only the inline check flag is supported on X64.
4200  ASSERT(flags_ == kNoFlags || HasCallSiteInlineCheck());
4201  int extra_stack_space = HasCallSiteInlineCheck() ? kPointerSize : 0;
4202 
4203  // Get the object - go slow case if it's a smi.
4204  Label slow;
4205 
4206  __ movq(rax, Operand(rsp, 2 * kPointerSize + extra_stack_space));
4207  __ JumpIfSmi(rax, &slow);
4208 
4209  // Check that the left hand is a JS object. Leave its map in rax.
4210  __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax);
4211  __ j(below, &slow);
4212  __ CmpInstanceType(rax, LAST_SPEC_OBJECT_TYPE);
4213  __ j(above, &slow);
4214 
4215  // Get the prototype of the function.
4216  __ movq(rdx, Operand(rsp, 1 * kPointerSize + extra_stack_space));
4217  // rdx is function, rax is map.
4218 
4219  // If there is a call site cache don't look in the global cache, but do the
4220  // real lookup and update the call site cache.
4221  if (!HasCallSiteInlineCheck()) {
4222  // Look up the function and the map in the instanceof cache.
4223  Label miss;
4224  __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
4225  __ j(not_equal, &miss, Label::kNear);
4226  __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
4227  __ j(not_equal, &miss, Label::kNear);
4228  __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
4229  __ ret(2 * kPointerSize);
4230  __ bind(&miss);
4231  }
4232 
4233  __ TryGetFunctionPrototype(rdx, rbx, &slow, true);
4234 
4235  // Check that the function prototype is a JS object.
4236  __ JumpIfSmi(rbx, &slow);
4237  __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
4238  __ j(below, &slow);
4239  __ CmpInstanceType(kScratchRegister, LAST_SPEC_OBJECT_TYPE);
4240  __ j(above, &slow);
4241 
4242  // Register mapping:
4243  // rax is object map.
4244  // rdx is function.
4245  // rbx is function prototype.
4246  if (!HasCallSiteInlineCheck()) {
4247  __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
4248  __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
4249  } else {
4250  // Get return address and delta to inlined map check.
4251  __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
4252  __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
4253  if (FLAG_debug_code) {
4254  __ movl(rdi, Immediate(kWordBeforeMapCheckValue));
4255  __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
4256  __ Assert(equal, "InstanceofStub unexpected call site cache (check).");
4257  }
4258  __ movq(kScratchRegister,
4259  Operand(kScratchRegister, kOffsetToMapCheckValue));
4260  __ movq(Operand(kScratchRegister, 0), rax);
4261  }
4262 
4263  __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
4264 
4265  // Loop through the prototype chain looking for the function prototype.
4266  Label loop, is_instance, is_not_instance;
4267  __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
4268  __ bind(&loop);
4269  __ cmpq(rcx, rbx);
4270  __ j(equal, &is_instance, Label::kNear);
4271  __ cmpq(rcx, kScratchRegister);
4272  // The code at is_not_instance assumes that kScratchRegister contains a
4273  // non-zero GCable value (the null object in this case).
4274  __ j(equal, &is_not_instance, Label::kNear);
4275  __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
4276  __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
4277  __ jmp(&loop);
4278 
4279  __ bind(&is_instance);
4280  if (!HasCallSiteInlineCheck()) {
4281  __ xorl(rax, rax);
4282  // Store bitwise zero in the cache. This is a Smi in GC terms.
4283  STATIC_ASSERT(kSmiTag == 0);
4284  __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
4285  } else {
4286  // Store offset of true in the root array at the inline check site.
4287  int true_offset = 0x100 +
4288  (Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
4289  // Assert it is a 1-byte signed value.
4290  ASSERT(true_offset >= 0 && true_offset < 0x100);
4291  __ movl(rax, Immediate(true_offset));
4292  __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
4293  __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
4294  __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
4295  if (FLAG_debug_code) {
4296  __ movl(rax, Immediate(kWordBeforeResultValue));
4297  __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
4298  __ Assert(equal, "InstanceofStub unexpected call site cache (mov).");
4299  }
4300  __ Set(rax, 0);
4301  }
4302  __ ret(2 * kPointerSize + extra_stack_space);
4303 
4304  __ bind(&is_not_instance);
4305  if (!HasCallSiteInlineCheck()) {
4306  // We have to store a non-zero value in the cache.
4307  __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
4308  } else {
4309  // Store offset of false in the root array at the inline check site.
4310  int false_offset = 0x100 +
4311  (Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
4312  // Assert it is a 1-byte signed value.
4313  ASSERT(false_offset >= 0 && false_offset < 0x100);
4314  __ movl(rax, Immediate(false_offset));
4315  __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
4316  __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
4317  __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
4318  if (FLAG_debug_code) {
4319  __ movl(rax, Immediate(kWordBeforeResultValue));
4320  __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
4321  __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
4322  }
4323  }
4324  __ ret(2 * kPointerSize + extra_stack_space);
4325 
4326  // Slow-case: Go through the JavaScript implementation.
4327  __ bind(&slow);
4328  if (HasCallSiteInlineCheck()) {
4329  // Remove extra value from the stack.
4330  __ pop(rcx);
4331  __ pop(rax);
4332  __ push(rcx);
4333  }
4334  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
4335 }
4336 
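Stripped of the instanceof cache and the inline call-site patching, the loop between the loop, is_instance and is_not_instance labels is an ordinary prototype-chain walk. In rough C++ terms (Object, Map and IsInstance are illustrative stand-ins for the real heap structures, not the V8 API):

// Illustrative sketch of the instanceof loop above.
struct Object;
struct Map { Object* prototype; };
struct Object { Map* map; };

static bool IsInstance(Object* object, Object* function_prototype,
                       Object* null_object) {
  Object* current = object->map->prototype;
  while (true) {
    if (current == function_prototype) return true;   // reached the prototype
    if (current == null_object) return false;         // fell off the chain
    current = current->map->prototype;                // walk one link up
  }
}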
4337 
4338 // Passing arguments in registers is not supported.
4339 Register InstanceofStub::left() { return no_reg; }
4340 
4341 
4342 Register InstanceofStub::right() { return no_reg; }
4343 
4344 
4345 int CompareStub::MinorKey() {
4346  // Encode the stub parameters in a unique 16 bit value. To avoid duplicate
4347  // stubs the never-NaN-NaN condition is only taken into account if the
4348  // condition is equality.
4349  ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
4350  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
4351  return ConditionField::encode(static_cast<unsigned>(cc_))
4352  | RegisterField::encode(false) // lhs_ and rhs_ are not used
4353  | StrictField::encode(strict_)
4354  | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
4355  | IncludeNumberCompareField::encode(include_number_compare_)
4356  | IncludeSmiCompareField::encode(include_smi_compare_);
4357 }
4358 
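MinorKey packs the stub parameters into one small integer so that every distinct configuration gets its own compiled and cached stub. A hand-rolled equivalent of that packing, with field widths chosen only for illustration (the authoritative BitField layout lives in the shared CompareStub declaration):

#include <cstdint>

// Illustrative packing only - widths and offsets are assumptions, not V8's layout.
static uint32_t EncodeCompareKey(unsigned condition,      // up to 12 bits
                                 bool strict,
                                 bool never_nan_nan,
                                 bool include_number_compare,
                                 bool include_smi_compare) {
  return (condition & 0xFFF)
       | (static_cast<uint32_t>(strict) << 12)
       | (static_cast<uint32_t>(never_nan_nan) << 13)
       | (static_cast<uint32_t>(include_number_compare) << 14)
       | (static_cast<uint32_t>(include_smi_compare) << 15);
}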
4359 
4360 // Unfortunately you have to run without snapshots to see most of these
4361 // names in the profile since most compare stubs end up in the snapshot.
4362 void CompareStub::PrintName(StringStream* stream) {
4363  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
4364  const char* cc_name;
4365  switch (cc_) {
4366  case less: cc_name = "LT"; break;
4367  case greater: cc_name = "GT"; break;
4368  case less_equal: cc_name = "LE"; break;
4369  case greater_equal: cc_name = "GE"; break;
4370  case equal: cc_name = "EQ"; break;
4371  case not_equal: cc_name = "NE"; break;
4372  default: cc_name = "UnknownCondition"; break;
4373  }
4374  bool is_equality = cc_ == equal || cc_ == not_equal;
4375  stream->Add("CompareStub_%s", cc_name);
4376  if (strict_ && is_equality) stream->Add("_STRICT");
4377  if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
4378  if (!include_number_compare_) stream->Add("_NO_NUMBER");
4379  if (!include_smi_compare_) stream->Add("_NO_SMI");
4380 }
4381 
4382 
4383 // -------------------------------------------------------------------------
4384 // StringCharCodeAtGenerator
4385 
4386 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
4387  Label flat_string;
4388  Label ascii_string;
4389  Label got_char_code;
4390  Label sliced_string;
4391 
4392  // If the receiver is a smi trigger the non-string case.
4393  __ JumpIfSmi(object_, receiver_not_string_);
4394 
4395  // Fetch the instance type of the receiver into result register.
4396  __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
4397  __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
4398  // If the receiver is not a string trigger the non-string case.
4399  __ testb(result_, Immediate(kIsNotStringMask));
4400  __ j(not_zero, receiver_not_string_);
4401 
4402  // If the index is non-smi trigger the non-smi case.
4403  __ JumpIfNotSmi(index_, &index_not_smi_);
4404  __ bind(&got_smi_index_);
4405 
4406  // Check for index out of range.
4407  __ SmiCompare(index_, FieldOperand(object_, String::kLengthOffset));
4408  __ j(above_equal, index_out_of_range_);
4409 
4410  __ SmiToInteger32(index_, index_);
4411 
4412  StringCharLoadGenerator::Generate(
4413  masm, object_, index_, result_, &call_runtime_);
4414 
4415  __ Integer32ToSmi(result_, result_);
4416  __ bind(&exit_);
4417 }
4418 
4419 
4420 void StringCharCodeAtGenerator::GenerateSlow(
4421  MacroAssembler* masm,
4422  const RuntimeCallHelper& call_helper) {
4423  __ Abort("Unexpected fallthrough to CharCodeAt slow case");
4424 
4425  Factory* factory = masm->isolate()->factory();
4426  // Index is not a smi.
4427  __ bind(&index_not_smi_);
4428  // If index is a heap number, try converting it to an integer.
4429  __ CheckMap(index_,
4430  factory->heap_number_map(),
4431  index_not_number_,
4432  DONT_DO_SMI_CHECK);
4433  call_helper.BeforeCall(masm);
4434  __ push(object_);
4435  __ push(index_); // Consumed by runtime conversion function.
4436  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
4437  __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
4438  } else {
4439  ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
4440  // NumberToSmi discards numbers that are not exact integers.
4441  __ CallRuntime(Runtime::kNumberToSmi, 1);
4442  }
4443  if (!index_.is(rax)) {
4444  // Save the conversion result before the pop instructions below
4445  // have a chance to overwrite it.
4446  __ movq(index_, rax);
4447  }
4448  __ pop(object_);
4449  // Reload the instance type.
4450  __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
4451  __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
4452  call_helper.AfterCall(masm);
4453  // If index is still not a smi, it must be out of range.
4454  __ JumpIfNotSmi(index_, index_out_of_range_);
4455  // Otherwise, return to the fast path.
4456  __ jmp(&got_smi_index_);
4457 
4458  // Call runtime. We get here when the receiver is a string and the
4459  // index is a number, but the code for getting the actual character
4460  // is too complex (e.g., when the string needs to be flattened).
4461  __ bind(&call_runtime_);
4462  call_helper.BeforeCall(masm);
4463  __ push(object_);
4464  __ Integer32ToSmi(index_, index_);
4465  __ push(index_);
4466  __ CallRuntime(Runtime::kStringCharCodeAt, 2);
4467  if (!result_.is(rax)) {
4468  __ movq(result_, rax);
4469  }
4470  call_helper.AfterCall(masm);
4471  __ jmp(&exit_);
4472 
4473  __ Abort("Unexpected fallthrough from CharCodeAt slow case");
4474 }
4475 
4476 
4477 // -------------------------------------------------------------------------
4478 // StringCharFromCodeGenerator
4479 
4480 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
4481  // Fast case of Heap::LookupSingleCharacterStringFromCode.
4482  __ JumpIfNotSmi(code_, &slow_case_);
4483  __ SmiCompare(code_, Smi::FromInt(String::kMaxAsciiCharCode));
4484  __ j(above, &slow_case_);
4485 
4486  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
4487  SmiIndex index = masm->SmiToIndex(kScratchRegister, code_, kPointerSizeLog2);
4488  __ movq(result_, FieldOperand(result_, index.reg, index.scale,
4489  FixedArray::kHeaderSize));
4490  __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
4491  __ j(equal, &slow_case_);
4492  __ bind(&exit_);
4493 }
4494 
4495 
4496 void StringCharFromCodeGenerator::GenerateSlow(
4497  MacroAssembler* masm,
4498  const RuntimeCallHelper& call_helper) {
4499  __ Abort("Unexpected fallthrough to CharFromCode slow case");
4500 
4501  __ bind(&slow_case_);
4502  call_helper.BeforeCall(masm);
4503  __ push(code_);
4504  __ CallRuntime(Runtime::kCharFromCode, 1);
4505  if (!result_.is(rax)) {
4506  __ movq(result_, rax);
4507  }
4508  call_helper.AfterCall(masm);
4509  __ jmp(&exit_);
4510 
4511  __ Abort("Unexpected fallthrough from CharFromCode slow case");
4512 }
4513 
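StringCharFromCode's fast path is nothing more than an index into the single-character string cache, guarded by the one-byte character-code range and an undefined hole check. Roughly (the cache array, the kMaxOneByteCharCode value and CharFromCode are illustrative assumptions, not the heap root):

// Illustrative only: 'cache' maps a character code to its cached one-character
// string, with null standing in for the undefined hole.
static const int kMaxOneByteCharCode = 0x7F;  // assumed one-byte range

static const char* CharFromCode(const char* const* cache, int code) {
  if (code < 0 || code > kMaxOneByteCharCode) return nullptr;  // slow case
  return cache[code];  // null (hole) means the caller must take the slow case
}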
4514 
4515 // -------------------------------------------------------------------------
4516 // StringCharAtGenerator
4517 
4518 void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
4519  char_code_at_generator_.GenerateFast(masm);
4520  char_from_code_generator_.GenerateFast(masm);
4521 }
4522 
4523 
4524 void StringCharAtGenerator::GenerateSlow(
4525  MacroAssembler* masm,
4526  const RuntimeCallHelper& call_helper) {
4527  char_code_at_generator_.GenerateSlow(masm, call_helper);
4528  char_from_code_generator_.GenerateSlow(masm, call_helper);
4529 }
4530 
4531 
4532 void StringAddStub::Generate(MacroAssembler* masm) {
4533  Label call_runtime, call_builtin;
4534  Builtins::JavaScript builtin_id = Builtins::ADD;
4535 
4536  // Load the two arguments.
4537  __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument (left).
4538  __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument (right).
4539 
4540  // Make sure that both arguments are strings if not known in advance.
4541  if (flags_ == NO_STRING_ADD_FLAGS) {
4542  __ JumpIfSmi(rax, &call_runtime);
4543  __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
4544  __ j(above_equal, &call_runtime);
4545 
4546  // First argument is a string, test second.
4547  __ JumpIfSmi(rdx, &call_runtime);
4548  __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
4549  __ j(above_equal, &call_runtime);
4550  } else {
4551  // Here at least one of the arguments is definitely a string.
4552  // We convert the one that is not known to be a string.
4553  if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
4554  ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
4555  GenerateConvertArgument(masm, 2 * kPointerSize, rax, rbx, rcx, rdi,
4556  &call_builtin);
4557  builtin_id = Builtins::STRING_ADD_RIGHT;
4558  } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
4559  ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
4560  GenerateConvertArgument(masm, 1 * kPointerSize, rdx, rbx, rcx, rdi,
4561  &call_builtin);
4562  builtin_id = Builtins::STRING_ADD_LEFT;
4563  }
4564  }
4565 
4566  // Both arguments are strings.
4567  // rax: first string
4568  // rdx: second string
4569  // Check if either of the strings is empty. In that case return the other.
4570  Label second_not_zero_length, both_not_zero_length;
4571  __ movq(rcx, FieldOperand(rdx, String::kLengthOffset));
4572  __ SmiTest(rcx);
4573  __ j(not_zero, &second_not_zero_length, Label::kNear);
4574  // Second string is empty, result is first string which is already in rax.
4575  Counters* counters = masm->isolate()->counters();
4576  __ IncrementCounter(counters->string_add_native(), 1);
4577  __ ret(2 * kPointerSize);
4578  __ bind(&second_not_zero_length);
4579  __ movq(rbx, FieldOperand(rax, String::kLengthOffset));
4580  __ SmiTest(rbx);
4581  __ j(not_zero, &both_not_zero_length, Label::kNear);
4582  // First string is empty, result is second string which is in rdx.
4583  __ movq(rax, rdx);
4584  __ IncrementCounter(counters->string_add_native(), 1);
4585  __ ret(2 * kPointerSize);
4586 
4587  // Both strings are non-empty.
4588  // rax: first string
4589  // rbx: length of first string
4590  // rcx: length of second string
4591  // rdx: second string
4592  // r8: map of first string (if flags_ == NO_STRING_ADD_FLAGS)
4593  // r9: map of second string (if flags_ == NO_STRING_ADD_FLAGS)
4594  Label string_add_flat_result, longer_than_two;
4595  __ bind(&both_not_zero_length);
4596 
4597  // If arguments were known to be strings, maps are not loaded to r8 and r9
4598  // by the code above.
4599  if (flags_ != NO_STRING_ADD_FLAGS) {
4600  __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
4601  __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
4602  }
4603  // Get the instance types of the two strings as they will be needed soon.
4604  __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset));
4605  __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
4606 
4607  // Look at the length of the result of adding the two strings.
4609  __ SmiAdd(rbx, rbx, rcx);
4610  // Use the symbol table when adding two one character strings, as it
4611  // helps later optimizations to return a symbol here.
4612  __ SmiCompare(rbx, Smi::FromInt(2));
4613  __ j(not_equal, &longer_than_two);
4614 
4615  // Check that both strings are non-external ASCII strings.
4616  __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
4617  &call_runtime);
4618 
4619  // Get the two characters forming the substring.
4620  __ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
4621  __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
4622 
4623  // Try to lookup two character string in symbol table. If it is not found
4624  // just allocate a new one.
4625  Label make_two_character_string, make_flat_ascii_string;
4626  StringHelper::GenerateTwoCharacterSymbolTableProbe(
4627  masm, rbx, rcx, r14, r11, rdi, r15, &make_two_character_string);
4628  __ IncrementCounter(counters->string_add_native(), 1);
4629  __ ret(2 * kPointerSize);
4630 
4631  __ bind(&make_two_character_string);
4632  __ Set(rdi, 2);
4633  __ AllocateAsciiString(rax, rdi, r8, r9, r11, &call_runtime);
4634  // rbx - first byte: first character
4635  // rbx - second byte: *maybe* second character
4636  // Make sure that the second byte of rbx contains the second character.
4637  __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
4638  __ shll(rcx, Immediate(kBitsPerByte));
4639  __ orl(rbx, rcx);
4640  // Write both characters to the new string.
4641  __ movw(FieldOperand(rax, SeqAsciiString::kHeaderSize), rbx);
4642  __ IncrementCounter(counters->string_add_native(), 1);
4643  __ ret(2 * kPointerSize);
4644 
4645  __ bind(&longer_than_two);
4646  // Check if resulting string will be flat.
4647  __ SmiCompare(rbx, Smi::FromInt(ConsString::kMinLength));
4648  __ j(below, &string_add_flat_result);
4649  // Handle exceptionally long strings in the runtime system.
4650  STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
4651  __ SmiCompare(rbx, Smi::FromInt(String::kMaxLength));
4652  __ j(above, &call_runtime);
4653 
4654  // If result is not supposed to be flat, allocate a cons string object. If
4655  // both strings are ASCII the result is an ASCII cons string.
4656  // rax: first string
4657  // rbx: length of resulting flat string
4658  // rdx: second string
4659  // r8: instance type of first string
4660  // r9: instance type of second string
4661  Label non_ascii, allocated, ascii_data;
4662  __ movl(rcx, r8);
4663  __ and_(rcx, r9);
4666  __ testl(rcx, Immediate(kStringEncodingMask));
4667  __ j(zero, &non_ascii);
4668  __ bind(&ascii_data);
4669  // Allocate an ASCII cons string.
4670  __ AllocateAsciiConsString(rcx, rdi, no_reg, &call_runtime);
4671  __ bind(&allocated);
4672  // Fill the fields of the cons string.
4673  __ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
4674  __ movq(FieldOperand(rcx, ConsString::kHashFieldOffset),
4675  Immediate(String::kEmptyHashField));
4676  __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
4677  __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
4678  __ movq(rax, rcx);
4679  __ IncrementCounter(counters->string_add_native(), 1);
4680  __ ret(2 * kPointerSize);
4681  __ bind(&non_ascii);
4682  // At least one of the strings is two-byte. Check whether it happens
4683  // to contain only ASCII characters.
4684  // rcx: first instance type AND second instance type.
4685  // r8: first instance type.
4686  // r9: second instance type.
4687  __ testb(rcx, Immediate(kAsciiDataHintMask));
4688  __ j(not_zero, &ascii_data);
4689  __ xor_(r8, r9);
4691  __ andb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
4692  __ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
4693  __ j(equal, &ascii_data);
4694  // Allocate a two byte cons string.
4695  __ AllocateTwoByteConsString(rcx, rdi, no_reg, &call_runtime);
4696  __ jmp(&allocated);
4697 
4698  // We cannot encounter sliced strings or cons strings here since:
4699  STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
4700  // Handle creating a flat result from either external or sequential strings.
4701  // Locate the first characters' locations.
4702  // rax: first string
4703  // rbx: length of resulting flat string as smi
4704  // rdx: second string
4705  // r8: instance type of first string
4706  // r9: instance type of second string
4707  Label first_prepared, second_prepared;
4708  Label first_is_sequential, second_is_sequential;
4709  __ bind(&string_add_flat_result);
4710 
4711  __ SmiToInteger32(r14, FieldOperand(rax, SeqString::kLengthOffset));
4712  // r14: length of first string
4714  __ testb(r8, Immediate(kStringRepresentationMask));
4715  __ j(zero, &first_is_sequential, Label::kNear);
4716  // Rule out short external string and load string resource.
4718  __ testb(r8, Immediate(kShortExternalStringMask));
4719  __ j(not_zero, &call_runtime);
4720  __ movq(rcx, FieldOperand(rax, ExternalString::kResourceDataOffset));
4721  __ jmp(&first_prepared, Label::kNear);
4722  __ bind(&first_is_sequential);
4723  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
4724  __ lea(rcx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
4725  __ bind(&first_prepared);
4726 
4727  // Check whether both strings have same encoding.
4728  __ xorl(r8, r9);
4729  __ testb(r8, Immediate(kStringEncodingMask));
4730  __ j(not_zero, &call_runtime);
4731 
4732  __ SmiToInteger32(r15, FieldOperand(rdx, SeqString::kLengthOffset));
4733  // r15: length of second string
4735  __ testb(r9, Immediate(kStringRepresentationMask));
4736  __ j(zero, &second_is_sequential, Label::kNear);
4737  // Rule out short external string and load string resource.
4739  __ testb(r9, Immediate(kShortExternalStringMask));
4740  __ j(not_zero, &call_runtime);
4741  __ movq(rdx, FieldOperand(rdx, ExternalString::kResourceDataOffset));
4742  __ jmp(&second_prepared, Label::kNear);
4743  __ bind(&second_is_sequential);
4744  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
4745  __ lea(rdx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
4746  __ bind(&second_prepared);
4747 
4748  Label non_ascii_string_add_flat_result;
4749  // r9: instance type of second string
4750  // First string and second string have the same encoding.
4752  __ SmiToInteger32(rbx, rbx);
4753  __ testb(r9, Immediate(kStringEncodingMask));
4754  __ j(zero, &non_ascii_string_add_flat_result);
4755 
4756  __ bind(&make_flat_ascii_string);
4757  // Both strings are ASCII strings. As they are short they are both flat.
4758  __ AllocateAsciiString(rax, rbx, rdi, r8, r9, &call_runtime);
4759  // rax: result string
4760  // Locate first character of result.
4761  __ lea(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
4762  // rcx: first char of first string
4763  // rbx: first character of result
4764  // r14: length of first string
4765  StringHelper::GenerateCopyCharacters(masm, rbx, rcx, r14, true);
4766  // rbx: next character of result
4767  // rdx: first char of second string
4768  // r15: length of second string
4769  StringHelper::GenerateCopyCharacters(masm, rbx, rdx, r15, true);
4770  __ IncrementCounter(counters->string_add_native(), 1);
4771  __ ret(2 * kPointerSize);
4772 
4773  __ bind(&non_ascii_string_add_flat_result);
4774  // Both strings are two-byte strings. As they are short they are both flat.
4775  __ AllocateTwoByteString(rax, rbx, rdi, r8, r9, &call_runtime);
4776  // rax: result string
4777  // Locate first character of result.
4778  __ lea(rbx, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
4779  // rcx: first char of first string
4780  // rbx: first character of result
4781  // r14: length of first string
4782  StringHelper::GenerateCopyCharacters(masm, rbx, rcx, r14, false);
4783  // rbx: next character of result
4784  // rdx: first char of second string
4785  // r15: length of second string
4786  StringHelper::GenerateCopyCharacters(masm, rbx, rdx, r15, false);
4787  __ IncrementCounter(counters->string_add_native(), 1);
4788  __ ret(2 * kPointerSize);
4789 
4790  // Just jump to runtime to add the two strings.
4791  __ bind(&call_runtime);
4792  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
4793 
4794  if (call_builtin.is_linked()) {
4795  __ bind(&call_builtin);
4796  __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
4797  }
4798 }
4799 
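The add stub's overall policy - hand back the other operand when one string is empty, try the symbol table for two-character results, build a ConsString once the combined length reaches ConsString::kMinLength, and otherwise copy into a fresh flat string - can be written out as ordinary C++ along these lines (std::string stands in for heap strings, and kMinConsLength is an assumed stand-in for ConsString::kMinLength):

#include <string>

// Illustrative decision logic only; the real stub also handles encodings,
// external strings, overlong strings, and the symbol table probe.
static const size_t kMinConsLength = 13;  // assumed stand-in for ConsString::kMinLength

enum class AddStrategy { kReturnLeft, kReturnRight, kTwoCharacterLookup,
                         kConsString, kFlatCopy };

static AddStrategy ChooseAddStrategy(const std::string& left,
                                     const std::string& right) {
  if (right.empty()) return AddStrategy::kReturnLeft;
  if (left.empty()) return AddStrategy::kReturnRight;
  size_t length = left.size() + right.size();
  if (length == 2) return AddStrategy::kTwoCharacterLookup;
  if (length >= kMinConsLength) return AddStrategy::kConsString;
  return AddStrategy::kFlatCopy;
}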
4800 
4801 void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
4802  int stack_offset,
4803  Register arg,
4804  Register scratch1,
4805  Register scratch2,
4806  Register scratch3,
4807  Label* slow) {
4808  // First check if the argument is already a string.
4809  Label not_string, done;
4810  __ JumpIfSmi(arg, &not_string);
4811  __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
4812  __ j(below, &done);
4813 
4814  // Check the number to string cache.
4815  Label not_cached;
4816  __ bind(&not_string);
4817  // Puts the cached result into scratch1.
4818  NumberToStringStub::GenerateLookupNumberStringCache(masm,
4819  arg,
4820  scratch1,
4821  scratch2,
4822  scratch3,
4823  false,
4824  &not_cached);
4825  __ movq(arg, scratch1);
4826  __ movq(Operand(rsp, stack_offset), arg);
4827  __ jmp(&done);
4828 
4829  // Check if the argument is a safe string wrapper.
4830  __ bind(&not_cached);
4831  __ JumpIfSmi(arg, slow);
4832  __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1.
4833  __ j(not_equal, slow);
4834  __ testb(FieldOperand(scratch1, Map::kBitField2Offset),
4835  Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
4836  __ j(zero, slow);
4837  __ movq(arg, FieldOperand(arg, JSValue::kValueOffset));
4838  __ movq(Operand(rsp, stack_offset), arg);
4839 
4840  __ bind(&done);
4841 }
4842 
4843 
4844 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
4845  Register dest,
4846  Register src,
4847  Register count,
4848  bool ascii) {
4849  Label loop;
4850  __ bind(&loop);
4851  // This loop just copies one character at a time, as it is only used for very
4852  // short strings.
4853  if (ascii) {
4854  __ movb(kScratchRegister, Operand(src, 0));
4855  __ movb(Operand(dest, 0), kScratchRegister);
4856  __ incq(src);
4857  __ incq(dest);
4858  } else {
4859  __ movzxwl(kScratchRegister, Operand(src, 0));
4860  __ movw(Operand(dest, 0), kScratchRegister);
4861  __ addq(src, Immediate(2));
4862  __ addq(dest, Immediate(2));
4863  }
4864  __ decl(count);
4865  __ j(not_zero, &loop);
4866 }
4867 
4868 
4869 void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
4870  Register dest,
4871  Register src,
4872  Register count,
4873  bool ascii) {
4874  // Copy characters using rep movs of quadwords. Copy any remaining
4875  // characters one byte at a time after the bulk copy, since the byte
4876  // count need not be a multiple of eight.
4877  // Count is positive int32, dest and src are character pointers.
4878  ASSERT(dest.is(rdi)); // rep movs destination
4879  ASSERT(src.is(rsi)); // rep movs source
4880  ASSERT(count.is(rcx)); // rep movs count
4881 
4882  // Nothing to do for zero characters.
4883  Label done;
4884  __ testl(count, count);
4885  __ j(zero, &done, Label::kNear);
4886 
4887  // Make count the number of bytes to copy.
4888  if (!ascii) {
4889  STATIC_ASSERT(2 == sizeof(uc16));
4890  __ addl(count, count);
4891  }
4892 
4893  // Don't enter the rep movs if there are fewer than 8 bytes to copy.
4894  Label last_bytes;
4895  __ testl(count, Immediate(~7));
4896  __ j(zero, &last_bytes, Label::kNear);
4897 
4898  // Copy from rsi to rdi using the rep movs instruction.
4899  __ movl(kScratchRegister, count);
4900  __ shr(count, Immediate(3)); // Number of quadwords to copy.
4901  __ repmovsq();
4902 
4903  // Find number of bytes left.
4904  __ movl(count, kScratchRegister);
4905  __ and_(count, Immediate(7));
4906 
4907  // Check if there are more bytes to copy.
4908  __ bind(&last_bytes);
4909  __ testl(count, count);
4910  __ j(zero, &done, Label::kNear);
4911 
4912  // Copy remaining characters.
4913  Label loop;
4914  __ bind(&loop);
4915  __ movb(kScratchRegister, Operand(src, 0));
4916  __ movb(Operand(dest, 0), kScratchRegister);
4917  __ incq(src);
4918  __ incq(dest);
4919  __ decl(count);
4920  __ j(not_zero, &loop);
4921 
4922  __ bind(&done);
4923 }
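
// ---------------------------------------------------------------------------
// Editor's sketch (not part of code-stubs-x64.cc): GenerateCopyCharactersREP
// above copies the bulk of the data eight bytes at a time (rep movsq) and
// finishes the trailing 0-7 bytes one byte at a time. A plain C++ version of
// the same strategy (hypothetical helper name) looks like this:
#include <cstdint>
#include <cstring>

static void CopyCharactersRepSketch(uint8_t* dest, const uint8_t* src,
                                    int count, bool ascii) {
  if (count == 0) return;                  // nothing to do for zero characters
  int bytes = ascii ? count : count * 2;   // two-byte strings: 2 bytes/char
  int quads = bytes >> 3;                  // number of 8-byte chunks (rep movsq)
  std::memcpy(dest, src, static_cast<size_t>(quads) * 8);
  dest += quads * 8;
  src += quads * 8;
  for (int i = 0; i < (bytes & 7); i++) {  // remaining 0-7 bytes
    dest[i] = src[i];
  }
}
// --- end of editor's sketch ---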
4924 
4925 void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
4926  Register c1,
4927  Register c2,
4928  Register scratch1,
4929  Register scratch2,
4930  Register scratch3,
4931  Register scratch4,
4932  Label* not_found) {
4933  // Register scratch3 is the general scratch register in this function.
4934  Register scratch = scratch3;
4935 
4936  // Make sure that both characters are not digits, as such strings have a
4937  // different hash algorithm. Don't try to look for these in the symbol table.
4938  Label not_array_index;
4939  __ leal(scratch, Operand(c1, -'0'));
4940  __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
4941  __ j(above, &not_array_index, Label::kNear);
4942  __ leal(scratch, Operand(c2, -'0'));
4943  __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
4944  __ j(below_equal, not_found);
4945 
4946  __ bind(&not_array_index);
4947  // Calculate the two character string hash.
4948  Register hash = scratch1;
4949  GenerateHashInit(masm, hash, c1, scratch);
4950  GenerateHashAddCharacter(masm, hash, c2, scratch);
4951  GenerateHashGetHash(masm, hash, scratch);
4952 
4953  // Collect the two characters in a register.
4954  Register chars = c1;
4955  __ shl(c2, Immediate(kBitsPerByte));
4956  __ orl(chars, c2);
4957 
4958  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
4959  // hash: hash of two character string.
4960 
4961  // Load the symbol table.
4962  Register symbol_table = c2;
4963  __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
4964 
4965  // Calculate capacity mask from the symbol table capacity.
4966  Register mask = scratch2;
4967  __ SmiToInteger32(mask,
4968  FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
4969  __ decl(mask);
4970 
4971  Register map = scratch4;
4972 
4973  // Registers
4974  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
4975  // hash: hash of two character string (32-bit int)
4976  // symbol_table: symbol table
4977  // mask: capacity mask (32-bit int)
4978  // map: -
4979  // scratch: -
4980 
4981  // Perform a number of probes in the symbol table.
4982  static const int kProbes = 4;
4983  Label found_in_symbol_table;
4984  Label next_probe[kProbes];
4985  Register candidate = scratch; // Scratch register contains candidate.
4986  for (int i = 0; i < kProbes; i++) {
4987  // Calculate entry in symbol table.
4988  __ movl(scratch, hash);
4989  if (i > 0) {
4990  __ addl(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
4991  }
4992  __ andl(scratch, mask);
4993 
4994  // Load the entry from the symbol table.
4995  STATIC_ASSERT(SymbolTable::kEntrySize == 1);
4996  __ movq(candidate,
4997  FieldOperand(symbol_table,
4998  scratch,
4999  times_pointer_size,
5000  SymbolTable::kElementsStartOffset));
5001 
5002  // If entry is undefined no string with this hash can be found.
5003  Label is_string;
5004  __ CmpObjectType(candidate, ODDBALL_TYPE, map);
5005  __ j(not_equal, &is_string, Label::kNear);
5006 
5007  __ CompareRoot(candidate, Heap::kUndefinedValueRootIndex);
5008  __ j(equal, not_found);
5009  // Must be the hole (deleted entry).
5010  if (FLAG_debug_code) {
5011  __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
5012  __ cmpq(kScratchRegister, candidate);
5013  __ Assert(equal, "oddball in symbol table is not undefined or the hole");
5014  }
5015  __ jmp(&next_probe[i]);
5016 
5017  __ bind(&is_string);
5018 
5019  // If length is not 2 the string is not a candidate.
5020  __ SmiCompare(FieldOperand(candidate, String::kLengthOffset),
5021  Smi::FromInt(2));
5022  __ j(not_equal, &next_probe[i]);
5023 
5024  // We use kScratchRegister as a temporary register on the assumption that
5025  // JumpIfInstanceTypeIsNotSequentialAscii does not use it implicitly.
5026  Register temp = kScratchRegister;
5027 
5028  // Check that the candidate is a non-external ASCII string.
5029  __ movzxbl(temp, FieldOperand(map, Map::kInstanceTypeOffset));
5030  __ JumpIfInstanceTypeIsNotSequentialAscii(
5031  temp, temp, &next_probe[i]);
5032 
5033  // Check if the two characters match.
5034  __ movl(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
5035  __ andl(temp, Immediate(0x0000ffff));
5036  __ cmpl(chars, temp);
5037  __ j(equal, &found_in_symbol_table);
5038  __ bind(&next_probe[i]);
5039  }
5040 
5041  // No matching 2 character string found by probing.
5042  __ jmp(not_found);
5043 
5044  // Scratch register contains result when we fall through to here.
5045  Register result = candidate;
5046  __ bind(&found_in_symbol_table);
5047  if (!result.is(rax)) {
5048  __ movq(rax, result);
5049  }
5050 }
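
// ---------------------------------------------------------------------------
// Editor's sketch (not part of code-stubs-x64.cc): the probe above looks a
// two-character string up by hash in the symbol table, masking
// (hash + probe offset) by capacity - 1 and giving up after kProbes slots.
// The toy table below mirrors only the shape of that lookup; the probe step
// is a simple triangular sequence and does NOT reproduce V8's exact
// SymbolTable::GetProbeOffset() formula. Entries of -1 model undefined slots.
#include <cstdint>
#include <vector>

static int TwoCharLookupSketch(const std::vector<int32_t>& table,
                               uint32_t hash, uint16_t two_chars) {
  const uint32_t mask = static_cast<uint32_t>(table.size()) - 1;  // power of two
  uint32_t offset = 0;
  for (int i = 0; i < 4; i++) {            // kProbes == 4 in the stub above
    offset += static_cast<uint32_t>(i);    // illustrative probe step only
    const uint32_t index = (hash + offset) & mask;
    const int32_t entry = table[index];
    if (entry < 0) return -1;              // undefined slot: string not present
    if (static_cast<uint16_t>(entry) == two_chars) {
      return static_cast<int>(index);      // found_in_symbol_table
    }
  }
  return -1;                               // not found: fall back to the runtime
}
// --- end of editor's sketch ---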
5051 
5052 
5053 void StringHelper::GenerateHashInit(MacroAssembler* masm,
5054  Register hash,
5055  Register character,
5056  Register scratch) {
5057  // hash = (seed + character) + ((seed + character) << 10);
5058  __ LoadRoot(scratch, Heap::kHashSeedRootIndex);
5059  __ SmiToInteger32(scratch, scratch);
5060  __ addl(scratch, character);
5061  __ movl(hash, scratch);
5062  __ shll(scratch, Immediate(10));
5063  __ addl(hash, scratch);
5064  // hash ^= hash >> 6;
5065  __ movl(scratch, hash);
5066  __ shrl(scratch, Immediate(6));
5067  __ xorl(hash, scratch);
5068 }
5069 
5070 
5071 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
5072  Register hash,
5073  Register character,
5074  Register scratch) {
5075  // hash += character;
5076  __ addl(hash, character);
5077  // hash += hash << 10;
5078  __ movl(scratch, hash);
5079  __ shll(scratch, Immediate(10));
5080  __ addl(hash, scratch);
5081  // hash ^= hash >> 6;
5082  __ movl(scratch, hash);
5083  __ shrl(scratch, Immediate(6));
5084  __ xorl(hash, scratch);
5085 }
5086 
5087 
5088 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
5089  Register hash,
5090  Register scratch) {
5091  // hash += hash << 3;
5092  __ leal(hash, Operand(hash, hash, times_8, 0));
5093  // hash ^= hash >> 11;
5094  __ movl(scratch, hash);
5095  __ shrl(scratch, Immediate(11));
5096  __ xorl(hash, scratch);
5097  // hash += hash << 15;
5098  __ movl(scratch, hash);
5099  __ shll(scratch, Immediate(15));
5100  __ addl(hash, scratch);
5101 
5102  __ andl(hash, Immediate(String::kHashBitMask));
5103 
5104  // if (hash == 0) hash = 27;
5105  Label hash_not_zero;
5106  __ j(not_zero, &hash_not_zero);
5107  __ Set(hash, StringHasher::kZeroHash);
5108  __ bind(&hash_not_zero);
5109 }
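
// ---------------------------------------------------------------------------
// Editor's sketch (not part of code-stubs-x64.cc): GenerateHashInit,
// GenerateHashAddCharacter and GenerateHashGetHash together compute V8's
// seeded, Jenkins-style string hash. GenerateHashInit is equivalent to
// starting from the seed and running the per-character step once. The C++
// below mirrors the arithmetic spelled out in the comments above;
// hash_bit_mask stands in for String::kHashBitMask, and 27 is the
// "if (hash == 0) hash = 27" fallback noted in GenerateHashGetHash.
#include <cstdint>

static uint32_t StringHashSketch(const uint8_t* chars, int length,
                                 uint32_t seed, uint32_t hash_bit_mask) {
  uint32_t hash = seed;
  for (int i = 0; i < length; i++) {
    hash += chars[i];                      // GenerateHashInit / AddCharacter
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  hash += hash << 3;                       // GenerateHashGetHash
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= hash_bit_mask;
  return hash == 0 ? 27u : hash;           // a zero hash value is reserved
}
// --- end of editor's sketch ---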
5110 
5111 void SubStringStub::Generate(MacroAssembler* masm) {
5112  Label runtime;
5113 
5114  // Stack frame on entry.
5115  // rsp[0]: return address
5116  // rsp[8]: to
5117  // rsp[16]: from
5118  // rsp[24]: string
5119 
5120  const int kToOffset = 1 * kPointerSize;
5121  const int kFromOffset = kToOffset + kPointerSize;
5122  const int kStringOffset = kFromOffset + kPointerSize;
5123  const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset;
5124 
5125  // Make sure first argument is a string.
5126  __ movq(rax, Operand(rsp, kStringOffset));
5127  STATIC_ASSERT(kSmiTag == 0);
5128  __ testl(rax, Immediate(kSmiTagMask));
5129  __ j(zero, &runtime);
5130  Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
5131  __ j(NegateCondition(is_string), &runtime);
5132 
5133  // rax: string
5134  // rbx: instance type
5135  // Calculate length of sub string using the smi values.
5136  __ movq(rcx, Operand(rsp, kToOffset));
5137  __ movq(rdx, Operand(rsp, kFromOffset));
5138  __ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime);
5139 
5140  __ SmiSub(rcx, rcx, rdx); // Overflow doesn't happen.
5141  __ cmpq(rcx, FieldOperand(rax, String::kLengthOffset));
5142  Label not_original_string;
5143  // Shorter than original string's length: an actual substring.
5144  __ j(below, &not_original_string, Label::kNear);
5145  // Longer than original string's length or negative: unsafe arguments.
5146  __ j(above, &runtime);
5147  // Return original string.
5148  Counters* counters = masm->isolate()->counters();
5149  __ IncrementCounter(counters->sub_string_native(), 1);
5150  __ ret(kArgumentsSize);
5151  __ bind(&not_original_string);
5152  __ SmiToInteger32(rcx, rcx);
5153 
5154  // rax: string
5155  // rbx: instance type
5156  // rcx: sub string length
5157  // rdx: from index (smi)
5158  // Deal with different string types: update the index if necessary
5159  // and put the underlying string into rdi.
5160  Label underlying_unpacked, sliced_string, seq_or_external_string;
5161  // If the string is not indirect, it can only be sequential or external.
5164  __ testb(rbx, Immediate(kIsIndirectStringMask));
5165  __ j(zero, &seq_or_external_string, Label::kNear);
5166 
5167  __ testb(rbx, Immediate(kSlicedNotConsMask));
5168  __ j(not_zero, &sliced_string, Label::kNear);
5169  // Cons string. Check whether it is flat, then fetch first part.
5170  // Flat cons strings have an empty second part.
5171  __ CompareRoot(FieldOperand(rax, ConsString::kSecondOffset),
5172  Heap::kEmptyStringRootIndex);
5173  __ j(not_equal, &runtime);
5174  __ movq(rdi, FieldOperand(rax, ConsString::kFirstOffset));
5175  // Update instance type.
5176  __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
5177  __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
5178  __ jmp(&underlying_unpacked, Label::kNear);
5179 
5180  __ bind(&sliced_string);
5181  // Sliced string. Fetch parent and correct start index by offset.
5182  __ addq(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
5183  __ movq(rdi, FieldOperand(rax, SlicedString::kParentOffset));
5184  // Update instance type.
5185  __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
5186  __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
5187  __ jmp(&underlying_unpacked, Label::kNear);
5188 
5189  __ bind(&seq_or_external_string);
5190  // Sequential or external string. Just move string to the correct register.
5191  __ movq(rdi, rax);
5192 
5193  __ bind(&underlying_unpacked);
5194 
5195  if (FLAG_string_slices) {
5196  Label copy_routine;
5197  // rdi: underlying subject string
5198  // rbx: instance type of underlying subject string
5199  // rdx: adjusted start index (smi)
5200  // rcx: length
5201  // If coming from the make_two_character_string path, the string
5202  // is too short to be sliced anyways.
5203  __ cmpq(rcx, Immediate(SlicedString::kMinLength));
5204  // Short slice. Copy instead of slicing.
5205  __ j(less, &copy_routine);
5206  // Allocate new sliced string. At this point we do not reload the instance
5207  // type including the string encoding because we simply rely on the info
5208  // provided by the original string. It does not matter if the original
5209  // string's encoding is wrong because we always have to recheck encoding of
5210  // the newly created string's parent anyways due to externalized strings.
5211  Label two_byte_slice, set_slice_header;
5214  __ testb(rbx, Immediate(kStringEncodingMask));
5215  __ j(zero, &two_byte_slice, Label::kNear);
5216  __ AllocateAsciiSlicedString(rax, rbx, r14, &runtime);
5217  __ jmp(&set_slice_header, Label::kNear);
5218  __ bind(&two_byte_slice);
5219  __ AllocateTwoByteSlicedString(rax, rbx, r14, &runtime);
5220  __ bind(&set_slice_header);
5221  __ Integer32ToSmi(rcx, rcx);
5222  __ movq(FieldOperand(rax, SlicedString::kLengthOffset), rcx);
5223  __ movq(FieldOperand(rax, SlicedString::kHashFieldOffset),
5224  Immediate(String::kEmptyHashField));
5225  __ movq(FieldOperand(rax, SlicedString::kParentOffset), rdi);
5226  __ movq(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
5227  __ IncrementCounter(counters->sub_string_native(), 1);
5228  __ ret(kArgumentsSize);
5229 
5230  __ bind(&copy_routine);
5231  }
5232 
5233  // rdi: underlying subject string
5234  // rbx: instance type of underlying subject string
5235  // rdx: adjusted start index (smi)
5236  // rcx: length
5237  // The subject string can only be an external or a sequential string of
5238  // either encoding at this point.
5239  Label two_byte_sequential, sequential_string;
5242  __ testb(rbx, Immediate(kExternalStringTag));
5243  __ j(zero, &sequential_string);
5244 
5245  // Handle external string.
5246  // Rule out short external strings.
5248  __ testb(rbx, Immediate(kShortExternalStringMask));
5249  __ j(not_zero, &runtime);
5250  __ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
5251  // Move the pointer so that offset-wise, it looks like a sequential string.
5253  __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
5254 
5255  __ bind(&sequential_string);
5257  __ testb(rbx, Immediate(kStringEncodingMask));
5258  __ j(zero, &two_byte_sequential);
5259 
5260  // Allocate the result.
5261  __ AllocateAsciiString(rax, rcx, r11, r14, r15, &runtime);
5262 
5263  // rax: result string
5264  // rcx: result string length
5265  __ movq(r14, rsi); // rsi is used by the following code.
5266  { // Locate character of sub string start.
5267  SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1);
5268  __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
5269  SeqAsciiString::kHeaderSize - kHeapObjectTag));
5270  }
5271  // Locate first character of result.
5272  __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));
5273 
5274  // rax: result string
5275  // rcx: result length
5276  // rdi: first character of result
5277  // rsi: character of sub string start
5278  // r14: original value of rsi
5279  StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
5280  __ movq(rsi, r14); // Restore rsi.
5281  __ IncrementCounter(counters->sub_string_native(), 1);
5282  __ ret(kArgumentsSize);
5283 
5284  __ bind(&two_byte_sequential);
5285  // Allocate the result.
5286  __ AllocateTwoByteString(rax, rcx, r11, r14, r15, &runtime);
5287 
5288  // rax: result string
5289  // rcx: result string length
5290  __ movq(r14, rsi); // rsi is used by the following code.
5291  { // Locate character of sub string start.
5292  SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2);
5293  __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
5294  SeqTwoByteString::kHeaderSize - kHeapObjectTag));
5295  }
5296  // Locate first character of result.
5297  __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
5298 
5299  // rax: result string
5300  // rcx: result length
5301  // rdi: first character of result
5302  // rsi: character of sub string start
5303  // r14: original value of rsi
5304  StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
5305  __ movq(rsi, r14); // Restore rsi.
5306  __ IncrementCounter(counters->sub_string_native(), 1);
5307  __ ret(kArgumentsSize);
5308 
5309  // Just jump to runtime to create the sub string.
5310  __ bind(&runtime);
5311  __ TailCallRuntime(Runtime::kSubString, 3, 1);
5312 }
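
// ---------------------------------------------------------------------------
// Editor's sketch (not part of code-stubs-x64.cc): SubStringStub::Generate
// picks one of four paths: return the original string when the requested
// length equals its length, bail out to the runtime for unsafe arguments,
// allocate a SlicedString for long enough substrings (when FLAG_string_slices
// is on), or copy the characters into a fresh sequential string. With the smi
// and string-type checks elided, the decision logic is roughly:
#include <cstdint>

enum class SubStringPath { kReturnOriginal, kRuntime, kMakeSlice, kCopyChars };

// string_slices mirrors FLAG_string_slices; sliced_min_length stands in for
// SlicedString::kMinLength.
static SubStringPath ChooseSubStringPathSketch(int32_t from, int32_t to,
                                               int32_t string_length,
                                               bool string_slices,
                                               int32_t sliced_min_length) {
  const int32_t length = to - from;        // both already checked non-negative
  if (length > string_length) return SubStringPath::kRuntime;   // unsafe args
  if (length == string_length) return SubStringPath::kReturnOriginal;
  if (string_slices && length >= sliced_min_length) {
    return SubStringPath::kMakeSlice;      // share the parent's characters
  }
  return SubStringPath::kCopyChars;        // short result: copy instead
}
// --- end of editor's sketch ---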
5313 
5314 
5315 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
5316  Register left,
5317  Register right,
5318  Register scratch1,
5319  Register scratch2) {
5320  Register length = scratch1;
5321 
5322  // Compare lengths.
5323  Label check_zero_length;
5324  __ movq(length, FieldOperand(left, String::kLengthOffset));
5325  __ SmiCompare(length, FieldOperand(right, String::kLengthOffset));
5326  __ j(equal, &check_zero_length, Label::kNear);
5327  __ Move(rax, Smi::FromInt(NOT_EQUAL));
5328  __ ret(0);
5329 
5330  // Check if the length is zero.
5331  Label compare_chars;
5332  __ bind(&check_zero_length);
5333  STATIC_ASSERT(kSmiTag == 0);
5334  __ SmiTest(length);
5335  __ j(not_zero, &compare_chars, Label::kNear);
5336  __ Move(rax, Smi::FromInt(EQUAL));
5337  __ ret(0);
5338 
5339  // Compare characters.
5340  __ bind(&compare_chars);
5341  Label strings_not_equal;
5342  GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
5343  &strings_not_equal, Label::kNear);
5344 
5345  // Characters are equal.
5346  __ Move(rax, Smi::FromInt(EQUAL));
5347  __ ret(0);
5348 
5349  // Characters are not equal.
5350  __ bind(&strings_not_equal);
5351  __ Move(rax, Smi::FromInt(NOT_EQUAL));
5352  __ ret(0);
5353 }
5354 
5355 
5356 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
5357  Register left,
5358  Register right,
5359  Register scratch1,
5360  Register scratch2,
5361  Register scratch3,
5362  Register scratch4) {
5363  // Ensure that you can always subtract a string length from a non-negative
5364  // number (e.g. another length).
5365  STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
5366 
5367  // Find minimum length and length difference.
5368  __ movq(scratch1, FieldOperand(left, String::kLengthOffset));
5369  __ movq(scratch4, scratch1);
5370  __ SmiSub(scratch4,
5371  scratch4,
5372  FieldOperand(right, String::kLengthOffset));
5373  // Register scratch4 now holds left.length - right.length.
5374  const Register length_difference = scratch4;
5375  Label left_shorter;
5376  __ j(less, &left_shorter, Label::kNear);
5377  // The right string isn't longer than the left one.
5378  // Get the right string's length by subtracting the (non-negative) difference
5379  // from the left string's length.
5380  __ SmiSub(scratch1, scratch1, length_difference);
5381  __ bind(&left_shorter);
5382  // Register scratch1 now holds Min(left.length, right.length).
5383  const Register min_length = scratch1;
5384 
5385  Label compare_lengths;
5386  // If min-length is zero, go directly to comparing lengths.
5387  __ SmiTest(min_length);
5388  __ j(zero, &compare_lengths, Label::kNear);
5389 
5390  // Compare loop.
5391  Label result_not_equal;
5392  GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
5393  &result_not_equal, Label::kNear);
5394 
5395  // Completed loop without finding different characters.
5396  // Compare lengths (precomputed).
5397  __ bind(&compare_lengths);
5398  __ SmiTest(length_difference);
5399  __ j(not_zero, &result_not_equal, Label::kNear);
5400 
5401  // Result is EQUAL.
5402  __ Move(rax, Smi::FromInt(EQUAL));
5403  __ ret(0);
5404 
5405  Label result_greater;
5406  __ bind(&result_not_equal);
5407  // Unequal comparison of left to right, either character or length.
5408  __ j(greater, &result_greater, Label::kNear);
5409 
5410  // Result is LESS.
5411  __ Move(rax, Smi::FromInt(LESS));
5412  __ ret(0);
5413 
5414  // Result is GREATER.
5415  __ bind(&result_greater);
5416  __ Move(rax, Smi::FromInt(GREATER));
5417  __ ret(0);
5418 }
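
// ---------------------------------------------------------------------------
// Editor's sketch (not part of code-stubs-x64.cc): the flat ASCII comparison
// above compares up to min(left.length, right.length) characters; if all of
// them match, the precomputed length difference decides the result. In plain
// C++ (hypothetical helper name, returning -1/0/1 for LESS/EQUAL/GREATER):
#include <cstdint>

static int FlatAsciiCompareSketch(const uint8_t* left, int left_length,
                                  const uint8_t* right, int right_length) {
  const int min_length =
      left_length < right_length ? left_length : right_length;
  for (int i = 0; i < min_length; i++) {
    if (left[i] != right[i]) return left[i] < right[i] ? -1 : 1;
  }
  if (left_length == right_length) return 0;
  return left_length < right_length ? -1 : 1;   // shorter string orders first
}
// --- end of editor's sketch ---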
5419 
5420 
5421 void StringCompareStub::GenerateAsciiCharsCompareLoop(
5422  MacroAssembler* masm,
5423  Register left,
5424  Register right,
5425  Register length,
5426  Register scratch,
5427  Label* chars_not_equal,
5428  Label::Distance near_jump) {
5429  // Change index to run from -length to -1 by adding length to string
5430  // start. This means that loop ends when index reaches zero, which
5431  // doesn't need an additional compare.
5432  __ SmiToInteger32(length, length);
5433  __ lea(left,
5434  FieldOperand(left, length, times_1, SeqAsciiString::kHeaderSize));
5435  __ lea(right,
5436  FieldOperand(right, length, times_1, SeqAsciiString::kHeaderSize));
5437  __ neg(length);
5438  Register index = length; // index = -length;
5439 
5440  // Compare loop.
5441  Label loop;
5442  __ bind(&loop);
5443  __ movb(scratch, Operand(left, index, times_1, 0));
5444  __ cmpb(scratch, Operand(right, index, times_1, 0));
5445  __ j(not_equal, chars_not_equal, near_jump);
5446  __ incq(index);
5447  __ j(not_zero, &loop);
5448 }
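
// ---------------------------------------------------------------------------
// Editor's sketch (not part of code-stubs-x64.cc): the compare loop above
// advances both string pointers to the end of the compared region and walks a
// negative index up to zero, so the index increment doubles as the
// loop-termination test and no extra bounds compare is needed. The same idea
// in C++ (hypothetical helper name):
#include <cstddef>
#include <cstdint>

static bool AsciiCharsEqualSketch(const uint8_t* left, const uint8_t* right,
                                  int length) {
  left += length;                          // point just past the region
  right += length;
  for (ptrdiff_t index = -length; index != 0; index++) {
    if (left[index] != right[index]) return false;   // chars_not_equal
  }
  return true;
}
// --- end of editor's sketch ---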
5449 
5450 
5451 void StringCompareStub::Generate(MacroAssembler* masm) {
5452  Label runtime;
5453 
5454  // Stack frame on entry.
5455  // rsp[0]: return address
5456  // rsp[8]: right string
5457  // rsp[16]: left string
5458 
5459  __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // left
5460  __ movq(rax, Operand(rsp, 1 * kPointerSize)); // right
5461 
5462  // Check for identity.
5463  Label not_same;
5464  __ cmpq(rdx, rax);
5465  __ j(not_equal, &not_same, Label::kNear);
5466  __ Move(rax, Smi::FromInt(EQUAL));
5467  Counters* counters = masm->isolate()->counters();
5468  __ IncrementCounter(counters->string_compare_native(), 1);
5469  __ ret(2 * kPointerSize);
5470 
5471  __ bind(&not_same);
5472 
5473  // Check that both are sequential ASCII strings.
5474  __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
5475 
5476  // Inline comparison of ASCII strings.
5477  __ IncrementCounter(counters->string_compare_native(), 1);
5478  // Drop arguments from the stack
5479  __ pop(rcx);
5480  __ addq(rsp, Immediate(2 * kPointerSize));
5481  __ push(rcx);
5482  GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
5483 
5484  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
5485  // tagged as a small integer.
5486  __ bind(&runtime);
5487  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
5488 }
5489 
5490 
5491 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
5492  ASSERT(state_ == CompareIC::SMIS);
5493  Label miss;
5494  __ JumpIfNotBothSmi(rdx, rax, &miss, Label::kNear);
5495 
5496  if (GetCondition() == equal) {
5497  // For equality we do not care about the sign of the result.
5498  __ subq(rax, rdx);
5499  } else {
5500  Label done;
5501  __ subq(rdx, rax);
5502  __ j(no_overflow, &done, Label::kNear);
5503  // Correct sign of result in case of overflow.
5504  __ SmiNot(rdx, rdx);
5505  __ bind(&done);
5506  __ movq(rax, rdx);
5507  }
5508  __ ret(0);
5509 
5510  __ bind(&miss);
5511  GenerateMiss(masm);
5512 }
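
// ---------------------------------------------------------------------------
// Editor's sketch (not part of code-stubs-x64.cc): for ordered smi compares
// the stub subtracts the two tagged values and, if the subtraction overflows,
// bit-complements the result so that its sign is correct again (only the sign
// and zero-ness of the result matter). A C++ rendering of that trick, using
// the GCC/Clang __builtin_sub_overflow intrinsic:
#include <cstdint>

static int64_t SmiCompareResultSketch(int64_t left_smi, int64_t right_smi) {
  int64_t result;
  const bool overflow = __builtin_sub_overflow(left_smi, right_smi, &result);
  if (overflow) result = ~result;          // mirrors SmiNot(rdx, rdx)
  return result;                           // <0, ==0, >0 as LESS/EQUAL/GREATER
}
// --- end of editor's sketch ---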
5513 
5514 
5515 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
5516  ASSERT(state_ == CompareIC::HEAP_NUMBERS);
5517 
5518  Label generic_stub;
5519  Label unordered, maybe_undefined1, maybe_undefined2;
5520  Label miss;
5521  Condition either_smi = masm->CheckEitherSmi(rax, rdx);
5522  __ j(either_smi, &generic_stub, Label::kNear);
5523 
5524  __ CmpObjectType(rax, HEAP_NUMBER_TYPE, rcx);
5525  __ j(not_equal, &maybe_undefined1, Label::kNear);
5526  __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
5527  __ j(not_equal, &maybe_undefined2, Label::kNear);
5528 
5529  // Load the left and right operands.
5530  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
5531  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
5532 
5533  // Compare operands
5534  __ ucomisd(xmm0, xmm1);
5535 
5536  // Don't base result on EFLAGS when a NaN is involved.
5537  __ j(parity_even, &unordered, Label::kNear);
5538 
5539  // Return a result of -1, 0, or 1, based on EFLAGS.
5540  // Performing mov, because xor would destroy the flag register.
5541  __ movl(rax, Immediate(0));
5542  __ movl(rcx, Immediate(0));
5543  __ setcc(above, rax); // Add one to zero if carry clear and not equal.
5544  __ sbbq(rax, rcx); // Subtract one if below (aka. carry set).
5545  __ ret(0);
5546 
5547  __ bind(&unordered);
5548  CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
5549  __ bind(&generic_stub);
5550  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
5551 
5552  __ bind(&maybe_undefined1);
5553  if (Token::IsOrderedRelationalCompareOp(op_)) {
5554  __ Cmp(rax, masm->isolate()->factory()->undefined_value());
5555  __ j(not_equal, &miss);
5556  __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
5557  __ j(not_equal, &maybe_undefined2, Label::kNear);
5558  __ jmp(&unordered);
5559  }
5560 
5561  __ bind(&maybe_undefined2);
5562  if (Token::IsOrderedRelationalCompareOp(op_)) {
5563  __ Cmp(rdx, masm->isolate()->factory()->undefined_value());
5564  __ j(equal, &unordered);
5565  }
5566 
5567  __ bind(&miss);
5568  GenerateMiss(masm);
5569 }
5570 
5571 
5572 void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
5573  ASSERT(state_ == CompareIC::SYMBOLS);
5574  ASSERT(GetCondition() == equal);
5575 
5576  // Registers containing left and right operands respectively.
5577  Register left = rdx;
5578  Register right = rax;
5579  Register tmp1 = rcx;
5580  Register tmp2 = rbx;
5581 
5582  // Check that both operands are heap objects.
5583  Label miss;
5584  Condition cond = masm->CheckEitherSmi(left, right, tmp1);
5585  __ j(cond, &miss, Label::kNear);
5586 
5587  // Check that both operands are symbols.
5588  __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
5589  __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
5590  __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
5591  __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
5592  STATIC_ASSERT(kSymbolTag != 0);
5593  __ and_(tmp1, tmp2);
5594  __ testb(tmp1, Immediate(kIsSymbolMask));
5595  __ j(zero, &miss, Label::kNear);
5596 
5597  // Symbols are compared by identity.
5598  Label done;
5599  __ cmpq(left, right);
5600  // Make sure rax is non-zero. At this point input operands are
5601  // guaranteed to be non-zero.
5602  ASSERT(right.is(rax));
5603  __ j(not_equal, &done, Label::kNear);
5604  STATIC_ASSERT(EQUAL == 0);
5605  STATIC_ASSERT(kSmiTag == 0);
5606  __ Move(rax, Smi::FromInt(EQUAL));
5607  __ bind(&done);
5608  __ ret(0);
5609 
5610  __ bind(&miss);
5611  GenerateMiss(masm);
5612 }
5613 
5614 
5615 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
5616  ASSERT(state_ == CompareIC::STRINGS);
5617  Label miss;
5618 
5619  bool equality = Token::IsEqualityOp(op_);
5620 
5621  // Registers containing left and right operands respectively.
5622  Register left = rdx;
5623  Register right = rax;
5624  Register tmp1 = rcx;
5625  Register tmp2 = rbx;
5626  Register tmp3 = rdi;
5627 
5628  // Check that both operands are heap objects.
5629  Condition cond = masm->CheckEitherSmi(left, right, tmp1);
5630  __ j(cond, &miss);
5631 
5632  // Check that both operands are strings. This leaves the instance
5633  // types loaded in tmp1 and tmp2.
5634  __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
5635  __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
5636  __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
5637  __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
5638  __ movq(tmp3, tmp1);
5640  __ or_(tmp3, tmp2);
5641  __ testb(tmp3, Immediate(kIsNotStringMask));
5642  __ j(not_zero, &miss);
5643 
5644  // Fast check for identical strings.
5645  Label not_same;
5646  __ cmpq(left, right);
5647  __ j(not_equal, &not_same, Label::kNear);
5648  STATIC_ASSERT(EQUAL == 0);
5649  STATIC_ASSERT(kSmiTag == 0);
5650  __ Move(rax, Smi::FromInt(EQUAL));
5651  __ ret(0);
5652 
5653  // Handle not identical strings.
5654  __ bind(&not_same);
5655 
5656  // Check that both strings are symbols. If they are, we're done
5657  // because we already know they are not identical.
5658  if (equality) {
5659  Label do_compare;
5660  STATIC_ASSERT(kSymbolTag != 0);
5661  __ and_(tmp1, tmp2);
5662  __ testb(tmp1, Immediate(kIsSymbolMask));
5663  __ j(zero, &do_compare, Label::kNear);
5664  // Make sure rax is non-zero. At this point input operands are
5665  // guaranteed to be non-zero.
5666  ASSERT(right.is(rax));
5667  __ ret(0);
5668  __ bind(&do_compare);
5669  }
5670 
5671  // Check that both strings are sequential ASCII.
5672  Label runtime;
5673  __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
5674 
5675  // Compare flat ASCII strings. Returns when done.
5676  if (equality) {
5677  StringCompareStub::GenerateFlatAsciiStringEquals(
5678  masm, left, right, tmp1, tmp2);
5679  } else {
5680  StringCompareStub::GenerateCompareFlatAsciiStrings(
5681  masm, left, right, tmp1, tmp2, tmp3, kScratchRegister);
5682  }
5683 
5684  // Handle more complex cases in runtime.
5685  __ bind(&runtime);
5686  __ pop(tmp1); // Return address.
5687  __ push(left);
5688  __ push(right);
5689  __ push(tmp1);
5690  if (equality) {
5691  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
5692  } else {
5693  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
5694  }
5695 
5696  __ bind(&miss);
5697  GenerateMiss(masm);
5698 }
5699 
5700 
5701 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
5702  ASSERT(state_ == CompareIC::OBJECTS);
5703  Label miss;
5704  Condition either_smi = masm->CheckEitherSmi(rdx, rax);
5705  __ j(either_smi, &miss, Label::kNear);
5706 
5707  __ CmpObjectType(rax, JS_OBJECT_TYPE, rcx);
5708  __ j(not_equal, &miss, Label::kNear);
5709  __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
5710  __ j(not_equal, &miss, Label::kNear);
5711 
5712  ASSERT(GetCondition() == equal);
5713  __ subq(rax, rdx);
5714  __ ret(0);
5715 
5716  __ bind(&miss);
5717  GenerateMiss(masm);
5718 }
5719 
5720 
5721 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
5722  Label miss;
5723  Condition either_smi = masm->CheckEitherSmi(rdx, rax);
5724  __ j(either_smi, &miss, Label::kNear);
5725 
5726  __ movq(rcx, FieldOperand(rax, HeapObject::kMapOffset));
5727  __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
5728  __ Cmp(rcx, known_map_);
5729  __ j(not_equal, &miss, Label::kNear);
5730  __ Cmp(rbx, known_map_);
5731  __ j(not_equal, &miss, Label::kNear);
5732 
5733  __ subq(rax, rdx);
5734  __ ret(0);
5735 
5736  __ bind(&miss);
5737  GenerateMiss(masm);
5738 }
5739 
5740 
5741 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
5742  {
5743  // Call the runtime system in a fresh internal frame.
5744  ExternalReference miss =
5745  ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
5746 
5747  FrameScope scope(masm, StackFrame::INTERNAL);
5748  __ push(rdx);
5749  __ push(rax);
5750  __ push(rdx);
5751  __ push(rax);
5752  __ Push(Smi::FromInt(op_));
5753  __ CallExternalReference(miss, 3);
5754 
5755  // Compute the entry point of the rewritten stub.
5756  __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
5757  __ pop(rax);
5758  __ pop(rdx);
5759  }
5760 
5761  // Do a tail call to the rewritten stub.
5762  __ jmp(rdi);
5763 }
5764 
5765 
5766 void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
5767  Label* miss,
5768  Label* done,
5769  Register properties,
5770  Handle<String> name,
5771  Register r0) {
5772  // If the names of the slots probed for the hash value (probes 1 to
5773  // kProbes - 1) are not equal to the name, and the kProbes-th slot is not
5774  // used (its name is the undefined value), the hash table is guaranteed not
5775  // to contain the property. This holds even if some slots represent deleted
5776  // properties (their names are the hole value).
5777  for (int i = 0; i < kInlinedProbes; i++) {
5778  // r0 points to properties hash.
5779  // Compute the masked index: (hash + i + i * i) & mask.
5780  Register index = r0;
5781  // Capacity is smi 2^n.
5782  __ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
5783  __ decl(index);
5784  __ and_(index,
5785  Immediate(name->Hash() + StringDictionary::GetProbeOffset(i)));
5786 
5787  // Scale the index by multiplying by the entry size.
5789  __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
5790 
5791  Register entity_name = r0;
5792  // Having undefined at this place means the name is not contained.
5793  ASSERT_EQ(kSmiTagSize, 1);
5794  __ movq(entity_name, Operand(properties,
5795  index,
5796  times_pointer_size,
5797  kElementsStartOffset - kHeapObjectTag));
5798  __ Cmp(entity_name, masm->isolate()->factory()->undefined_value());
5799  __ j(equal, done);
5800 
5801  // Stop if found the property.
5802  __ Cmp(entity_name, Handle<String>(name));
5803  __ j(equal, miss);
5804 
5805  Label the_hole;
5806  // Check for the hole and skip.
5807  __ CompareRoot(entity_name, Heap::kTheHoleValueRootIndex);
5808  __ j(equal, &the_hole, Label::kNear);
5809 
5810  // Check if the entry name is not a symbol.
5811  __ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
5812  __ testb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
5813  Immediate(kIsSymbolMask));
5814  __ j(zero, miss);
5815 
5816  __ bind(&the_hole);
5817  }
5818 
5819  StringDictionaryLookupStub stub(properties,
5820  r0,
5821  r0,
5822  NEGATIVE_LOOKUP);
5823  __ Push(Handle<Object>(name));
5824  __ push(Immediate(name->Hash()));
5825  __ CallStub(&stub);
5826  __ testq(r0, r0);
5827  __ j(not_zero, miss);
5828  __ jmp(done);
5829 }
5830 
5831 
5832 // Probe the string dictionary in the |elements| register. Jump to the
5833 // |done| label if a property with the given name is found, leaving the
5834 // index into the dictionary in |r1|. Jump to the |miss| label
5835 // otherwise.
5836 void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
5837  Label* miss,
5838  Label* done,
5839  Register elements,
5840  Register name,
5841  Register r0,
5842  Register r1) {
5843  ASSERT(!elements.is(r0));
5844  ASSERT(!elements.is(r1));
5845  ASSERT(!name.is(r0));
5846  ASSERT(!name.is(r1));
5847 
5848  // Assert that name contains a string.
5849  if (FLAG_debug_code) __ AbortIfNotString(name);
5850 
5851  __ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset));
5852  __ decl(r0);
5853 
5854  for (int i = 0; i < kInlinedProbes; i++) {
5855  // Compute the masked index: (hash + i + i * i) & mask.
5856  __ movl(r1, FieldOperand(name, String::kHashFieldOffset));
5857  __ shrl(r1, Immediate(String::kHashShift));
5858  if (i > 0) {
5859  __ addl(r1, Immediate(StringDictionary::GetProbeOffset(i)));
5860  }
5861  __ and_(r1, r0);
5862 
5863  // Scale the index by multiplying by the entry size.
5865  __ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
5866 
5867  // Check if the key is identical to the name.
5868  __ cmpq(name, Operand(elements, r1, times_pointer_size,
5869  kElementsStartOffset - kHeapObjectTag));
5870  __ j(equal, done);
5871  }
5872 
5873  StringDictionaryLookupStub stub(elements,
5874  r0,
5875  r1,
5876  POSITIVE_LOOKUP);
5877  __ push(name);
5878  __ movl(r0, FieldOperand(name, String::kHashFieldOffset));
5879  __ shrl(r0, Immediate(String::kHashShift));
5880  __ push(r0);
5881  __ CallStub(&stub);
5882 
5883  __ testq(r0, r0);
5884  __ j(zero, miss);
5885  __ jmp(done);
5886 }
5887 
5888 
5889 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
5890  // This stub overrides SometimesSetsUpAFrame() to return false. That means
5891  // we cannot call anything that could cause a GC from this stub.
5892  // Stack frame on entry:
5893  // rsp[0 * kPointerSize]: return address.
5894  // rsp[1 * kPointerSize]: key's hash.
5895  // rsp[2 * kPointerSize]: key.
5896  // Registers:
5897  // dictionary_: StringDictionary to probe.
5898  // result_: used as scratch.
5899  // index_: will hold an index of entry if lookup is successful.
5900  // might alias with result_.
5901  // Returns:
5902  // result_ is zero if lookup failed, non-zero otherwise.
5903 
5904  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
5905 
5906  Register scratch = result_;
5907 
5908  __ SmiToInteger32(scratch, FieldOperand(dictionary_, kCapacityOffset));
5909  __ decl(scratch);
5910  __ push(scratch);
5911 
5912  // If the names of the slots probed for the hash value (probes 1 to
5913  // kProbes - 1) are not equal to the name, and the kProbes-th slot is not
5914  // used (its name is the undefined value), the hash table is guaranteed not
5915  // to contain the property. This holds even if some slots represent deleted
5916  // properties (their names are the hole value).
5917  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
5918  // Compute the masked index: (hash + i + i * i) & mask.
5919  __ movq(scratch, Operand(rsp, 2 * kPointerSize));
5920  if (i > 0) {
5921  __ addl(scratch, Immediate(StringDictionary::GetProbeOffset(i)));
5922  }
5923  __ and_(scratch, Operand(rsp, 0));
5924 
5925  // Scale the index by multiplying by the entry size.
5927  __ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
5928 
5929  // Having undefined at this place means the name is not contained.
5930  __ movq(scratch, Operand(dictionary_,
5931  index_,
5932  times_pointer_size,
5933  kElementsStartOffset - kHeapObjectTag));
5934 
5935  __ Cmp(scratch, masm->isolate()->factory()->undefined_value());
5936  __ j(equal, &not_in_dictionary);
5937 
5938  // Stop if found the property.
5939  __ cmpq(scratch, Operand(rsp, 3 * kPointerSize));
5940  __ j(equal, &in_dictionary);
5941 
5942  if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
5943  // If we hit a non-symbol key during a negative lookup,
5944  // we have to bail out, as this key might be equal to the
5945  // key we are looking for.
5946 
5947  // Check if the entry name is not a symbol.
5948  __ movq(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
5949  __ testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
5950  Immediate(kIsSymbolMask));
5951  __ j(zero, &maybe_in_dictionary);
5952  }
5953  }
5954 
5955  __ bind(&maybe_in_dictionary);
5956  // If we are doing negative lookup then probing failure should be
5957  // treated as a lookup success. For positive lookup probing failure
5958  // should be treated as lookup failure.
5959  if (mode_ == POSITIVE_LOOKUP) {
5960  __ movq(scratch, Immediate(0));
5961  __ Drop(1);
5962  __ ret(2 * kPointerSize);
5963  }
5964 
5965  __ bind(&in_dictionary);
5966  __ movq(scratch, Immediate(1));
5967  __ Drop(1);
5968  __ ret(2 * kPointerSize);
5969 
5970  __ bind(&not_in_dictionary);
5971  __ movq(scratch, Immediate(0));
5972  __ Drop(1);
5973  __ ret(2 * kPointerSize);
5974 }
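
// ---------------------------------------------------------------------------
// Editor's sketch (not part of code-stubs-x64.cc): the dictionary probing
// above masks (hash + probe offset) by capacity - 1 and multiplies the result
// by the entry size (3 slots per entry) to index the elements; an undefined
// key ends the search. The toy table below mirrors that shape only; the probe
// step is a simple triangular sequence, not V8's exact
// StringDictionary::GetProbeOffset() formula, and empty keys are modelled as
// empty strings.
#include <cstdint>
#include <string>
#include <vector>

static int DictionaryLookupSketch(const std::vector<std::string>& slots,
                                  uint32_t hash, const std::string& key) {
  const uint32_t capacity = static_cast<uint32_t>(slots.size() / 3);
  const uint32_t mask = capacity - 1;      // capacity is a power of two
  uint32_t offset = 0;
  for (uint32_t i = 0; i < capacity; i++) {
    offset += i;                           // illustrative probe step only
    const uint32_t index = (hash + offset) & mask;
    const std::string& entry = slots[index * 3];   // "index *= 3" in the stub
    if (entry.empty()) return -1;          // undefined slot: not in dictionary
    if (entry == key) return static_cast<int>(index);
  }
  return -1;                               // table exhausted: treat as not found
}
// --- end of editor's sketch ---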
5975 
5976 
5977 struct AheadOfTimeWriteBarrierStubList {
5978  Register object, value, address;
5979  RememberedSetAction action;
5980 };
5981 
5982 
5983 #define REG(Name) { kRegister_ ## Name ## _Code }
5984 
5985 struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
5986  // Used in RegExpExecStub.
5987  { REG(rbx), REG(rax), REG(rdi), EMIT_REMEMBERED_SET },
5988  // Used in CompileArrayPushCall.
5989  { REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
5990  // Used in CompileStoreGlobal.
5991  { REG(rbx), REG(rcx), REG(rdx), OMIT_REMEMBERED_SET },
5992  // Used in StoreStubCompiler::CompileStoreField and
5993  // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
5994  { REG(rdx), REG(rcx), REG(rbx), EMIT_REMEMBERED_SET },
5995  // GenerateStoreField calls the stub with two different permutations of
5996  // registers. This is the second.
5997  { REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
5998  // StoreIC::GenerateNormal via GenerateDictionaryStore.
5999  { REG(rbx), REG(r8), REG(r9), EMIT_REMEMBERED_SET },
6000  // KeyedStoreIC::GenerateGeneric.
6001  { REG(rbx), REG(rdx), REG(rcx), EMIT_REMEMBERED_SET},
6002  // KeyedStoreStubCompiler::GenerateStoreFastElement.
6003  { REG(rdi), REG(rbx), REG(rcx), EMIT_REMEMBERED_SET},
6004  { REG(rdx), REG(rdi), REG(rbx), EMIT_REMEMBERED_SET},
6005  // ElementsTransitionGenerator::GenerateMapChangeElementTransition
6006  // and ElementsTransitionGenerator::GenerateSmiToDouble
6007  // and ElementsTransitionGenerator::GenerateDoubleToObject
6008  { REG(rdx), REG(rbx), REG(rdi), EMIT_REMEMBERED_SET},
6009  { REG(rdx), REG(rbx), REG(rdi), OMIT_REMEMBERED_SET},
6010  // ElementsTransitionGenerator::GenerateSmiToDouble
6011  // and ElementsTransitionGenerator::GenerateDoubleToObject
6012  { REG(rdx), REG(r11), REG(r15), EMIT_REMEMBERED_SET},
6013  // ElementsTransitionGenerator::GenerateDoubleToObject
6014  { REG(r11), REG(rax), REG(r15), EMIT_REMEMBERED_SET},
6015  // StoreArrayLiteralElementStub::Generate
6016  { REG(rbx), REG(rax), REG(rcx), EMIT_REMEMBERED_SET},
6017  // Null termination.
6018  { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
6019 };
6020 
6021 #undef REG
6022 
6023 bool RecordWriteStub::IsPregenerated() {
6024  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
6025  !entry->object.is(no_reg);
6026  entry++) {
6027  if (object_.is(entry->object) &&
6028  value_.is(entry->value) &&
6029  address_.is(entry->address) &&
6030  remembered_set_action_ == entry->action &&
6031  save_fp_regs_mode_ == kDontSaveFPRegs) {
6032  return true;
6033  }
6034  }
6035  return false;
6036 }
6037 
6038 
6039 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
6040  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
6041  stub1.GetCode()->set_is_pregenerated(true);
6042  StoreBufferOverflowStub stub2(kSaveFPRegs);
6043  stub2.GetCode()->set_is_pregenerated(true);
6044 }
6045 
6046 
6047 void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
6048  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
6049  !entry->object.is(no_reg);
6050  entry++) {
6051  RecordWriteStub stub(entry->object,
6052  entry->value,
6053  entry->address,
6054  entry->action,
6055  kDontSaveFPRegs);
6056  stub.GetCode()->set_is_pregenerated(true);
6057  }
6058 }
6059 
6060 
6061 // Takes the input in 3 registers: address_, value_, and object_. A pointer to
6062 // the value has just been written into the object; now this stub makes sure
6063 // we keep the GC informed. The word in the object where the value has been
6064 // written is in the address register.
6065 void RecordWriteStub::Generate(MacroAssembler* masm) {
6066  Label skip_to_incremental_noncompacting;
6067  Label skip_to_incremental_compacting;
6068 
6069  // The first two instructions are generated with labels so as to get the
6070 // offset fixed up correctly by the bind(Label*) call. We patch them back and
6071 // forth between compare instructions (nops in this position) and the
6072 // real branches when we start and stop incremental heap marking.
6073  // See RecordWriteStub::Patch for details.
6074  __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
6075  __ jmp(&skip_to_incremental_compacting, Label::kFar);
6076 
6077  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
6078  __ RememberedSetHelper(object_,
6079  address_,
6080  value_,
6081  save_fp_regs_mode_,
6082  MacroAssembler::kReturnAtEnd);
6083  } else {
6084  __ ret(0);
6085  }
6086 
6087  __ bind(&skip_to_incremental_noncompacting);
6088  GenerateIncremental(masm, INCREMENTAL);
6089 
6090  __ bind(&skip_to_incremental_compacting);
6091  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
6092 
6093  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
6094  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
6095  masm->set_byte_at(0, kTwoByteNopInstruction);
6096  masm->set_byte_at(2, kFiveByteNopInstruction);
6097 }
6098 
6099 
6100 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
6101  regs_.Save(masm);
6102 
6103  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
6104  Label dont_need_remembered_set;
6105 
6106  __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
6107  __ JumpIfNotInNewSpace(regs_.scratch0(),
6108  regs_.scratch0(),
6109  &dont_need_remembered_set);
6110 
6111  __ CheckPageFlag(regs_.object(),
6112  regs_.scratch0(),
6113  1 << MemoryChunk::SCAN_ON_SCAVENGE,
6114  not_zero,
6115  &dont_need_remembered_set);
6116 
6117  // First notify the incremental marker if necessary, then update the
6118  // remembered set.
6119  CheckNeedsToInformIncrementalMarker(
6120  masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
6121  InformIncrementalMarker(masm, mode);
6122  regs_.Restore(masm);
6123  __ RememberedSetHelper(object_,
6124  address_,
6125  value_,
6126  save_fp_regs_mode_,
6127  MacroAssembler::kReturnAtEnd);
6128 
6129  __ bind(&dont_need_remembered_set);
6130  }
6131 
6132  CheckNeedsToInformIncrementalMarker(
6133  masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
6134  InformIncrementalMarker(masm, mode);
6135  regs_.Restore(masm);
6136  __ ret(0);
6137 }
6138 
6139 
6140 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
6141  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
6142 #ifdef _WIN64
6143  Register arg3 = r8;
6144  Register arg2 = rdx;
6145  Register arg1 = rcx;
6146 #else
6147  Register arg3 = rdx;
6148  Register arg2 = rsi;
6149  Register arg1 = rdi;
6150 #endif
6151  Register address =
6152  arg1.is(regs_.address()) ? kScratchRegister : regs_.address();
6153  ASSERT(!address.is(regs_.object()));
6154  ASSERT(!address.is(arg1));
6155  __ Move(address, regs_.address());
6156  __ Move(arg1, regs_.object());
6157  if (mode == INCREMENTAL_COMPACTION) {
6158  // TODO(gc) Can we just set address arg2 in the beginning?
6159  __ Move(arg2, address);
6160  } else {
6161  ASSERT(mode == INCREMENTAL);
6162  __ movq(arg2, Operand(address, 0));
6163  }
6164  __ LoadAddress(arg3, ExternalReference::isolate_address());
6165  int argument_count = 3;
6166 
6167  AllowExternalCallThatCantCauseGC scope(masm);
6168  __ PrepareCallCFunction(argument_count);
6169  if (mode == INCREMENTAL_COMPACTION) {
6170  __ CallCFunction(
6171  ExternalReference::incremental_evacuation_record_write_function(
6172  masm->isolate()),
6173  argument_count);
6174  } else {
6175  ASSERT(mode == INCREMENTAL);
6176  __ CallCFunction(
6177  ExternalReference::incremental_marking_record_write_function(
6178  masm->isolate()),
6179  argument_count);
6180  }
6181  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
6182 }
6183 
6184 
6185 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
6186  MacroAssembler* masm,
6187  OnNoNeedToInformIncrementalMarker on_no_need,
6188  Mode mode) {
6189  Label on_black;
6190  Label need_incremental;
6191  Label need_incremental_pop_object;
6192 
6193  // Let's look at the color of the object: If it is not black we don't have
6194  // to inform the incremental marker.
6195  __ JumpIfBlack(regs_.object(),
6196  regs_.scratch0(),
6197  regs_.scratch1(),
6198  &on_black,
6199  Label::kNear);
6200 
6201  regs_.Restore(masm);
6202  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
6203  __ RememberedSetHelper(object_,
6204  address_,
6205  value_,
6206  save_fp_regs_mode_,
6207  MacroAssembler::kReturnAtEnd);
6208  } else {
6209  __ ret(0);
6210  }
6211 
6212  __ bind(&on_black);
6213 
6214  // Get the value from the slot.
6215  __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
6216 
6217  if (mode == INCREMENTAL_COMPACTION) {
6218  Label ensure_not_white;
6219 
6220  __ CheckPageFlag(regs_.scratch0(), // Contains value.
6221  regs_.scratch1(), // Scratch.
6222  MemoryChunk::kEvacuationCandidateMask,
6223  zero,
6224  &ensure_not_white,
6225  Label::kNear);
6226 
6227  __ CheckPageFlag(regs_.object(),
6228  regs_.scratch1(), // Scratch.
6229  MemoryChunk::kSkipEvacuationSlotsRecordingMask,
6230  zero,
6231  &need_incremental);
6232 
6233  __ bind(&ensure_not_white);
6234  }
6235 
6236  // We need an extra register for this, so we push the object register
6237  // temporarily.
6238  __ push(regs_.object());
6239  __ EnsureNotWhite(regs_.scratch0(), // The value.
6240  regs_.scratch1(), // Scratch.
6241  regs_.object(), // Scratch.
6242  &need_incremental_pop_object,
6243  Label::kNear);
6244  __ pop(regs_.object());
6245 
6246  regs_.Restore(masm);
6247  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
6248  __ RememberedSetHelper(object_,
6249  address_,
6250  value_,
6251  save_fp_regs_mode_,
6252  MacroAssembler::kReturnAtEnd);
6253  } else {
6254  __ ret(0);
6255  }
6256 
6257  __ bind(&need_incremental_pop_object);
6258  __ pop(regs_.object());
6259 
6260  __ bind(&need_incremental);
6261 
6262  // Fall through when we need to inform the incremental marker.
6263 }
6264 
6265 
6266 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
6267  // ----------- S t a t e -------------
6268  // -- rax : element value to store
6269  // -- rbx : array literal
6270  // -- rdi : map of array literal
6271  // -- rcx : element index as smi
6272  // -- rdx : array literal index in function
6273  // -- rsp[0] : return address
6274  // -----------------------------------
6275 
6276  Label element_done;
6277  Label double_elements;
6278  Label smi_element;
6279  Label slow_elements;
6280  Label fast_elements;
6281 
6282  __ CheckFastElements(rdi, &double_elements);
6283 
6284  // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
6285  __ JumpIfSmi(rax, &smi_element);
6286  __ CheckFastSmiElements(rdi, &fast_elements);
6287 
6288  // Storing into the array literal requires an elements transition. Call into
6289  // the runtime.
6290 
6291  __ bind(&slow_elements);
6292  __ pop(rdi); // Pop return address and remember to put back later for tail
6293  // call.
6294  __ push(rbx);
6295  __ push(rcx);
6296  __ push(rax);
6297  __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
6298  __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
6299  __ push(rdx);
6300  __ push(rdi); // Put the return address back so that the tail call returns
6301  // to the right place.
6302  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
6303 
6304  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
6305  __ bind(&fast_elements);
6306  __ SmiToInteger32(kScratchRegister, rcx);
6307  __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
6308  __ lea(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size,
6309  FixedArrayBase::kHeaderSize));
6310  __ movq(Operand(rcx, 0), rax);
6311  // Update the write barrier for the array store.
6312  __ RecordWrite(rbx, rcx, rax,
6313  kDontSaveFPRegs,
6314  EMIT_REMEMBERED_SET,
6315  OMIT_SMI_CHECK);
6316  __ ret(0);
6317 
6318  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or
6319  // FAST_*_ELEMENTS, and value is Smi.
6320  __ bind(&smi_element);
6321  __ SmiToInteger32(kScratchRegister, rcx);
6322  __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
6323  __ movq(FieldOperand(rbx, kScratchRegister, times_pointer_size,
6324  FixedArrayBase::kHeaderSize), rax);
6325  __ ret(0);
6326 
6327  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
6328  __ bind(&double_elements);
6329 
6330  __ movq(r9, FieldOperand(rbx, JSObject::kElementsOffset));
6331  __ SmiToInteger32(r11, rcx);
6332  __ StoreNumberToDoubleElements(rax,
6333  r9,
6334  r11,
6335  xmm0,
6336  &slow_elements);
6337  __ ret(0);
6338 }
6339 
6340 #undef __
6341 
6342 } } // namespace v8::internal
6343 
6344 #endif // V8_TARGET_ARCH_X64
0,"max size of the old generation (in Mbytes)") DEFINE_int(max_executable_size, 0,"max size of executable memory (in Mbytes)") DEFINE_bool(gc_global, false,"always perform global GCs") DEFINE_int(gc_interval,-1,"garbage collect after <n> allocations") DEFINE_bool(trace_gc, false,"print one trace line following each garbage collection") DEFINE_bool(trace_gc_nvp, false,"print one detailed trace line in name=value format ""after each garbage collection") DEFINE_bool(print_cumulative_gc_stat, false,"print cumulative GC statistics in name=value format on exit") DEFINE_bool(trace_gc_verbose, false,"print more details following each garbage collection") DEFINE_bool(trace_fragmentation, false,"report fragmentation for old pointer and data pages") DEFINE_bool(collect_maps, true,"garbage collect maps from which no objects can be reached") DEFINE_bool(flush_code, true,"flush code that we expect not to use again before full gc") DEFINE_bool(incremental_marking, true,"use incremental marking") DEFINE_bool(incremental_marking_steps, true,"do incremental marking steps") DEFINE_bool(trace_incremental_marking, false,"trace progress of the incremental marking") DEFINE_bool(use_idle_notification, true,"Use idle notification to reduce memory footprint.") DEFINE_bool(send_idle_notification, false,"Send idle notifcation between stress runs.") DEFINE_bool(use_ic, true,"use inline caching") DEFINE_bool(native_code_counters, false,"generate extra code for manipulating stats counters") DEFINE_bool(always_compact, false,"Perform compaction on every full GC") DEFINE_bool(lazy_sweeping, true,"Use lazy sweeping for old pointer and data spaces") DEFINE_bool(never_compact, false,"Never perform compaction on full GC - testing only") DEFINE_bool(compact_code_space, true,"Compact code space on full non-incremental collections") DEFINE_bool(cleanup_code_caches_at_gc, true,"Flush inline caches prior to mark compact collection and ""flush code caches in maps during mark compact cycle.") DEFINE_int(random_seed, 0,"Default seed for initializing random generator ""(0, the default, means to use system random).") DEFINE_bool(use_verbose_printer, true,"allows verbose printing") DEFINE_bool(allow_natives_syntax, false,"allow natives syntax") DEFINE_bool(trace_sim, false,"Trace simulator execution") DEFINE_bool(check_icache, false,"Check icache flushes in ARM and MIPS simulator") DEFINE_int(stop_sim_at, 0,"Simulator stop after x number of instructions") DEFINE_int(sim_stack_alignment, 8,"Stack alingment in bytes in simulator (4 or 8, 8 is default)") DEFINE_bool(trace_exception, false,"print stack trace when throwing exceptions") DEFINE_bool(preallocate_message_memory, false,"preallocate some memory to build stack traces.") DEFINE_bool(randomize_hashes, true,"randomize hashes to avoid predictable hash collisions ""(with snapshots this option cannot override the baked-in seed)") DEFINE_int(hash_seed, 0,"Fixed seed to use to hash property keys (0 means random)""(with snapshots this option cannot override the baked-in seed)") DEFINE_bool(preemption, false,"activate a 100ms timer that switches between V8 threads") DEFINE_bool(regexp_optimization, true,"generate optimized regexp code") DEFINE_bool(testing_bool_flag, true,"testing_bool_flag") DEFINE_int(testing_int_flag, 13,"testing_int_flag") DEFINE_float(testing_float_flag, 2.5,"float-flag") DEFINE_string(testing_string_flag,"Hello, world!","string-flag") DEFINE_int(testing_prng_seed, 42,"Seed used for threading test randomness") DEFINE_string(testing_serialization_file,"/tmp/serdes","file 
in which to serialize heap") DEFINE_bool(help, false,"Print usage message, including flags, on console") DEFINE_bool(dump_counters, false,"Dump counters on exit") DEFINE_string(map_counters,"","Map counters to a file") DEFINE_args(js_arguments, JSARGUMENTS_INIT,"Pass all remaining arguments to the script. Alias for \"--\".") DEFINE_bool(debug_compile_events, true,"Enable debugger compile events") DEFINE_bool(debug_script_collected_events, true,"Enable debugger script collected events") DEFINE_bool(gdbjit, false,"enable GDBJIT interface (disables compacting GC)") DEFINE_bool(gdbjit_full, false,"enable GDBJIT interface for all code objects") DEFINE_bool(gdbjit_dump, false,"dump elf objects with debug info to disk") DEFINE_string(gdbjit_dump_filter,"","dump only objects containing this substring") DEFINE_bool(force_marking_deque_overflows, false,"force overflows of marking deque by reducing it's size ""to 64 words") DEFINE_bool(stress_compaction, false,"stress the GC compactor to flush out bugs (implies ""--force_marking_deque_overflows)")#define FLAG DEFINE_bool(enable_slow_asserts, false,"enable asserts that are slow to execute") DEFINE_bool(trace_codegen, false,"print name of functions for which code is generated") DEFINE_bool(print_source, false,"pretty print source code") DEFINE_bool(print_builtin_source, false,"pretty print source code for builtins") DEFINE_bool(print_ast, false,"print source AST") DEFINE_bool(print_builtin_ast, false,"print source AST for builtins") DEFINE_string(stop_at,"","function name where to insert a breakpoint") DEFINE_bool(print_builtin_scopes, false,"print scopes for builtins") DEFINE_bool(print_scopes, false,"print scopes") DEFINE_bool(trace_contexts, false,"trace contexts operations") DEFINE_bool(gc_greedy, false,"perform GC prior to some allocations") DEFINE_bool(gc_verbose, false,"print stuff during garbage collection") DEFINE_bool(heap_stats, false,"report heap statistics before and after GC") DEFINE_bool(code_stats, false,"report code statistics after GC") DEFINE_bool(verify_heap, false,"verify heap pointers before and after GC") DEFINE_bool(print_handles, false,"report handles after GC") DEFINE_bool(print_global_handles, false,"report global handles after GC") DEFINE_bool(trace_ic, false,"trace inline cache state transitions") DEFINE_bool(print_interfaces, false,"print interfaces") DEFINE_bool(print_interface_details, false,"print interface inference details") DEFINE_int(print_interface_depth, 5,"depth for printing interfaces") DEFINE_bool(trace_normalization, false,"prints when objects are turned into dictionaries.") DEFINE_bool(trace_lazy, false,"trace lazy compilation") DEFINE_bool(collect_heap_spill_statistics, false,"report heap spill statistics along with heap_stats ""(requires heap_stats)") DEFINE_bool(trace_isolates, false,"trace isolate state changes") DEFINE_bool(log_state_changes, false,"Log state changes.") DEFINE_bool(regexp_possessive_quantifier, false,"enable possessive quantifier syntax for testing") DEFINE_bool(trace_regexp_bytecodes, false,"trace regexp bytecode execution") DEFINE_bool(trace_regexp_assembler, false,"trace regexp macro assembler calls.")#define FLAG DEFINE_bool(log, false,"Minimal logging (no API, code, GC, suspect, or handles samples).") DEFINE_bool(log_all, false,"Log all events to the log file.") DEFINE_bool(log_runtime, false,"Activate runtime system %Log call.") DEFINE_bool(log_api, false,"Log API events to the log file.") DEFINE_bool(log_code, false,"Log code events to the log file without profiling.") 
DEFINE_bool(log_gc, false,"Log heap samples on garbage collection for the hp2ps tool.") DEFINE_bool(log_handles, false,"Log global handle events.") DEFINE_bool(log_snapshot_positions, false,"log positions of (de)serialized objects in the snapshot.") DEFINE_bool(log_suspect, false,"Log suspect operations.") DEFINE_bool(prof, false,"Log statistical profiling information (implies --log-code).") DEFINE_bool(prof_auto, true,"Used with --prof, starts profiling automatically") DEFINE_bool(prof_lazy, false,"Used with --prof, only does sampling and logging"" when profiler is active (implies --noprof_auto).") DEFINE_bool(prof_browser_mode, true,"Used with --prof, turns on browser-compatible mode for profiling.") DEFINE_bool(log_regexp, false,"Log regular expression execution.") DEFINE_bool(sliding_state_window, false,"Update sliding state window counters.") DEFINE_string(logfile,"v8.log","Specify the name of the log file.") DEFINE_bool(ll_prof, false,"Enable low-level linux profiler.")#define FLAG DEFINE_bool(trace_elements_transitions, false,"trace elements transitions") DEFINE_bool(print_code_stubs, false,"print code stubs") DEFINE_bool(test_secondary_stub_cache, false,"test secondary stub cache by disabling the primary one") DEFINE_bool(test_primary_stub_cache, false,"test primary stub cache by disabling the secondary one") DEFINE_bool(print_code, false,"print generated code") DEFINE_bool(print_opt_code, false,"print optimized code") DEFINE_bool(print_unopt_code, false,"print unoptimized code before ""printing optimized code based on it") DEFINE_bool(print_code_verbose, false,"print more information for code") DEFINE_bool(print_builtin_code, false,"print generated code for builtins")#47"/Users/thlorenz/dev/dx/v8-perf/build/v8/src/flags.cc"2 namespace{struct Flag{enum FlagType{TYPE_BOOL, TYPE_INT, TYPE_FLOAT, TYPE_STRING, TYPE_ARGS} name
Definition: flags.cc:1349
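The preprocessor markers preserved in the entry above (FLAG_MODE_DEFINE_DEFAULTS, FLAG_FULL, FLAG_READONLY, DEFINE_bool, DEFINE_implication, ...) come from V8's flag-definitions.h, which flags.cc includes several times while redefining the FLAG macro for each pass. The following is a minimal, self-contained sketch of that include-several-times ("X macro") pattern; the macro and flag names here (MY_FLAG_LIST, DEFINE_DEFAULT, DEFINE_META, FLAG_trace_gc) are illustrative stand-ins, not V8's own.

#include <cstdio>

// One central list of flags: each entry is (type, name, default, comment).
#define MY_FLAG_LIST(V)                                    \
  V(bool, trace_gc, false, "print a trace line per GC")    \
  V(int,  stack_size, 984, "stack region size in kBytes")

// Pass 1: define one default-initialized variable per flag.
#define DEFINE_DEFAULT(type, name, def, cmt) type FLAG_##name = def;
MY_FLAG_LIST(DEFINE_DEFAULT)
#undef DEFINE_DEFAULT

// Pass 2: collect name/comment metadata so a --help listing can be printed.
struct FlagMeta { const char* name; const char* comment; };
#define DEFINE_META(type, name, def, cmt) { #name, cmt },
static const FlagMeta kFlagMeta[] = { MY_FLAG_LIST(DEFINE_META) };
#undef DEFINE_META

int main() {
  for (const FlagMeta& f : kFlagMeta)
    std::printf("--%s  %s\n", f.name, f.comment);
  return FLAG_trace_gc ? 1 : 0;
}

Because every pass reuses the same flag list, the flag descriptions appear more than once in the expanded dump above; only the expansion of the per-flag macro changes between passes.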
static const int kDataOffset
Definition: objects.h:6432
static const int kGlobalReceiverOffset
Definition: objects.h:6085
const int kSmiValueSize
Definition: v8.h:3900
void Generate(MacroAssembler *masm)
static Failure * OutOfMemoryException()
Definition: objects-inl.h:1021
const Register rsi
static const int kEmptyHashField
Definition: objects.h:7159
static void GenerateHashGetHash(MacroAssembler *masm, Register hash)
static const int kExponentBias
Definition: objects.h:1321
static Handle< Object > UninitializedSentinel(Isolate *isolate)
Definition: objects-inl.h:5052
static Failure * Exception()
Definition: objects-inl.h:1016
const int kMaxInt
Definition: globals.h:224
void Generate(MacroAssembler *masm)
virtual bool IsPregenerated()
void Generate(MacroAssembler *masm)
#define ASSERT(condition)
Definition: checks.h:270
const int kPointerSizeLog2
Definition: globals.h:246
static const int kInstanceSizeOffset
Definition: objects.h:4981
static void GenerateCompareFlatAsciiStrings(MacroAssembler *masm, Register left, Register right, Register scratch1, Register scratch2, Register scratch3, Register scratch4)
static Handle< Object > MegamorphicSentinel(Isolate *isolate)
Definition: objects-inl.h:5057
static const char * GetName(TypeInfo type_info)
Definition: ic.cc:2285
const uint32_t kStringRepresentationMask
Definition: objects.h:455
static void GenerateOperation(MacroAssembler *masm, TranscendentalCache::Type type)
MemOperand GlobalObjectOperand()
static const int kSize
Definition: objects.h:8134
static const int kGlobalContextOffset
Definition: objects.h:6084
MemOperand ContextOperand(Register context, int index)
static const int kContextOffset
Definition: objects.h:5986
const uint32_t kAsciiDataHintTag
Definition: objects.h:479
const uint32_t kShortExternalStringMask
Definition: objects.h:483
static void GenerateNegativeLookup(MacroAssembler *masm, Label *miss, Label *done, Register receiver, Register properties, Handle< String > name, Register scratch0)
static const int kLastSubjectOffset
Definition: jsregexp.h:152
const int kIntSize
Definition: globals.h:231
static const int kZeroHash
Definition: objects.h:6816
void Generate(MacroAssembler *masm)
static const int kHashFieldOffset
Definition: objects.h:7099
static const int kSize
Definition: objects.h:8112
static const int kLastCaptureCountOffset
Definition: jsregexp.h:150
static const int kFirstOffset
Definition: objects.h:7420
static const int kMinLength
Definition: objects.h:7433
const uint32_t kNotStringTag
Definition: objects.h:438
static const int kParentOffset
Definition: objects.h:7473
static const int kLiteralsOffset
Definition: objects.h:5987
#define UNREACHABLE()
Definition: checks.h:50
static const int kArgumentsObjectSizeStrict
Definition: heap.h:866
static void GenerateCopyCharactersREP(MacroAssembler *masm, Register dest, Register src, Register count, Register scratch, bool ascii)
STATIC_ASSERT((FixedDoubleArray::kHeaderSize &kDoubleAlignmentMask)==0)
static const int kLengthOffset
Definition: objects.h:7098
const uint32_t kIsSymbolMask
Definition: objects.h:443
const intptr_t kFailureTagMask
Definition: v8globals.h:73
static const int kValueOffset
Definition: objects.h:1307
const int kFailureTagSize
Definition: v8globals.h:72
static void GenerateFlatAsciiStringEquals(MacroAssembler *masm, Register left, Register right, Register scratch1, Register scratch2, Register scratch3)
const int kDoubleSize
Definition: globals.h:232
static const int kIrregexpCaptureCountOffset
Definition: objects.h:6478
static const int kInputOffset
Definition: objects.h:8133
static bool IsBitOp(Value op)
Definition: token.h:256
const XMMRegister xmm1
const uint32_t kIsIndirectStringMask
Definition: objects.h:462
void Generate(MacroAssembler *masm)
const Register r9
const int kPointerSize
Definition: globals.h:234
static const int kStringWrapperSafeForDefaultValueOf
Definition: objects.h:5011
Operand FieldOperand(Register object, int offset)
const int kHeapObjectTag
Definition: v8.h:3848
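Several constants indexed on this page (kSmiTag, kSmiTagSize, kHeapObjectTag) encode V8's low-bit tagging scheme: small integers keep a 0 in the lowest bit, while heap object pointers carry a 1, which is also why helpers such as FieldOperand subtract the tag before adding a field offset. A hedged, stand-alone illustration follows; the tag values are assumptions mirroring the constants listed here, not code copied from v8.h.

#include <cassert>
#include <cstdint>

const intptr_t kSmiTag = 0;                            // assumed: Smi tag value
const intptr_t kSmiTagSize = 1;                        // assumed: one tag bit
const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;   // mask for the tag bit
const intptr_t kHeapObjectTag = 1;                     // assumed: pointer tag

// A value is treated as a Smi when its low tag bit is clear.
bool IsSmi(intptr_t value) { return (value & kSmiTagMask) == kSmiTag; }

int main() {
  intptr_t smi_like = 42 << kSmiTagSize;           // low bit 0: Smi-like value
  intptr_t heap_like = smi_like | kHeapObjectTag;  // low bit 1: tagged pointer
  assert(IsSmi(smi_like));
  assert(!IsSmi(heap_like));
  // FieldOperand-style addressing would subtract kHeapObjectTag from the
  // tagged pointer before adding an offset such as kMapOffset (sketch only).
  return 0;
}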
const Register rbx
const Register rsp
const uint32_t kAsciiDataHintMask
Definition: objects.h:478
#define __
const Register r12
Operand StackSpaceOperand(int index)
static const byte kFiveByteNopInstruction
void Generate(MacroAssembler *masm)
static const int kPropertiesOffset
Definition: objects.h:2113
const Register rax
static const int kMinLength
Definition: objects.h:7485
const uint32_t kShortExternalStringTag
Definition: objects.h:484
const Register r13
static void GenerateHashAddCharacter(MacroAssembler *masm, Register hash, Register character)
static void Generate(MacroAssembler *masm, Register string, Register index, Register result, Label *call_runtime)
const Register rdi
static const int kHeaderSize
Definition: objects.h:7282
static const int kNextFunctionLinkOffset
Definition: objects.h:5989
const int kBitsPerByte
Definition: globals.h:251
static int SizeFor(int length)
Definition: objects.h:2369
const Register r0
static const int kElementsOffset
Definition: objects.h:2114
bool IsPowerOf2(T x)
Definition: utils.h:50
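IsPowerOf2 (utils.h:50) is the usual single-bit test: a power of two has exactly one bit set, so clearing the lowest set bit yields zero. A likely shape of that helper, written out as a stand-alone example; the explicit x != 0 guard here is an assumption, not copied from utils.h.

#include <cassert>

template <typename T>
inline bool IsPowerOf2(T x) {
  // Clearing the lowest set bit of a power of two leaves zero.
  return x != 0 && (x & (x - 1)) == 0;
}

int main() {
  assert(IsPowerOf2(8));
  assert(!IsPowerOf2(12));
  assert(!IsPowerOf2(0));
}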
const int kRootRegisterBias
const uint32_t kStringTag
Definition: objects.h:437
static bool IsEqualityOp(Value op)
Definition: token.h:222
static const int kOffsetOffset
Definition: objects.h:7474
void Generate(MacroAssembler *masm)
static const int kLengthOffset
Definition: objects.h:8111
static int SizeFor(int length)
Definition: objects.h:2288
void GenerateSlow(MacroAssembler *masm, const RuntimeCallHelper &call_helper)
virtual void Generate(MacroAssembler *masm)
static const int kLastMatchOverhead
Definition: jsregexp.h:147
const XMMRegister xmm3
static const int kHeaderSize
Definition: objects.h:2233
void Generate(MacroAssembler *masm)
static const int kMapOffset
Definition: objects.h:1219
bool is(Register reg) const
static const int kSkipEvacuationSlotsRecordingMask
Definition: spaces.h:410
const uint32_t kIsNotStringMask
Definition: objects.h:436
const Register r1
const uint32_t kSlicedNotConsMask
Definition: objects.h:473
static const int kLengthOffset
Definition: objects.h:2232
static const int kSize
Definition: objects.h:1315
void Generate(MacroAssembler *masm)
void Generate(MacroAssembler *masm)
static const int kSecondOffset
Definition: objects.h:7421
static void GeneratePositiveLookup(MacroAssembler *masm, Label *miss, Label *done, Register elements, Register name, Register r0, Register r1)
static const int kArgumentsLengthIndex
Definition: heap.h:869
const Register kScratchRegister
static const int kFirstCaptureOffset
Definition: jsregexp.h:156
#define UNIMPLEMENTED()
Definition: checks.h:48
static const uint32_t kHashBitMask
Definition: objects.h:7125
uint16_t uc16
Definition: globals.h:273
static const int kLastInputOffset
Definition: jsregexp.h:154
v8::Handle< v8::Value > Load(const v8::Arguments &args)
Definition: shell.cc:159
const int kSmiTagSize
Definition: v8.h:3854
static const int kHeaderSize
Definition: objects.h:4513
const Register r8
void GenerateBody(MacroAssembler *masm, bool is_construct)
static const int kDataAsciiCodeOffset
Definition: objects.h:6474
const Register rcx
Condition NegateCondition(Condition cond)
#define ASSERT_EQ(v1, v2)
Definition: checks.h:271
static void GenerateAheadOfTime()
static const int kArgumentsCalleeIndex
Definition: heap.h:871
const int kSmiTag
Definition: v8.h:3853
static const int kIsUndetectable
Definition: objects.h:5005
static const int kHeaderSize
Definition: objects.h:2115
void Generate(MacroAssembler *masm)
void GenerateFast(MacroAssembler *masm)
const int kFailureTag
Definition: v8globals.h:71
static void GenerateLookupNumberStringCache(MacroAssembler *masm, Register object, Register result, Register scratch1, Register scratch2, Register scratch3, bool object_is_smi, Label *not_found)
static const int kDataTagOffset
Definition: objects.h:6472
static const int kPrototypeOffset
Definition: objects.h:4953
static const int kSize
Definition: objects.h:5990
#define RUNTIME_ENTRY(name, nargs, ressize)
const Register no_reg
static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler *masm, Register c1, Register c2, Register scratch1, Register scratch2, Register scratch3, Register scratch4, Register scratch5, Label *not_found)
static const int kMaxLength
Definition: objects.h:7166
static const int kValueOffset
Definition: objects.h:6188
bool Contains(Type type) const
Definition: code-stubs.h:1050
const uint32_t kSymbolTag
Definition: objects.h:445
const uint32_t kAsciiStringTag
Definition: objects.h:451
static const int kConstructStubOffset
Definition: objects.h:5608
static const int kExponentBits
Definition: objects.h:1320
static const int kHashShift
Definition: objects.h:7121
const XMMRegister xmm2
static const int kSharedFunctionInfoOffset
Definition: objects.h:5984
#define FUNCTION_ADDR(f)
Definition: globals.h:307
static const int kMaxValue
Definition: objects.h:1006
void Generate(MacroAssembler *masm)
static const int kBitField2Offset
Definition: objects.h:4995
static const int kMantissaBits
Definition: objects.h:1319
void Generate(MacroAssembler *masm)
CEntryStub(int result_size, SaveFPRegsMode save_doubles=kDontSaveFPRegs)
Definition: code-stubs.h:629
void check(i::Vector< const char > string)
const Register r15
static const int kDataUC16CodeOffset
Definition: objects.h:6476
void Generate(MacroAssembler *masm)
StoreBufferOverflowStub(SaveFPRegsMode save_fp)
FlagType type() const
Definition: flags.cc:1358
static void GenerateHashInit(MacroAssembler *masm, Register hash, Register character)
static bool IsOrderedRelationalCompareOp(Value op)
Definition: token.h:218
const uint32_t kStringEncodingMask
Definition: objects.h:449
static const int kInstanceTypeOffset
Definition: objects.h:4992
static const int kIndexOffset
Definition: objects.h:8132
void Generate(MacroAssembler *masm)
const XMMRegister xmm0
void Generate(MacroAssembler *masm)