v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine.
code-stubs-ia32.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if defined(V8_TARGET_ARCH_IA32)
31 
32 #include "bootstrapper.h"
33 #include "code-stubs.h"
34 #include "isolate.h"
35 #include "jsregexp.h"
36 #include "regexp-macro-assembler.h"
37 #include "stub-cache.h"
38 #include "codegen.h"
39 
40 namespace v8 {
41 namespace internal {
42 
43 #define __ ACCESS_MASM(masm)
44 
45 void ToNumberStub::Generate(MacroAssembler* masm) {
46  // The ToNumber stub takes one argument in eax.
47  Label check_heap_number, call_builtin;
48  __ JumpIfNotSmi(eax, &check_heap_number, Label::kNear);
49  __ ret(0);
50 
51  __ bind(&check_heap_number);
52  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
53  Factory* factory = masm->isolate()->factory();
54  __ cmp(ebx, Immediate(factory->heap_number_map()));
55  __ j(not_equal, &call_builtin, Label::kNear);
56  __ ret(0);
57 
58  __ bind(&call_builtin);
59  __ pop(ecx); // Pop return address.
60  __ push(eax);
61  __ push(ecx); // Push return address.
62  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
63 }
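The early return above works because of V8's pointer tagging: on ia32 a small integer ("smi") is stored as value << 1 with a zero low bit, so JumpIfNotSmi is a single bit test and a smi is already a valid ToNumber result. A minimal portable sketch of the scheme (names mirror V8's kSmiTag/kSmiTagMask constants; illustrative only, not the V8 implementation):

    #include <cstdint>

    const intptr_t kSmiTagSketch = 0;      // tag value of a smi
    const intptr_t kSmiTagMaskSketch = 1;  // mask selecting the tag bit

    inline bool IsSmi(intptr_t tagged) {
      return (tagged & kSmiTagMaskSketch) == kSmiTagSketch;
    }
    inline intptr_t TagSmi(int32_t v) { return static_cast<intptr_t>(v) << 1; }
    inline int32_t UntagSmi(intptr_t t) { return static_cast<int32_t>(t >> 1); }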
64 
65 
66 void FastNewClosureStub::Generate(MacroAssembler* masm) {
67  // Create a new closure from the given function info in new
68  // space. Set the context to the current context in esi.
69  Counters* counters = masm->isolate()->counters();
70 
71  Label gc;
72  __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);
73 
74  __ IncrementCounter(counters->fast_new_closure_total(), 1);
75 
76  // Get the function info from the stack.
77  __ mov(edx, Operand(esp, 1 * kPointerSize));
78 
79  int map_index = (language_mode_ == CLASSIC_MODE)
80      ? Context::FUNCTION_MAP_INDEX
81      : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
82 
83  // Compute the function map in the current native context and set that
84  // as the map of the allocated object.
85  __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
86  __ mov(ecx, FieldOperand(ecx, GlobalObject::kNativeContextOffset));
87  __ mov(ebx, Operand(ecx, Context::SlotOffset(map_index)));
88  __ mov(FieldOperand(eax, JSObject::kMapOffset), ebx);
89 
90  // Initialize the rest of the function. We don't have to update the
91  // write barrier because the allocated object is in new space.
92  Factory* factory = masm->isolate()->factory();
93  __ mov(ebx, Immediate(factory->empty_fixed_array()));
94  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx);
95  __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
96  __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset),
97         Immediate(factory->the_hole_value()));
98  __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
99  __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
100  __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
101 
102  // Initialize the code pointer in the function to be the one
103  // found in the shared function info object.
104  // But first check if there is an optimized version for our context.
105  Label check_optimized;
106  Label install_unoptimized;
107  if (FLAG_cache_optimized_code) {
108    __ mov(ebx, FieldOperand(edx, SharedFunctionInfo::kOptimizedCodeMapOffset));
109    __ test(ebx, ebx);
110    __ j(not_zero, &check_optimized, Label::kNear);
111  }
112  __ bind(&install_unoptimized);
113  __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
114         Immediate(factory->undefined_value()));
115  __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
116  __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
117  __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
118 
119  // Return and remove the on-stack parameter.
120  __ ret(1 * kPointerSize);
121 
122  __ bind(&check_optimized);
123 
124  __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1);
125 
126  // ecx holds native context, ebx points to fixed array of 3-element entries
127  // (native context, optimized code, literals).
128  // Map must never be empty, so check the first elements.
129  Label install_optimized;
130  // Speculatively move code object into edx.
131  __ mov(edx, FieldOperand(ebx, FixedArray::kHeaderSize + kPointerSize));
132  __ cmp(ecx, FieldOperand(ebx, FixedArray::kHeaderSize));
133  __ j(equal, &install_optimized);
134 
135  // Iterate through the rest of map backwards. edx holds an index as a Smi.
136  Label loop;
137  Label restore;
138  __ mov(edx, FieldOperand(ebx, FixedArray::kLengthOffset));
139  __ bind(&loop);
140  // Do not double check first entry.
141  __ cmp(edx, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
142  __ j(equal, &restore);
143  __ sub(edx, Immediate(Smi::FromInt(
144      SharedFunctionInfo::kEntryLength)));  // Skip an entry.
145  __ cmp(ecx, CodeGenerator::FixedArrayElementOperand(ebx, edx, 0));
146  __ j(not_equal, &loop, Label::kNear);
147  // Hit: fetch the optimized code.
148  __ mov(edx, CodeGenerator::FixedArrayElementOperand(ebx, edx, 1));
149 
150  __ bind(&install_optimized);
151  __ IncrementCounter(counters->fast_new_closure_install_optimized(), 1);
152 
153  // TODO(fschneider): Idea: store proper code pointers in the optimized code
154  // map and either unmangle them on marking or do nothing as the whole map is
155  // discarded on major GC anyway.
156  __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
157  __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
158
159  // Now link a function into a list of optimized functions.
160  __ mov(ebx, ContextOperand(ecx, Context::OPTIMIZED_FUNCTIONS_LIST));
161
162  __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset), ebx);
163  // No need for write barrier as JSFunction (eax) is in the new space.
164
165  __ mov(ContextOperand(ecx, Context::OPTIMIZED_FUNCTIONS_LIST), eax);
166  // Store JSFunction (eax) into edx before issuing write barrier as
167  // it clobbers all the registers passed.
168  __ mov(edx, eax);
169  __ RecordWriteContextSlot(
170      ecx,
171      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
172      edx,
173      ebx,
174      kDontSaveFPRegs);
175 
176  // Return and remove the on-stack parameter.
177  __ ret(1 * kPointerSize);
178 
179  __ bind(&restore);
180  // Restore SharedFunctionInfo into edx.
181  __ mov(edx, Operand(esp, 1 * kPointerSize));
182  __ jmp(&install_unoptimized);
183 
184  // Create a new closure through the slower runtime call.
185  __ bind(&gc);
186  __ pop(ecx); // Temporarily remove return address.
187  __ pop(edx);
188  __ push(esi);
189  __ push(edx);
190  __ push(Immediate(factory->false_value()));
191  __ push(ecx); // Restore return address.
192  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
193 }
194 
195 
196 void FastNewContextStub::Generate(MacroAssembler* masm) {
197  // Try to allocate the context in new space.
198  Label gc;
199  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
200  __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
201  eax, ebx, ecx, &gc, TAG_OBJECT);
202 
203  // Get the function from the stack.
204  __ mov(ecx, Operand(esp, 1 * kPointerSize));
205 
206  // Set up the object header.
207  Factory* factory = masm->isolate()->factory();
208  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
209         factory->function_context_map());
210  __ mov(FieldOperand(eax, Context::kLengthOffset),
211         Immediate(Smi::FromInt(length)));
212 
213  // Set up the fixed slots.
214  __ Set(ebx, Immediate(0));  // Set to NULL.
215  __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
216  __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), esi);
217  __ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);
218
219  // Copy the global object from the previous context.
220  __ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
221  __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)), ebx);
222 
223  // Initialize the rest of the slots to undefined.
224  __ mov(ebx, factory->undefined_value());
225  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
226  __ mov(Operand(eax, Context::SlotOffset(i)), ebx);
227  }
228 
229  // Return and remove the on-stack parameter.
230  __ mov(esi, eax);
231  __ ret(1 * kPointerSize);
232 
233  // Need to collect. Call into runtime system.
234  __ bind(&gc);
235  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
236 }
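The size passed to AllocateInNewSpace above is the FixedArray layout of a context: a header (map pointer plus length word) followed by one pointer per slot. A worked example of the arithmetic, assuming ia32's 4-byte pointers and a two-word header (the exact constants live in the V8 headers):

    // size = (slots_ + Context::MIN_CONTEXT_SLOTS) * kPointerSize
    //        + FixedArray::kHeaderSize
    // e.g. 10 total slots on ia32: 10 * 4 + 8 = 48 bytes, allocated and
    // limit-checked as a single new-space chunk.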
237 
238 
239 void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
240  // Stack layout on entry:
241  //
242  // [esp + (1 * kPointerSize)]: function
243  // [esp + (2 * kPointerSize)]: serialized scope info
244 
245  // Try to allocate the context in new space.
246  Label gc;
247  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
248  __ AllocateInNewSpace(FixedArray::SizeFor(length),
249  eax, ebx, ecx, &gc, TAG_OBJECT);
250 
251  // Get the function or sentinel from the stack.
252  __ mov(ecx, Operand(esp, 1 * kPointerSize));
253 
254  // Get the serialized scope info from the stack.
255  __ mov(ebx, Operand(esp, 2 * kPointerSize));
256 
257  // Set up the object header.
258  Factory* factory = masm->isolate()->factory();
259  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
260         factory->block_context_map());
261  __ mov(FieldOperand(eax, Context::kLengthOffset),
262         Immediate(Smi::FromInt(length)));
263 
264  // If this block context is nested in the native context we get a smi
265  // sentinel instead of a function. The block context should get the
266  // canonical empty function of the native context as its closure which
267  // we still have to look up.
268  Label after_sentinel;
269  __ JumpIfNotSmi(ecx, &after_sentinel, Label::kNear);
270  if (FLAG_debug_code) {
271  const char* message = "Expected 0 as a Smi sentinel";
272  __ cmp(ecx, 0);
273  __ Assert(equal, message);
274  }
275  __ mov(ecx, GlobalObjectOperand());
276  __ mov(ecx, FieldOperand(ecx, GlobalObject::kNativeContextOffset));
277  __ mov(ecx, ContextOperand(ecx, Context::CLOSURE_INDEX));
278  __ bind(&after_sentinel);
279 
280  // Set up the fixed slots.
281  __ mov(ContextOperand(eax, Context::CLOSURE_INDEX), ecx);
282  __ mov(ContextOperand(eax, Context::PREVIOUS_INDEX), esi);
283  __ mov(ContextOperand(eax, Context::EXTENSION_INDEX), ebx);
284
285  // Copy the global object from the previous context.
286  __ mov(ebx, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX));
287  __ mov(ContextOperand(eax, Context::GLOBAL_OBJECT_INDEX), ebx);
288 
289  // Initialize the rest of the slots to the hole value.
290  if (slots_ == 1) {
291  __ mov(ContextOperand(eax, Context::MIN_CONTEXT_SLOTS),
292  factory->the_hole_value());
293  } else {
294  __ mov(ebx, factory->the_hole_value());
295  for (int i = 0; i < slots_; i++) {
296  __ mov(ContextOperand(eax, i + Context::MIN_CONTEXT_SLOTS), ebx);
297  }
298  }
299 
300  // Return and remove the on-stack parameters.
301  __ mov(esi, eax);
302  __ ret(2 * kPointerSize);
303 
304  // Need to collect. Call into runtime system.
305  __ bind(&gc);
306  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
307 }
308 
309 
310 static void GenerateFastCloneShallowArrayCommon(
311  MacroAssembler* masm,
312  int length,
313  FastCloneShallowArrayStub::Mode mode,
314  Label* fail) {
315  // Registers on entry:
316  //
317  // ecx: boilerplate literal array.
318  ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
319 
320  // All sizes here are multiples of kPointerSize.
321  int elements_size = 0;
322  if (length > 0) {
323    elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
324        ? FixedDoubleArray::SizeFor(length)
325  : FixedArray::SizeFor(length);
326  }
327  int size = JSArray::kSize + elements_size;
328 
329  // Allocate both the JS array and the elements array in one big
330  // allocation. This avoids multiple limit checks.
331  __ AllocateInNewSpace(size, eax, ebx, edx, fail, TAG_OBJECT);
332 
333  // Copy the JS array part.
334  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
335  if ((i != JSArray::kElementsOffset) || (length == 0)) {
336  __ mov(ebx, FieldOperand(ecx, i));
337  __ mov(FieldOperand(eax, i), ebx);
338  }
339  }
340 
341  if (length > 0) {
342  // Get hold of the elements array of the boilerplate and setup the
343  // elements pointer in the resulting object.
344  __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
345  __ lea(edx, Operand(eax, JSArray::kSize));
346  __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);
347
348  // Copy the elements array.
349  if (mode == FastCloneShallowArrayStub::CLONE_ELEMENTS) {
350  for (int i = 0; i < elements_size; i += kPointerSize) {
351  __ mov(ebx, FieldOperand(ecx, i));
352  __ mov(FieldOperand(edx, i), ebx);
353  }
354  } else {
355    ASSERT(mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS);
356  int i;
357  for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
358  __ mov(ebx, FieldOperand(ecx, i));
359  __ mov(FieldOperand(edx, i), ebx);
360  }
361  while (i < elements_size) {
362  __ fld_d(FieldOperand(ecx, i));
363  __ fstp_d(FieldOperand(edx, i));
364  i += kDoubleSize;
365  }
366  ASSERT(i == elements_size);
367  }
368  }
369 }
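Allocating the JSArray header and its elements with one AllocateInNewSpace call works because new-space allocation is a bump of the allocation top against a limit; summing the sizes up front buys both objects with a single limit check. A sketch of that allocation discipline (illustrative; real V8 also tracks per-space top/limit pairs and alignment):

    #include <cstddef>
    #include <cstdint>

    struct NewSpaceSketch {
      uintptr_t top;    // next free byte
      uintptr_t limit;  // end of the space
      // One check covers any number of objects folded into 'size'.
      void* Allocate(size_t size) {
        if (limit - top < size) return nullptr;  // caller jumps to 'fail'
        void* result = reinterpret_cast<void*>(top);
        top += size;
        return result;
      }
    };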
370 
371 
372 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
373  // Stack layout on entry:
374  //
375  // [esp + kPointerSize]: constant elements.
376  // [esp + (2 * kPointerSize)]: literal index.
377  // [esp + (3 * kPointerSize)]: literals array.
378 
379  // Load boilerplate object into ecx and check if we need to create a
380  // boilerplate.
381  __ mov(ecx, Operand(esp, 3 * kPointerSize));
382  __ mov(eax, Operand(esp, 2 * kPointerSize));
383  STATIC_ASSERT(kPointerSize == 4);
384  STATIC_ASSERT(kSmiTagSize == 1);
385  STATIC_ASSERT(kSmiTag == 0);
386  __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
387                           FixedArray::kHeaderSize));
388  Factory* factory = masm->isolate()->factory();
389  __ cmp(ecx, factory->undefined_value());
390  Label slow_case;
391  __ j(equal, &slow_case);
392 
393  FastCloneShallowArrayStub::Mode mode = mode_;
394  // ecx is boilerplate object.
395  if (mode == CLONE_ANY_ELEMENTS) {
396  Label double_elements, check_fast_elements;
397  __ mov(ebx, FieldOperand(ecx, JSArray::kElementsOffset));
398  __ CheckMap(ebx, factory->fixed_cow_array_map(),
399  &check_fast_elements, DONT_DO_SMI_CHECK);
400  GenerateFastCloneShallowArrayCommon(masm, 0,
401  COPY_ON_WRITE_ELEMENTS, &slow_case);
402  __ ret(3 * kPointerSize);
403 
404  __ bind(&check_fast_elements);
405  __ CheckMap(ebx, factory->fixed_array_map(),
406  &double_elements, DONT_DO_SMI_CHECK);
407  GenerateFastCloneShallowArrayCommon(masm, length_,
408  CLONE_ELEMENTS, &slow_case);
409  __ ret(3 * kPointerSize);
410 
411  __ bind(&double_elements);
412  mode = CLONE_DOUBLE_ELEMENTS;
413  // Fall through to generate the code to handle double elements.
414  }
415 
416  if (FLAG_debug_code) {
417  const char* message;
418  Handle<Map> expected_map;
419  if (mode == CLONE_ELEMENTS) {
420  message = "Expected (writable) fixed array";
421  expected_map = factory->fixed_array_map();
422  } else if (mode == CLONE_DOUBLE_ELEMENTS) {
423  message = "Expected (writable) fixed double array";
424  expected_map = factory->fixed_double_array_map();
425  } else {
426    ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
427  message = "Expected copy-on-write fixed array";
428  expected_map = factory->fixed_cow_array_map();
429  }
430  __ push(ecx);
431  __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
432  __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), expected_map);
433  __ Assert(equal, message);
434  __ pop(ecx);
435  }
436 
437  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
438  // Return and remove the on-stack parameters.
439  __ ret(3 * kPointerSize);
440 
441  __ bind(&slow_case);
442  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
443 }
444 
445 
446 void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
447  // Stack layout on entry:
448  //
449  // [esp + kPointerSize]: object literal flags.
450  // [esp + (2 * kPointerSize)]: constant properties.
451  // [esp + (3 * kPointerSize)]: literal index.
452  // [esp + (4 * kPointerSize)]: literals array.
453 
454  // Load boilerplate object into ecx and check if we need to create a
455  // boilerplate.
456  Label slow_case;
457  __ mov(ecx, Operand(esp, 4 * kPointerSize));
458  __ mov(eax, Operand(esp, 3 * kPointerSize));
459  STATIC_ASSERT(kPointerSize == 4);
460  STATIC_ASSERT(kSmiTagSize == 1);
461  STATIC_ASSERT(kSmiTag == 0);
462  __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
463                           FixedArray::kHeaderSize));
464  Factory* factory = masm->isolate()->factory();
465  __ cmp(ecx, factory->undefined_value());
466  __ j(equal, &slow_case);
467 
468  // Check that the boilerplate contains only fast properties and we can
469  // statically determine the instance size.
470  int size = JSObject::kHeaderSize + length_ * kPointerSize;
471  __ mov(eax, FieldOperand(ecx, HeapObject::kMapOffset));
472  __ movzx_b(eax, FieldOperand(eax, Map::kInstanceSizeOffset));
473  __ cmp(eax, Immediate(size >> kPointerSizeLog2));
474  __ j(not_equal, &slow_case);
475 
476  // Allocate the JS object and copy header together with all in-object
477  // properties from the boilerplate.
478  __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);
479  for (int i = 0; i < size; i += kPointerSize) {
480  __ mov(ebx, FieldOperand(ecx, i));
481  __ mov(FieldOperand(eax, i), ebx);
482  }
483 
484  // Return and remove the on-stack parameters.
485  __ ret(4 * kPointerSize);
486 
487  __ bind(&slow_case);
488  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
489 }
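The instance-size check above compares in words: the byte at Map::kInstanceSizeOffset records the object's size divided by kPointerSize, which is why the boilerplate's value is compared against size >> kPointerSizeLog2. For example, with the three-word ia32 JSObject header and four in-object properties: size = 12 + 4 * 4 = 28 bytes, i.e. 7 words (the sizes here are illustrative readings of the ia32 constants).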
490 
491 
492 // The stub expects its argument on the stack and returns its result in tos_:
493 // zero for false, and a non-zero value for true.
494 void ToBooleanStub::Generate(MacroAssembler* masm) {
495  // This stub overrides SometimesSetsUpAFrame() to return false. That means
496  // we cannot call anything that could cause a GC from this stub.
497  Label patch;
498  Factory* factory = masm->isolate()->factory();
499  const Register argument = eax;
500  const Register map = edx;
501 
502  if (!types_.IsEmpty()) {
503  __ mov(argument, Operand(esp, 1 * kPointerSize));
504  }
505 
506  // undefined -> false
507  CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
508 
509  // Boolean -> its value
510  CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
511  CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
512 
513  // 'null' -> false.
514  CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
515 
516  if (types_.Contains(SMI)) {
517  // Smis: 0 -> false, all other -> true
518  Label not_smi;
519  __ JumpIfNotSmi(argument, &not_smi, Label::kNear);
520  // argument contains the correct return value already.
521  if (!tos_.is(argument)) {
522  __ mov(tos_, argument);
523  }
524  __ ret(1 * kPointerSize);
525  __ bind(&not_smi);
526  } else if (types_.NeedsMap()) {
527  // If we need a map later and have a Smi -> patch.
528  __ JumpIfSmi(argument, &patch, Label::kNear);
529  }
530 
531  if (types_.NeedsMap()) {
532  __ mov(map, FieldOperand(argument, HeapObject::kMapOffset));
533 
534  if (types_.CanBeUndetectable()) {
535  __ test_b(FieldOperand(map, Map::kBitFieldOffset),
536  1 << Map::kIsUndetectable);
537  // Undetectable -> false.
538  Label not_undetectable;
539  __ j(zero, &not_undetectable, Label::kNear);
540  __ Set(tos_, Immediate(0));
541  __ ret(1 * kPointerSize);
542  __ bind(&not_undetectable);
543  }
544  }
545 
546  if (types_.Contains(SPEC_OBJECT)) {
547  // spec object -> true.
548  Label not_js_object;
549  __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
550  __ j(below, &not_js_object, Label::kNear);
551  // argument contains the correct return value already.
552  if (!tos_.is(argument)) {
553  __ Set(tos_, Immediate(1));
554  }
555  __ ret(1 * kPointerSize);
556  __ bind(&not_js_object);
557  }
558 
559  if (types_.Contains(STRING)) {
560  // String value -> false iff empty.
561  Label not_string;
562  __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
563  __ j(above_equal, &not_string, Label::kNear);
564  __ mov(tos_, FieldOperand(argument, String::kLengthOffset));
565  __ ret(1 * kPointerSize); // the string length is OK as the return value
566  __ bind(&not_string);
567  }
568 
569  if (types_.Contains(HEAP_NUMBER)) {
570  // heap number -> false iff +0, -0, or NaN.
571  Label not_heap_number, false_result;
572  __ cmp(map, factory->heap_number_map());
573  __ j(not_equal, &not_heap_number, Label::kNear);
574  __ fldz();
575  __ fld_d(FieldOperand(argument, HeapNumber::kValueOffset));
576  __ FCmp();
577  __ j(zero, &false_result, Label::kNear);
578  // argument contains the correct return value already.
579  if (!tos_.is(argument)) {
580  __ Set(tos_, Immediate(1));
581  }
582  __ ret(1 * kPointerSize);
583  __ bind(&false_result);
584  __ Set(tos_, Immediate(0));
585  __ ret(1 * kPointerSize);
586  __ bind(&not_heap_number);
587  }
588 
589  __ bind(&patch);
590  GenerateTypeTransition(masm);
591 }
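Taken together, the cases above implement the ES5 ToBoolean mapping. Restated as a compact host-side sketch (semantics only, not V8 internals; "undetectable" covers document.all-style host objects):

    #include <cmath>
    #include <string>

    // Falsy: undefined, null, false, +0, -0, NaN, "", undetectable objects.
    // Everything else, including empty arrays and objects, is truthy.
    inline bool ToBooleanNumber(double v) { return v != 0.0 && !std::isnan(v); }
    inline bool ToBooleanString(const std::string& s) { return !s.empty(); }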
592 
593 
594 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
595  // We don't allow a GC during a store buffer overflow so there is no need to
596  // store the registers in any particular way, but we do have to store and
597  // restore them.
598  __ pushad();
599  if (save_doubles_ == kSaveFPRegs) {
600  CpuFeatures::Scope scope(SSE2);
601  __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
602  for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
603  XMMRegister reg = XMMRegister::from_code(i);
604  __ movdbl(Operand(esp, i * kDoubleSize), reg);
605  }
606  }
607  const int argument_count = 1;
608 
609  AllowExternalCallThatCantCauseGC scope(masm);
610  __ PrepareCallCFunction(argument_count, ecx);
611  __ mov(Operand(esp, 0 * kPointerSize),
612  Immediate(ExternalReference::isolate_address()));
613  __ CallCFunction(
614  ExternalReference::store_buffer_overflow_function(masm->isolate()),
615  argument_count);
616  if (save_doubles_ == kSaveFPRegs) {
617  CpuFeatures::Scope scope(SSE2);
618  for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
619  XMMRegister reg = XMMRegister::from_code(i);
620  __ movdbl(reg, Operand(esp, i * kDoubleSize));
621  }
622  __ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
623  }
624  __ popad();
625  __ ret(0);
626 }
627 
628 
629 void ToBooleanStub::CheckOddball(MacroAssembler* masm,
630  Type type,
631  Heap::RootListIndex value,
632  bool result) {
633  const Register argument = eax;
634  if (types_.Contains(type)) {
635  // If we see an expected oddball, return its ToBoolean value tos_.
636  Label different_value;
637  __ CompareRoot(argument, value);
638  __ j(not_equal, &different_value, Label::kNear);
639  if (!result) {
640  // If we have to return zero, there is no way around clearing tos_.
641  __ Set(tos_, Immediate(0));
642  } else if (!tos_.is(argument)) {
643  // If we have to return non-zero, we can re-use the argument if it is the
644  // same register as the result, because we never see Smi-zero here.
645  __ Set(tos_, Immediate(1));
646  }
647  __ ret(1 * kPointerSize);
648  __ bind(&different_value);
649  }
650 }
651 
652 
653 void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
654  __ pop(ecx); // Get return address, operand is now on top of stack.
655  __ push(Immediate(Smi::FromInt(tos_.code())));
656  __ push(Immediate(Smi::FromInt(types_.ToByte())));
657  __ push(ecx); // Push return address.
658  // Patch the caller to an appropriate specialized stub and return the
659  // operation result to the caller of the stub.
660  __ TailCallExternalReference(
661  ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
662  3,
663  1);
664 }
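The pop/push/push sequence above is the recurring ia32 idiom for splicing extra arguments underneath the return address, so the tail-called runtime helper sees them as ordinary stack parameters:

    // before (top -> bottom):    after (top -> bottom):
    //   [return address]           [return address]
    //   [operand]                  [Smi(types_.ToByte())]
    //                              [Smi(tos_.code())]
    //                              [operand]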
665 
666 
667 class FloatingPointHelper : public AllStatic {
668  public:
669  enum ArgLocation {
670  ARGS_ON_STACK,
671  ARGS_IN_REGISTERS
672  };
673 
674  // Code pattern for loading a floating point value. Input value must
675  // be either a smi or a heap number object (fp value). Requirements:
676  // operand in register number. Returns operand as floating point number
677  // on FPU stack.
678  static void LoadFloatOperand(MacroAssembler* masm, Register number);
679 
680  // Code pattern for loading floating point values. Input values must
681  // be either smi or heap number objects (fp values). Requirements:
682  // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
683  // Returns operands as floating point numbers on FPU stack.
684  static void LoadFloatOperands(MacroAssembler* masm,
685  Register scratch,
686  ArgLocation arg_location = ARGS_ON_STACK);
687 
688  // Similar to LoadFloatOperand but assumes that both operands are smis.
689  // Expects operands in edx, eax.
690  static void LoadFloatSmis(MacroAssembler* masm, Register scratch);
691 
692  // Test if operands are smi or number objects (fp). Requirements:
693  // operand_1 in eax, operand_2 in edx; falls through on float
694  // operands, jumps to the non_float label otherwise.
695  static void CheckFloatOperands(MacroAssembler* masm,
696  Label* non_float,
697  Register scratch);
698 
699  // Checks that the two floating point numbers on top of the FPU stack
700  // have int32 values.
701  static void CheckFloatOperandsAreInt32(MacroAssembler* masm,
702  Label* non_int32);
703 
704  // Takes the operands in edx and eax and loads them as integers in eax
705  // and ecx.
706  static void LoadUnknownsAsIntegers(MacroAssembler* masm,
707  bool use_sse3,
708  Label* operand_conversion_failure);
709 
710  // Must only be called after LoadUnknownsAsIntegers. Assumes that the
711  // operands are pushed on the stack, and that their conversions to int32
712  // are in eax and ecx. Checks that the original numbers were in the int32
713  // range.
714  static void CheckLoadedIntegersWereInt32(MacroAssembler* masm,
715  bool use_sse3,
716  Label* not_int32);
717 
718  // Assumes that operands are smis or heap numbers and loads them
719  // into xmm0 and xmm1. Operands are in edx and eax.
720  // Leaves operands unchanged.
721  static void LoadSSE2Operands(MacroAssembler* masm);
722 
723  // Test if operands are numbers (smi or HeapNumber objects), and load
724  // them into xmm0 and xmm1 if they are. Jump to label not_numbers if
725  // either operand is not a number. Operands are in edx and eax.
726  // Leaves operands unchanged.
727  static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);
728 
729  // Similar to LoadSSE2Operands but assumes that both operands are smis.
730  // Expects operands in edx, eax.
731  static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
732 
733  // Checks that the two floating point numbers loaded into xmm0 and xmm1
734  // have int32 values.
735  static void CheckSSE2OperandsAreInt32(MacroAssembler* masm,
736  Label* non_int32,
737  Register scratch);
738 };
739 
740 
741 // Get the integer part of a heap number. Surprisingly, all this bit twiddling
742 // is faster than using the built-in instructions on floating point registers.
743 // Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the
744 // trashed registers.
745 static void IntegerConvert(MacroAssembler* masm,
746  Register source,
747  bool use_sse3,
748  Label* conversion_failure) {
749  ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
750  Label done, right_exponent, normal_exponent;
751  Register scratch = ebx;
752  Register scratch2 = edi;
753  // Get exponent word.
754  __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
755  // Get exponent alone in scratch2.
756  __ mov(scratch2, scratch);
757  __ and_(scratch2, HeapNumber::kExponentMask);
758  if (use_sse3) {
759  CpuFeatures::Scope scope(SSE3);
760  // Check whether the exponent is too big for a 64 bit signed integer.
761  static const uint32_t kTooBigExponent =
763  __ cmp(scratch2, Immediate(kTooBigExponent));
764  __ j(greater_equal, conversion_failure);
765  // Load x87 register with heap number.
766  __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
767  // Reserve space for 64 bit answer.
768  __ sub(esp, Immediate(sizeof(uint64_t))); // Nolint.
769  // Do conversion, which cannot fail because we checked the exponent.
770  __ fisttp_d(Operand(esp, 0));
771  __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
772  __ add(esp, Immediate(sizeof(uint64_t))); // Nolint.
773  } else {
774  // Load ecx with zero. We use this either for the final shift or
775  // for the answer.
776  __ xor_(ecx, ecx);
777  // Check whether the exponent matches a 32 bit signed int that cannot be
778  // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
779  // exponent is 30 (biased). This is the exponent that we are fastest at and
780  // also the highest exponent we can handle here.
781  const uint32_t non_smi_exponent =
782      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
783  __ cmp(scratch2, Immediate(non_smi_exponent));
784  // If we have a match of the int32-but-not-Smi exponent then skip some
785  // logic.
786  __ j(equal, &right_exponent, Label::kNear);
787  // If the exponent is higher than that then go to slow case. This catches
788  // numbers that don't fit in a signed int32, infinities and NaNs.
789  __ j(less, &normal_exponent, Label::kNear);
790 
791  {
792  // Handle a big exponent. The only reason we have this code is that the
793  // >>> operator has a tendency to generate numbers with an exponent of 31.
794  const uint32_t big_non_smi_exponent =
795      (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
796  __ cmp(scratch2, Immediate(big_non_smi_exponent));
797  __ j(not_equal, conversion_failure);
798  // We have the big exponent, typically from >>>. This means the number is
799  // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
800  __ mov(scratch2, scratch);
801  __ and_(scratch2, HeapNumber::kMantissaMask);
802  // Put back the implicit 1.
803  __ or_(scratch2, 1 << HeapNumber::kExponentShift);
804  // Shift up the mantissa bits to take up the space the exponent used to
805  // take. We just orred in the implicit bit so that took care of one and
806  // we want to use the full unsigned range so we subtract 1 bit from the
807  // shift distance.
808  const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
809  __ shl(scratch2, big_shift_distance);
810  // Get the second half of the double.
811  __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
812  // Shift down 21 bits to get the most significant 11 bits or the low
813  // mantissa word.
814  __ shr(ecx, 32 - big_shift_distance);
815  __ or_(ecx, scratch2);
816  // We have the answer in ecx, but we may need to negate it.
817  __ test(scratch, scratch);
818  __ j(positive, &done, Label::kNear);
819  __ neg(ecx);
820  __ jmp(&done, Label::kNear);
821  }
822 
823  __ bind(&normal_exponent);
824  // Exponent word in scratch, exponent part of exponent word in scratch2.
825  // Zero in ecx.
826  // We know the exponent is smaller than 30 (biased). If it is less than
827  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
828  // it rounds to zero.
829  const uint32_t zero_exponent =
830      (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
831  __ sub(scratch2, Immediate(zero_exponent));
832  // ecx already has a Smi zero.
833  __ j(less, &done, Label::kNear);
834 
835  // We have a shifted exponent between 0 and 30 in scratch2.
836  __ shr(scratch2, HeapNumber::kExponentShift);
837  __ mov(ecx, Immediate(30));
838  __ sub(ecx, scratch2);
839 
840  __ bind(&right_exponent);
841  // Here ecx is the shift, scratch is the exponent word.
842  // Get the top bits of the mantissa.
843  __ and_(scratch, HeapNumber::kMantissaMask);
844  // Put back the implicit 1.
845  __ or_(scratch, 1 << HeapNumber::kExponentShift);
846  // Shift up the mantissa bits to take up the space the exponent used to
847  // take. We have kExponentShift + 1 significant bits in the low end of the
848  // word. Shift them to the top bits.
849  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
850  __ shl(scratch, shift_distance);
851  // Get the second half of the double. For some exponents we don't
852  // actually need this because the bits get shifted out again, but
853  // it's probably slower to test than just to do it.
854  __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
855  // Shift down 22 bits to get the most significant 10 bits or the low
856  // mantissa word.
857  __ shr(scratch2, 32 - shift_distance);
858  __ or_(scratch2, scratch);
859  // Move down according to the exponent.
860  __ shr_cl(scratch2);
861  // Now the unsigned answer is in scratch2. We need to move it to ecx and
862  // we may need to fix the sign.
863  Label negative;
864  __ xor_(ecx, ecx);
865  __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
866  __ j(greater, &negative, Label::kNear);
867  __ mov(ecx, scratch2);
868  __ jmp(&done, Label::kNear);
869  __ bind(&negative);
870  __ sub(ecx, scratch2);
871  __ bind(&done);
872  }
873 }
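As a companion to the bit-twiddling above, the fields IntegerConvert works with can be extracted from a double in portable C++. A sketch assuming a normal, non-zero value (the only shapes the fast path sees; field positions mirror HeapNumber's kExponentShift/kExponentBias layout):

    #include <cstdint>
    #include <cstring>

    struct DoubleFields {
      uint32_t sign;      // bit 63
      int32_t exponent;   // unbiased; the stored bias is 1023
      uint64_t mantissa;  // 52 stored bits plus the implicit leading 1
    };

    inline DoubleFields Decompose(double v) {
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof bits);
      DoubleFields f;
      f.sign = static_cast<uint32_t>(bits >> 63);
      f.exponent = static_cast<int32_t>((bits >> 52) & 0x7ff) - 1023;
      f.mantissa = (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
      return f;
    }

For 2147483648.0 (2^31, a typical >>> result) this yields exponent 31: exactly the "big exponent" case the routine handles separately.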
874 
875 
876 void UnaryOpStub::PrintName(StringStream* stream) {
877  const char* op_name = Token::Name(op_);
878  const char* overwrite_name = NULL; // Make g++ happy.
879  switch (mode_) {
880  case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
881  case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
882  }
883  stream->Add("UnaryOpStub_%s_%s_%s",
884  op_name,
885  overwrite_name,
886  UnaryOpIC::GetName(operand_type_));
887 }
888 
889 
890 // TODO(svenpanne): Use virtual functions instead of switch.
891 void UnaryOpStub::Generate(MacroAssembler* masm) {
892  switch (operand_type_) {
893    case UnaryOpIC::UNINITIALIZED:
894      GenerateTypeTransition(masm);
895  break;
896  case UnaryOpIC::SMI:
897  GenerateSmiStub(masm);
898  break;
899    case UnaryOpIC::HEAP_NUMBER:
900      GenerateHeapNumberStub(masm);
901  break;
902  case UnaryOpIC::GENERIC:
903  GenerateGenericStub(masm);
904  break;
905  }
906 }
907 
908 
909 void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
910  __ pop(ecx); // Save return address.
911 
912  __ push(eax); // the operand
913  __ push(Immediate(Smi::FromInt(op_)));
914  __ push(Immediate(Smi::FromInt(mode_)));
915  __ push(Immediate(Smi::FromInt(operand_type_)));
916 
917  __ push(ecx); // Push return address.
918 
919  // Patch the caller to an appropriate specialized stub and return the
920  // operation result to the caller of the stub.
921  __ TailCallExternalReference(
922  ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
923 }
924 
925 
926 // TODO(svenpanne): Use virtual functions instead of switch.
927 void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
928  switch (op_) {
929  case Token::SUB:
930  GenerateSmiStubSub(masm);
931  break;
932  case Token::BIT_NOT:
933  GenerateSmiStubBitNot(masm);
934  break;
935  default:
936  UNREACHABLE();
937  }
938 }
939 
940 
941 void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
942  Label non_smi, undo, slow;
943  GenerateSmiCodeSub(masm, &non_smi, &undo, &slow,
944  Label::kNear, Label::kNear, Label::kNear);
945  __ bind(&undo);
946  GenerateSmiCodeUndo(masm);
947  __ bind(&non_smi);
948  __ bind(&slow);
949  GenerateTypeTransition(masm);
950 }
951 
952 
953 void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
954  Label non_smi;
955  GenerateSmiCodeBitNot(masm, &non_smi);
956  __ bind(&non_smi);
957  GenerateTypeTransition(masm);
958 }
959 
960 
961 void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
962  Label* non_smi,
963  Label* undo,
964  Label* slow,
965  Label::Distance non_smi_near,
966  Label::Distance undo_near,
967  Label::Distance slow_near) {
968  // Check whether the value is a smi.
969  __ JumpIfNotSmi(eax, non_smi, non_smi_near);
970 
971  // We can't handle -0 with smis, so use a type transition for that case.
972  __ test(eax, eax);
973  __ j(zero, slow, slow_near);
974 
975  // Try optimistic subtraction '0 - value', saving operand in eax for undo.
976  __ mov(edx, eax);
977  __ Set(eax, Immediate(0));
978  __ sub(eax, edx);
979  __ j(overflow, undo, undo_near);
980  __ ret(0);
981 }
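Both bail-outs above fall out of the tagged representation: smi 0 cannot encode JavaScript's -0, and negating the most negative smi leaves the 31-bit smi range, which the sub reports as int32 overflow on the tagged word. A sketch of the same fast path (illustrative; tagged values are value << 1):

    #include <cstdint>

    inline bool NegateTaggedSmi(int32_t tagged, int32_t* out) {
      if (tagged == 0) return false;  // JS -0 needs a heap number
      int64_t negated = -static_cast<int64_t>(tagged);
      if (negated != static_cast<int32_t>(negated)) return false;  // 'undo'
      *out = static_cast<int32_t>(negated);  // only tagged(-2^30) overflows
      return true;
    }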
982 
983 
984 void UnaryOpStub::GenerateSmiCodeBitNot(
985  MacroAssembler* masm,
986  Label* non_smi,
987  Label::Distance non_smi_near) {
988  // Check whether the value is a smi.
989  __ JumpIfNotSmi(eax, non_smi, non_smi_near);
990 
991  // Flip bits and revert inverted smi-tag.
992  __ not_(eax);
993  __ and_(eax, ~kSmiTagMask);
994  __ ret(0);
995 }
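The flip-then-mask pair above avoids an untag/retag round trip: with a zero smi tag, ~(v << 1) == (~v << 1) | 1, so inverting every bit and clearing the (now set) tag bit yields tagged ~v directly. Checkable in a one-line sketch:

    #include <cstdint>

    inline int32_t BitNotTaggedSmi(int32_t tagged) {
      return ~tagged & ~1;  // e.g. BitNotTaggedSmi(5 << 1) == (~5 << 1)
    }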
996 
997 
998 void UnaryOpStub::GenerateSmiCodeUndo(MacroAssembler* masm) {
999  __ mov(eax, edx);
1000 }
1001 
1002 
1003 // TODO(svenpanne): Use virtual functions instead of switch.
1004 void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
1005  switch (op_) {
1006  case Token::SUB:
1007  GenerateHeapNumberStubSub(masm);
1008  break;
1009  case Token::BIT_NOT:
1010  GenerateHeapNumberStubBitNot(masm);
1011  break;
1012  default:
1013  UNREACHABLE();
1014  }
1015 }
1016 
1017 
1018 void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
1019  Label non_smi, undo, slow, call_builtin;
1020  GenerateSmiCodeSub(masm, &non_smi, &undo, &call_builtin, Label::kNear);
1021  __ bind(&non_smi);
1022  GenerateHeapNumberCodeSub(masm, &slow);
1023  __ bind(&undo);
1024  GenerateSmiCodeUndo(masm);
1025  __ bind(&slow);
1026  GenerateTypeTransition(masm);
1027  __ bind(&call_builtin);
1028  GenerateGenericCodeFallback(masm);
1029 }
1030 
1031 
1032 void UnaryOpStub::GenerateHeapNumberStubBitNot(
1033  MacroAssembler* masm) {
1034  Label non_smi, slow;
1035  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
1036  __ bind(&non_smi);
1037  GenerateHeapNumberCodeBitNot(masm, &slow);
1038  __ bind(&slow);
1039  GenerateTypeTransition(masm);
1040 }
1041 
1042 
1043 void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
1044  Label* slow) {
1045  __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
1046  __ cmp(edx, masm->isolate()->factory()->heap_number_map());
1047  __ j(not_equal, slow);
1048 
1049  if (mode_ == UNARY_OVERWRITE) {
1050    __ xor_(FieldOperand(eax, HeapNumber::kExponentOffset),
1051            Immediate(HeapNumber::kSignMask));  // Flip sign.
1052  } else {
1053  __ mov(edx, eax);
1054  // edx: operand
1055 
1056  Label slow_allocate_heapnumber, heapnumber_allocated;
1057  __ AllocateHeapNumber(eax, ebx, ecx, &slow_allocate_heapnumber);
1058  __ jmp(&heapnumber_allocated, Label::kNear);
1059 
1060  __ bind(&slow_allocate_heapnumber);
1061  {
1062  FrameScope scope(masm, StackFrame::INTERNAL);
1063  __ push(edx);
1064  __ CallRuntime(Runtime::kNumberAlloc, 0);
1065  __ pop(edx);
1066  }
1067 
1068  __ bind(&heapnumber_allocated);
1069  // eax: allocated 'empty' number
1070  __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
1071  __ xor_(ecx, HeapNumber::kSignMask);  // Flip sign.
1072  __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
1073  __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
1074  __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
1075  }
1076  __ ret(0);
1077 }
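The xor against HeapNumber::kSignMask works because IEEE-754 negation is one bit: flipping bit 31 of the high word (bit 63 of the double) toggles the sign and leaves exponent and mantissa untouched. The same trick on the host, as a sketch:

    #include <cstdint>
    #include <cstring>

    inline double FlipSign(double v) {
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof bits);
      bits ^= uint64_t{1} << 63;  // the sign bit
      std::memcpy(&v, &bits, sizeof v);
      return v;  // FlipSign(0.0) == -0.0; no FPU involved
    }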
1078 
1079 
1080 void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
1081  Label* slow) {
1082  __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
1083  __ cmp(edx, masm->isolate()->factory()->heap_number_map());
1084  __ j(not_equal, slow);
1085 
1086  // Convert the heap number in eax to an untagged integer in ecx.
1087  IntegerConvert(masm, eax, CpuFeatures::IsSupported(SSE3), slow);
1088 
1089  // Do the bitwise operation and check if the result fits in a smi.
1090  Label try_float;
1091  __ not_(ecx);
1092  __ cmp(ecx, 0xc0000000);
1093  __ j(sign, &try_float, Label::kNear);
1094 
1095  // Tag the result as a smi and we're done.
1096  STATIC_ASSERT(kSmiTagSize == 1);
1097  __ lea(eax, Operand(ecx, times_2, kSmiTag));
1098  __ ret(0);
1099 
1100  // Try to store the result in a heap number.
1101  __ bind(&try_float);
1102  if (mode_ == UNARY_NO_OVERWRITE) {
1103  Label slow_allocate_heapnumber, heapnumber_allocated;
1104  __ mov(ebx, eax);
1105  __ AllocateHeapNumber(eax, edx, edi, &slow_allocate_heapnumber);
1106  __ jmp(&heapnumber_allocated);
1107 
1108  __ bind(&slow_allocate_heapnumber);
1109  {
1110  FrameScope scope(masm, StackFrame::INTERNAL);
1111  // Push the original HeapNumber on the stack. The integer value can't
1112  // be stored since it's untagged and not in the smi range (so we can't
1113  // smi-tag it). We'll recalculate the value after the GC instead.
1114  __ push(ebx);
1115  __ CallRuntime(Runtime::kNumberAlloc, 0);
1116  // New HeapNumber is in eax.
1117  __ pop(edx);
1118  }
1119  // IntegerConvert uses ebx and edi as scratch registers.
1120  // This conversion won't go slow-case.
1121  IntegerConvert(masm, edx, CpuFeatures::IsSupported(SSE3), slow);
1122  __ not_(ecx);
1123 
1124  __ bind(&heapnumber_allocated);
1125  }
1126  if (CpuFeatures::IsSupported(SSE2)) {
1127    CpuFeatures::Scope use_sse2(SSE2);
1128    __ cvtsi2sd(xmm0, ecx);
1129    __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1130  } else {
1131    __ push(ecx);
1132    __ fild_s(Operand(esp, 0));
1133    __ pop(ecx);
1134    __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1135  }
1136  __ ret(0);
1137 }
1138 
1139 
1140 // TODO(svenpanne): Use virtual functions instead of switch.
1141 void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
1142  switch (op_) {
1143  case Token::SUB:
1144  GenerateGenericStubSub(masm);
1145  break;
1146  case Token::BIT_NOT:
1147  GenerateGenericStubBitNot(masm);
1148  break;
1149  default:
1150  UNREACHABLE();
1151  }
1152 }
1153 
1154 
1155 void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
1156  Label non_smi, undo, slow;
1157  GenerateSmiCodeSub(masm, &non_smi, &undo, &slow, Label::kNear);
1158  __ bind(&non_smi);
1159  GenerateHeapNumberCodeSub(masm, &slow);
1160  __ bind(&undo);
1161  GenerateSmiCodeUndo(masm);
1162  __ bind(&slow);
1163  GenerateGenericCodeFallback(masm);
1164 }
1165 
1166 
1167 void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
1168  Label non_smi, slow;
1169  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
1170  __ bind(&non_smi);
1171  GenerateHeapNumberCodeBitNot(masm, &slow);
1172  __ bind(&slow);
1173  GenerateGenericCodeFallback(masm);
1174 }
1175 
1176 
1177 void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
1178  // Handle the slow case by jumping to the corresponding JavaScript builtin.
1179  __ pop(ecx); // pop return address.
1180  __ push(eax);
1181  __ push(ecx); // push return address
1182  switch (op_) {
1183  case Token::SUB:
1184  __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
1185  break;
1186  case Token::BIT_NOT:
1187  __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
1188  break;
1189  default:
1190  UNREACHABLE();
1191  }
1192 }
1193 
1194 
1195 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
1196  __ pop(ecx); // Save return address.
1197  __ push(edx);
1198  __ push(eax);
1199  // Left and right arguments are now on top.
1200  // Push this stub's key. Although the operation and the type info are
1201  // encoded into the key, the encoding is opaque, so push them too.
1202  __ push(Immediate(Smi::FromInt(MinorKey())));
1203  __ push(Immediate(Smi::FromInt(op_)));
1204  __ push(Immediate(Smi::FromInt(operands_type_)));
1205 
1206  __ push(ecx); // Push return address.
1207 
1208  // Patch the caller to an appropriate specialized stub and return the
1209  // operation result to the caller of the stub.
1210  __ TailCallExternalReference(
1211  ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
1212  masm->isolate()),
1213  5,
1214  1);
1215 }
1216 
1217 
1218 // Prepare for a type transition runtime call when the args are already on
1219 // the stack, under the return address.
1220 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
1221  __ pop(ecx); // Save return address.
1222  // Left and right arguments are already on top of the stack.
1223  // Push this stub's key. Although the operation and the type info are
1224  // encoded into the key, the encoding is opaque, so push them too.
1225  __ push(Immediate(Smi::FromInt(MinorKey())));
1226  __ push(Immediate(Smi::FromInt(op_)));
1227  __ push(Immediate(Smi::FromInt(operands_type_)));
1228 
1229  __ push(ecx); // Push return address.
1230 
1231  // Patch the caller to an appropriate specialized stub and return the
1232  // operation result to the caller of the stub.
1233  __ TailCallExternalReference(
1234  ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
1235  masm->isolate()),
1236  5,
1237  1);
1238 }
1239 
1240 
1241 void BinaryOpStub::Generate(MacroAssembler* masm) {
1242  // Explicitly allow generation of nested stubs. It is safe here because
1243  // generation code does not use any raw pointers.
1244  AllowStubCallsScope allow_stub_calls(masm, true);
1245 
1246  switch (operands_type_) {
1247    case BinaryOpIC::UNINITIALIZED:
1248      GenerateTypeTransition(masm);
1249  break;
1250  case BinaryOpIC::SMI:
1251  GenerateSmiStub(masm);
1252  break;
1253  case BinaryOpIC::INT32:
1254  GenerateInt32Stub(masm);
1255  break;
1256    case BinaryOpIC::HEAP_NUMBER:
1257      GenerateHeapNumberStub(masm);
1258  break;
1259  case BinaryOpIC::ODDBALL:
1260  GenerateOddballStub(masm);
1261  break;
1262    case BinaryOpIC::BOTH_STRING:
1263      GenerateBothStringStub(masm);
1264  break;
1265  case BinaryOpIC::STRING:
1266  GenerateStringStub(masm);
1267  break;
1268  case BinaryOpIC::GENERIC:
1269  GenerateGeneric(masm);
1270  break;
1271  default:
1272  UNREACHABLE();
1273  }
1274 }
1275 
1276 
1277 void BinaryOpStub::PrintName(StringStream* stream) {
1278  const char* op_name = Token::Name(op_);
1279  const char* overwrite_name;
1280  switch (mode_) {
1281  case NO_OVERWRITE: overwrite_name = "Alloc"; break;
1282  case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
1283  case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
1284  default: overwrite_name = "UnknownOverwrite"; break;
1285  }
1286  stream->Add("BinaryOpStub_%s_%s_%s",
1287  op_name,
1288  overwrite_name,
1289  BinaryOpIC::GetName(operands_type_));
1290 }
1291 
1292 
1293 void BinaryOpStub::GenerateSmiCode(
1294  MacroAssembler* masm,
1295  Label* slow,
1296  SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
1297  // 1. Move arguments into edx, eax except for DIV and MOD, which need the
1298  // dividend in eax and edx free for the division. Use eax, ebx for those.
1299  Comment load_comment(masm, "-- Load arguments");
1300  Register left = edx;
1301  Register right = eax;
1302  if (op_ == Token::DIV || op_ == Token::MOD) {
1303  left = eax;
1304  right = ebx;
1305  __ mov(ebx, eax);
1306  __ mov(eax, edx);
1307  }
1308 
1309 
1310  // 2. Prepare the smi check of both operands by oring them together.
1311  Comment smi_check_comment(masm, "-- Smi check arguments");
1312  Label not_smis;
1313  Register combined = ecx;
1314  ASSERT(!left.is(combined) && !right.is(combined));
1315  switch (op_) {
1316  case Token::BIT_OR:
1317  // Perform the operation into eax and smi check the result. Preserve
1318  // eax in case the result is not a smi.
1319  ASSERT(!left.is(ecx) && !right.is(ecx));
1320  __ mov(ecx, right);
1321  __ or_(right, left); // Bitwise or is commutative.
1322  combined = right;
1323  break;
1324 
1325  case Token::BIT_XOR:
1326  case Token::BIT_AND:
1327  case Token::ADD:
1328  case Token::SUB:
1329  case Token::MUL:
1330  case Token::DIV:
1331  case Token::MOD:
1332  __ mov(combined, right);
1333  __ or_(combined, left);
1334  break;
1335 
1336  case Token::SHL:
1337  case Token::SAR:
1338  case Token::SHR:
1339  // Move the right operand into ecx for the shift operation, use eax
1340  // for the smi check register.
1341  ASSERT(!left.is(ecx) && !right.is(ecx));
1342  __ mov(ecx, right);
1343  __ or_(right, left);
1344  combined = right;
1345  break;
1346 
1347  default:
1348  break;
1349  }
1350 
1351  // 3. Perform the smi check of the operands.
1352  STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
1353  __ JumpIfNotSmi(combined, &not_smis);
1354 
1355  // 4. Operands are both smis, perform the operation leaving the result in
1356  // eax and check the result if necessary.
1357  Comment perform_smi(masm, "-- Perform smi operation");
1358  Label use_fp_on_smis;
1359  switch (op_) {
1360  case Token::BIT_OR:
1361  // Nothing to do.
1362  break;
1363 
1364  case Token::BIT_XOR:
1365  ASSERT(right.is(eax));
1366  __ xor_(right, left); // Bitwise xor is commutative.
1367  break;
1368 
1369  case Token::BIT_AND:
1370  ASSERT(right.is(eax));
1371  __ and_(right, left); // Bitwise and is commutative.
1372  break;
1373 
1374  case Token::SHL:
1375  // Remove tags from operands (but keep sign).
1376  __ SmiUntag(left);
1377  __ SmiUntag(ecx);
1378  // Perform the operation.
1379  __ shl_cl(left);
1380  // Check that the *signed* result fits in a smi.
1381  __ cmp(left, 0xc0000000);
1382  __ j(sign, &use_fp_on_smis);
1383  // Tag the result and store it in register eax.
1384  __ SmiTag(left);
1385  __ mov(eax, left);
1386  break;
1387 
1388  case Token::SAR:
1389  // Remove tags from operands (but keep sign).
1390  __ SmiUntag(left);
1391  __ SmiUntag(ecx);
1392  // Perform the operation.
1393  __ sar_cl(left);
1394  // Tag the result and store it in register eax.
1395  __ SmiTag(left);
1396  __ mov(eax, left);
1397  break;
1398 
1399  case Token::SHR:
1400  // Remove tags from operands (but keep sign).
1401  __ SmiUntag(left);
1402  __ SmiUntag(ecx);
1403  // Perform the operation.
1404  __ shr_cl(left);
1405  // Check that the *unsigned* result fits in a smi.
1406  // Neither of the two high-order bits can be set:
1407  // - 0x80000000: high bit would be lost when smi tagging.
1408  // - 0x40000000: this number would convert to negative when Smi tagging.
1409  // These two cases can only happen with shifts by 0 or 1 when handed a
1410  // valid smi.
1411  __ test(left, Immediate(0xc0000000));
1412  __ j(not_zero, &use_fp_on_smis);
1413  // Tag the result and store it in register eax.
1414  __ SmiTag(left);
1415  __ mov(eax, left);
1416  break;
1417 
1418  case Token::ADD:
1419  ASSERT(right.is(eax));
1420  __ add(right, left); // Addition is commutative.
1421  __ j(overflow, &use_fp_on_smis);
1422  break;
1423 
1424  case Token::SUB:
1425  __ sub(left, right);
1426  __ j(overflow, &use_fp_on_smis);
1427  __ mov(eax, left);
1428  break;
1429 
1430  case Token::MUL:
1431  // If the smi tag is 0 we can just leave the tag on one operand.
1432  STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
1433  // We can't revert the multiplication if the result is not a smi
1434  // so save the right operand.
1435  __ mov(ebx, right);
1436  // Remove tag from one of the operands (but keep sign).
1437  __ SmiUntag(right);
1438  // Do multiplication.
1439  __ imul(right, left); // Multiplication is commutative.
1440  __ j(overflow, &use_fp_on_smis);
1441  // Check for negative zero result. Use combined = left | right.
1442  __ NegativeZeroTest(right, combined, &use_fp_on_smis);
1443  break;
1444 
1445  case Token::DIV:
1446  // We can't revert the division if the result is not a smi so
1447  // save the left operand.
1448  __ mov(edi, left);
1449  // Check for 0 divisor.
1450  __ test(right, right);
1451  __ j(zero, &use_fp_on_smis);
1452  // Sign extend left into edx:eax.
1453  ASSERT(left.is(eax));
1454  __ cdq();
1455  // Divide edx:eax by right.
1456  __ idiv(right);
1457  // Check for the corner case of dividing the most negative smi by
1458  // -1. We cannot use the overflow flag, since it is not set by the idiv
1459  // instruction.
1460  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
1461  __ cmp(eax, 0x40000000);
1462  __ j(equal, &use_fp_on_smis);
1463  // Check for negative zero result. Use combined = left | right.
1464  __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
1465  // Check that the remainder is zero.
1466  __ test(edx, edx);
1467  __ j(not_zero, &use_fp_on_smis);
1468  // Tag the result and store it in register eax.
1469  __ SmiTag(eax);
1470  break;
1471 
1472  case Token::MOD:
1473  // Check for 0 divisor.
1474  __ test(right, right);
1475  __ j(zero, &not_smis);
1476 
1477  // Sign extend left into edx:eax.
1478  ASSERT(left.is(eax));
1479  __ cdq();
1480  // Divide edx:eax by right.
1481  __ idiv(right);
1482  // Check for negative zero result. Use combined = left | right.
1483  __ NegativeZeroTest(edx, combined, slow);
1484  // Move remainder to register eax.
1485  __ mov(eax, edx);
1486  break;
1487 
1488  default:
1489  UNREACHABLE();
1490  }
1491 
1492  // 5. Emit return of result in eax. Some operations have registers pushed.
1493  switch (op_) {
1494  case Token::ADD:
1495  case Token::SUB:
1496  case Token::MUL:
1497  case Token::DIV:
1498  __ ret(0);
1499  break;
1500  case Token::MOD:
1501  case Token::BIT_OR:
1502  case Token::BIT_AND:
1503  case Token::BIT_XOR:
1504  case Token::SAR:
1505  case Token::SHL:
1506  case Token::SHR:
1507  __ ret(2 * kPointerSize);
1508  break;
1509  default:
1510  UNREACHABLE();
1511  }
1512 
1513  // 6. For some operations emit inline code to perform floating point
1514  // operations on known smis (e.g., if the result of the operation
1515  // overflowed the smi range).
1516  if (allow_heapnumber_results == NO_HEAPNUMBER_RESULTS) {
1517  __ bind(&use_fp_on_smis);
1518  switch (op_) {
1519  // Undo the effects of some operations, and some register moves.
1520  case Token::SHL:
1521  // The arguments are saved on the stack, and only used from there.
1522  break;
1523  case Token::ADD:
1524  // Revert right = right + left.
1525  __ sub(right, left);
1526  break;
1527  case Token::SUB:
1528  // Revert left = left - right.
1529  __ add(left, right);
1530  break;
1531  case Token::MUL:
1532  // Right was clobbered but a copy is in ebx.
1533  __ mov(right, ebx);
1534  break;
1535  case Token::DIV:
1536  // Left was clobbered but a copy is in edi. Right is in ebx for
1537  // division. They should be in eax, ebx for jump to not_smi.
1538  __ mov(eax, edi);
1539  break;
1540  default:
1541  // No other operators jump to use_fp_on_smis.
1542  break;
1543  }
1544  __ jmp(&not_smis);
1545  } else {
1546  ASSERT(allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS);
1547  switch (op_) {
1548  case Token::SHL:
1549  case Token::SHR: {
1550  Comment perform_float(masm, "-- Perform float operation on smis");
1551  __ bind(&use_fp_on_smis);
1552  // Result we want is in left == edx, so we can put the allocated heap
1553  // number in eax.
1554  __ AllocateHeapNumber(eax, ecx, ebx, slow);
1555  // Store the result in the HeapNumber and return.
1556  // It's OK to overwrite the arguments on the stack because we
1557  // are about to return.
1558  if (op_ == Token::SHR) {
1559  __ mov(Operand(esp, 1 * kPointerSize), left);
1560  __ mov(Operand(esp, 2 * kPointerSize), Immediate(0));
1561        __ fild_d(Operand(esp, 1 * kPointerSize));
1562        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1563      } else {
1564        ASSERT_EQ(Token::SHL, op_);
1565        if (CpuFeatures::IsSupported(SSE2)) {
1566          CpuFeatures::Scope use_sse2(SSE2);
1567          __ cvtsi2sd(xmm0, left);
1568          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1569        } else {
1570          __ mov(Operand(esp, 1 * kPointerSize), left);
1571          __ fild_s(Operand(esp, 1 * kPointerSize));
1572          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1573  }
1574  }
1575  __ ret(2 * kPointerSize);
1576  break;
1577  }
1578 
1579  case Token::ADD:
1580  case Token::SUB:
1581  case Token::MUL:
1582  case Token::DIV: {
1583  Comment perform_float(masm, "-- Perform float operation on smis");
1584  __ bind(&use_fp_on_smis);
1585  // Restore arguments to edx, eax.
1586  switch (op_) {
1587  case Token::ADD:
1588  // Revert right = right + left.
1589  __ sub(right, left);
1590  break;
1591  case Token::SUB:
1592  // Revert left = left - right.
1593  __ add(left, right);
1594  break;
1595  case Token::MUL:
1596  // Right was clobbered but a copy is in ebx.
1597  __ mov(right, ebx);
1598  break;
1599  case Token::DIV:
1600  // Left was clobbered but a copy is in edi. Right is in ebx for
1601  // division.
1602  __ mov(edx, edi);
1603  __ mov(eax, right);
1604  break;
1605  default: UNREACHABLE();
1606  break;
1607  }
1608      __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
1609      if (CpuFeatures::IsSupported(SSE2)) {
1610  CpuFeatures::Scope use_sse2(SSE2);
1611  FloatingPointHelper::LoadSSE2Smis(masm, ebx);
1612  switch (op_) {
1613  case Token::ADD: __ addsd(xmm0, xmm1); break;
1614  case Token::SUB: __ subsd(xmm0, xmm1); break;
1615  case Token::MUL: __ mulsd(xmm0, xmm1); break;
1616  case Token::DIV: __ divsd(xmm0, xmm1); break;
1617  default: UNREACHABLE();
1618        }
1619        __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
1620  } else { // SSE2 not available, use FPU.
1621  FloatingPointHelper::LoadFloatSmis(masm, ebx);
1622  switch (op_) {
1623  case Token::ADD: __ faddp(1); break;
1624  case Token::SUB: __ fsubp(1); break;
1625  case Token::MUL: __ fmulp(1); break;
1626  case Token::DIV: __ fdivp(1); break;
1627  default: UNREACHABLE();
1628        }
1629        __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
1630  }
1631  __ mov(eax, ecx);
1632  __ ret(0);
1633  break;
1634  }
1635 
1636  default:
1637  break;
1638  }
1639  }
1640 
1641  // 7. Non-smi operands, fall out to the non-smi code with the operands in
1642  // edx and eax.
1643  Comment done_comment(masm, "-- Enter non-smi code");
1644  __ bind(&not_smis);
1645  switch (op_) {
1646  case Token::BIT_OR:
1647  case Token::SHL:
1648  case Token::SAR:
1649  case Token::SHR:
1650  // Right operand is saved in ecx and eax was destroyed by the smi
1651  // check.
1652  __ mov(eax, ecx);
1653  break;
1654 
1655  case Token::DIV:
1656  case Token::MOD:
1657  // Operands are in eax, ebx at this point.
1658  __ mov(edx, eax);
1659  __ mov(eax, ebx);
1660  break;
1661 
1662  default:
1663  break;
1664  }
1665 }
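The ADD case in step 4 is the canonical tagged-integer fast path: with a zero tag, tagged(a) + tagged(b) == tagged(a + b), so the stub adds the tagged words directly and lets the CPU's overflow flag pick between staying on the smi path and falling back to heap numbers. A standalone sketch of that check (illustrative only):

    #include <cstdint>

    // Returns false where the stub jumps to use_fp_on_smis.
    inline bool AddTaggedSmis(int32_t left, int32_t right, int32_t* out) {
      int64_t sum = static_cast<int64_t>(left) + right;
      if (sum != static_cast<int32_t>(sum)) return false;  // int32 overflow
      *out = static_cast<int32_t>(sum);
      return true;
    }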
1666 
1667 
1668 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
1669  Label call_runtime;
1670 
1671  switch (op_) {
1672  case Token::ADD:
1673  case Token::SUB:
1674  case Token::MUL:
1675  case Token::DIV:
1676  break;
1677  case Token::MOD:
1678  case Token::BIT_OR:
1679  case Token::BIT_AND:
1680  case Token::BIT_XOR:
1681  case Token::SAR:
1682  case Token::SHL:
1683  case Token::SHR:
1684  GenerateRegisterArgsPush(masm);
1685  break;
1686  default:
1687  UNREACHABLE();
1688  }
1689 
1690  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
1691  result_type_ == BinaryOpIC::SMI) {
1692  GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
1693  } else {
1694  GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
1695  }
1696  __ bind(&call_runtime);
1697  switch (op_) {
1698  case Token::ADD:
1699  case Token::SUB:
1700  case Token::MUL:
1701  case Token::DIV:
1702  GenerateTypeTransition(masm);
1703  break;
1704  case Token::MOD:
1705  case Token::BIT_OR:
1706  case Token::BIT_AND:
1707  case Token::BIT_XOR:
1708  case Token::SAR:
1709  case Token::SHL:
1710  case Token::SHR:
1711  GenerateTypeTransitionWithSavedArgs(masm);
1712  break;
1713  default:
1714  UNREACHABLE();
1715  }
1716 }
1717 
1718 
1719 void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
1720  ASSERT(operands_type_ == BinaryOpIC::STRING);
1721  ASSERT(op_ == Token::ADD);
1722  // Try to add arguments as strings, otherwise, transition to the generic
1723  // BinaryOpIC type.
1724  GenerateAddStrings(masm);
1725  GenerateTypeTransition(masm);
1726 }
1727 
1728 
1729 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
1730  Label call_runtime;
1731  ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
1732  ASSERT(op_ == Token::ADD);
1733  // If both arguments are strings, call the string add stub.
1734  // Otherwise, do a transition.
1735 
1736  // Registers containing left and right operands respectively.
1737  Register left = edx;
1738  Register right = eax;
1739 
1740  // Test if left operand is a string.
1741  __ JumpIfSmi(left, &call_runtime, Label::kNear);
1742  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
1743  __ j(above_equal, &call_runtime, Label::kNear);
1744 
1745  // Test if right operand is a string.
1746  __ JumpIfSmi(right, &call_runtime, Label::kNear);
1747  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
1748  __ j(above_equal, &call_runtime, Label::kNear);
1749 
1750  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
1751  GenerateRegisterArgsPush(masm);
1752  __ TailCallStub(&string_add_stub);
1753 
1754  __ bind(&call_runtime);
1755  GenerateTypeTransition(masm);
1756 }
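// ---- Editor's note: not part of the original file ----
// CmpObjectType above loads the operand's map into ecx and compares its
// instance type against FIRST_NONSTRING_TYPE. String instance types sort
// below all non-string types, so above_equal means "not a string".
// -------------------------------------------------------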
1757 
1758 
1759 // Input:
1760 // edx: left operand (tagged)
1761 // eax: right operand (tagged)
1762 // Output:
1763 // eax: result (tagged)
1764 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
1765  Label call_runtime;
1766  ASSERT(operands_type_ == BinaryOpIC::INT32);
1767 
1768  // Floating point case.
1769  switch (op_) {
1770  case Token::ADD:
1771  case Token::SUB:
1772  case Token::MUL:
1773  case Token::DIV:
1774  case Token::MOD: {
1775  Label not_floats;
1776  Label not_int32;
1777  if (CpuFeatures::IsSupported(SSE2)) {
1778  CpuFeatures::Scope use_sse2(SSE2);
1779  FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
1780  FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
1781  if (op_ == Token::MOD) {
1782  GenerateRegisterArgsPush(masm);
1783  __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
1784  } else {
1785  switch (op_) {
1786  case Token::ADD: __ addsd(xmm0, xmm1); break;
1787  case Token::SUB: __ subsd(xmm0, xmm1); break;
1788  case Token::MUL: __ mulsd(xmm0, xmm1); break;
1789  case Token::DIV: __ divsd(xmm0, xmm1); break;
1790  default: UNREACHABLE();
1791  }
1792  // If the result type is currently Int32, check that the result fits in an int32.
1793  if (result_type_ <= BinaryOpIC::INT32) {
1794  __ cvttsd2si(ecx, Operand(xmm0));
1795  __ cvtsi2sd(xmm2, ecx);
1796  __ pcmpeqd(xmm2, xmm0);
1797  __ movmskpd(ecx, xmm2);
1798  __ test(ecx, Immediate(1));
1799  __ j(zero, &not_int32);
1800  }
1801  GenerateHeapResultAllocation(masm, &call_runtime);
1802  __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1803  __ ret(0);
1804  }
1805  } else { // SSE2 not available, use FPU.
1806  FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
1807  FloatingPointHelper::LoadFloatOperands(
1808  masm,
1809  ecx,
1810  FloatingPointHelper::ARGS_IN_REGISTERS);
1811  FloatingPointHelper::CheckFloatOperandsAreInt32(masm, &not_int32);
1812  if (op_ == Token::MOD) {
1813  // The operands are now on the FPU stack, but we don't need them.
1814  __ fstp(0);
1815  __ fstp(0);
1816  GenerateRegisterArgsPush(masm);
1817  __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
1818  } else {
1819  switch (op_) {
1820  case Token::ADD: __ faddp(1); break;
1821  case Token::SUB: __ fsubp(1); break;
1822  case Token::MUL: __ fmulp(1); break;
1823  case Token::DIV: __ fdivp(1); break;
1824  default: UNREACHABLE();
1825  }
1826  Label after_alloc_failure;
1827  GenerateHeapResultAllocation(masm, &after_alloc_failure);
1828  __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1829  __ ret(0);
1830  __ bind(&after_alloc_failure);
1831  __ fstp(0); // Pop FPU stack before calling runtime.
1832  __ jmp(&call_runtime);
1833  }
1834  }
1835 
1836  __ bind(&not_floats);
1837  __ bind(&not_int32);
1838  GenerateTypeTransition(masm);
1839  break;
1840  }
1841 
1842  case Token::BIT_OR:
1843  case Token::BIT_AND:
1844  case Token::BIT_XOR:
1845  case Token::SAR:
1846  case Token::SHL:
1847  case Token::SHR: {
1848  GenerateRegisterArgsPush(masm);
1849  Label not_floats;
1850  Label not_int32;
1851  Label non_smi_result;
1852  FloatingPointHelper::LoadUnknownsAsIntegers(masm,
1853  use_sse3_,
1854  &not_floats);
1855  FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_,
1856  &not_int32);
1857  switch (op_) {
1858  case Token::BIT_OR: __ or_(eax, ecx); break;
1859  case Token::BIT_AND: __ and_(eax, ecx); break;
1860  case Token::BIT_XOR: __ xor_(eax, ecx); break;
1861  case Token::SAR: __ sar_cl(eax); break;
1862  case Token::SHL: __ shl_cl(eax); break;
1863  case Token::SHR: __ shr_cl(eax); break;
1864  default: UNREACHABLE();
1865  }
1866  if (op_ == Token::SHR) {
1867  // Check if result is non-negative and fits in a smi.
1868  __ test(eax, Immediate(0xc0000000));
1869  __ j(not_zero, &call_runtime);
1870  } else {
1871  // Check if result fits in a smi.
1872  __ cmp(eax, 0xc0000000);
1873  __ j(negative, &non_smi_result, Label::kNear);
1874  }
1875  // Tag smi result and return.
1876  __ SmiTag(eax);
1877  __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
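// ---- Editor's note: illustrative sketch, not part of the original file ----
// Why the 0xc0000000 tests above work: an ia32 smi carries a signed 31-bit
// payload, so a 32-bit result fits exactly when its top two bits agree:
//   bool FitsSmi(int32_t v) { return v >= -(1 << 30) && v < (1 << 30); }
// SHR produces an unsigned result, so any value with either of the top two
// bits set cannot be a smi and is handed to the runtime instead.
// -------------------------------------------------------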
1878 
1879  // All ops except SHR return a signed int32 that we store in
1880  // a HeapNumber.
1881  if (op_ != Token::SHR) {
1882  __ bind(&non_smi_result);
1883  // Allocate a heap number if needed.
1884  __ mov(ebx, eax); // ebx: result
1885  Label skip_allocation;
1886  switch (mode_) {
1887  case OVERWRITE_LEFT:
1888  case OVERWRITE_RIGHT:
1889  // If the operand was an object, we skip the
1890  // allocation of a heap number.
1891  __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
1892  1 * kPointerSize : 2 * kPointerSize));
1893  __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
1894  // Fall through!
1895  case NO_OVERWRITE:
1896  __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
1897  __ bind(&skip_allocation);
1898  break;
1899  default: UNREACHABLE();
1900  }
1901  // Store the result in the HeapNumber and return.
1902  if (CpuFeatures::IsSupported(SSE2)) {
1903  CpuFeatures::Scope use_sse2(SSE2);
1904  __ cvtsi2sd(xmm0, ebx);
1905  __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1906  } else {
1907  __ mov(Operand(esp, 1 * kPointerSize), ebx);
1908  __ fild_s(Operand(esp, 1 * kPointerSize));
1909  __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1910  }
1911  __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
1912  }
1913 
1914  __ bind(&not_floats);
1915  __ bind(&not_int32);
1916  GenerateTypeTransitionWithSavedArgs(masm);
1917  break;
1918  }
1919  default: UNREACHABLE(); break;
1920  }
1921 
1922  // If an allocation fails, or SHR hits a hard case, use the runtime system to
1923  // get the correct result.
1924  __ bind(&call_runtime);
1925 
1926  switch (op_) {
1927  case Token::ADD:
1928  GenerateRegisterArgsPush(masm);
1929  __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
1930  break;
1931  case Token::SUB:
1932  GenerateRegisterArgsPush(masm);
1933  __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
1934  break;
1935  case Token::MUL:
1936  GenerateRegisterArgsPush(masm);
1937  __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
1938  break;
1939  case Token::DIV:
1940  GenerateRegisterArgsPush(masm);
1941  __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
1942  break;
1943  case Token::MOD:
1944  break;
1945  case Token::BIT_OR:
1946  __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
1947  break;
1948  case Token::BIT_AND:
1949  __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
1950  break;
1951  case Token::BIT_XOR:
1952  __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
1953  break;
1954  case Token::SAR:
1955  __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
1956  break;
1957  case Token::SHL:
1958  __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
1959  break;
1960  case Token::SHR:
1961  __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
1962  break;
1963  default:
1964  UNREACHABLE();
1965  }
1966 }
1967 
1968 
1969 void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
1970  if (op_ == Token::ADD) {
1971  // Handle string addition here, because it is the only operation
1972  // that does not do a ToNumber conversion on the operands.
1973  GenerateAddStrings(masm);
1974  }
1975 
1976  Factory* factory = masm->isolate()->factory();
1977 
1978  // Convert oddball arguments to numbers.
1979  Label check, done;
1980  __ cmp(edx, factory->undefined_value());
1981  __ j(not_equal, &check, Label::kNear);
1982  if (Token::IsBitOp(op_)) {
1983  __ xor_(edx, edx);
1984  } else {
1985  __ mov(edx, Immediate(factory->nan_value()));
1986  }
1987  __ jmp(&done, Label::kNear);
1988  __ bind(&check);
1989  __ cmp(eax, factory->undefined_value());
1990  __ j(not_equal, &done, Label::kNear);
1991  if (Token::IsBitOp(op_)) {
1992  __ xor_(eax, eax);
1993  } else {
1994  __ mov(eax, Immediate(factory->nan_value()));
1995  }
1996  __ bind(&done);
1997 
1998  GenerateHeapNumberStub(masm);
1999 }
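// ---- Editor's note: not part of the original file ----
// Only undefined is special-cased above: it becomes 0 for the bitwise
// operators (ToInt32(undefined) == 0) and NaN for the arithmetic ones
// (ToNumber(undefined) is NaN). All other values fall through unchanged
// to the heap number stub.
// -------------------------------------------------------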
2000 
2001 
2002 void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
2003  Label call_runtime;
2004 
2005  // Floating point case.
2006  switch (op_) {
2007  case Token::ADD:
2008  case Token::SUB:
2009  case Token::MUL:
2010  case Token::DIV: {
2011  Label not_floats;
2012  if (CpuFeatures::IsSupported(SSE2)) {
2013  CpuFeatures::Scope use_sse2(SSE2);
2014  FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
2015 
2016  switch (op_) {
2017  case Token::ADD: __ addsd(xmm0, xmm1); break;
2018  case Token::SUB: __ subsd(xmm0, xmm1); break;
2019  case Token::MUL: __ mulsd(xmm0, xmm1); break;
2020  case Token::DIV: __ divsd(xmm0, xmm1); break;
2021  default: UNREACHABLE();
2022  }
2023  GenerateHeapResultAllocation(masm, &call_runtime);
2024  __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
2025  __ ret(0);
2026  } else { // SSE2 not available, use FPU.
2027  FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
2028  FloatingPointHelper::LoadFloatOperands(
2029  masm,
2030  ecx,
2031  FloatingPointHelper::ARGS_IN_REGISTERS);
2032  switch (op_) {
2033  case Token::ADD: __ faddp(1); break;
2034  case Token::SUB: __ fsubp(1); break;
2035  case Token::MUL: __ fmulp(1); break;
2036  case Token::DIV: __ fdivp(1); break;
2037  default: UNREACHABLE();
2038  }
2039  Label after_alloc_failure;
2040  GenerateHeapResultAllocation(masm, &after_alloc_failure);
2041  __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2042  __ ret(0);
2043  __ bind(&after_alloc_failure);
2044  __ fstp(0); // Pop FPU stack before calling runtime.
2045  __ jmp(&call_runtime);
2046  }
2047 
2048  __ bind(&not_floats);
2049  GenerateTypeTransition(masm);
2050  break;
2051  }
2052 
2053  case Token::MOD: {
2054  // For MOD we go directly to runtime in the non-smi case.
2055  break;
2056  }
2057  case Token::BIT_OR:
2058  case Token::BIT_AND:
2059  case Token::BIT_XOR:
2060  case Token::SAR:
2061  case Token::SHL:
2062  case Token::SHR: {
2063  GenerateRegisterArgsPush(masm);
2064  Label not_floats;
2065  Label non_smi_result;
2066  FloatingPointHelper::LoadUnknownsAsIntegers(masm,
2067  use_sse3_,
2068  &not_floats);
2069  switch (op_) {
2070  case Token::BIT_OR: __ or_(eax, ecx); break;
2071  case Token::BIT_AND: __ and_(eax, ecx); break;
2072  case Token::BIT_XOR: __ xor_(eax, ecx); break;
2073  case Token::SAR: __ sar_cl(eax); break;
2074  case Token::SHL: __ shl_cl(eax); break;
2075  case Token::SHR: __ shr_cl(eax); break;
2076  default: UNREACHABLE();
2077  }
2078  if (op_ == Token::SHR) {
2079  // Check if result is non-negative and fits in a smi.
2080  __ test(eax, Immediate(0xc0000000));
2081  __ j(not_zero, &call_runtime);
2082  } else {
2083  // Check if result fits in a smi.
2084  __ cmp(eax, 0xc0000000);
2085  __ j(negative, &non_smi_result, Label::kNear);
2086  }
2087  // Tag smi result and return.
2088  __ SmiTag(eax);
2089  __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
2090 
2091  // All ops except SHR return a signed int32 that we store in
2092  // a HeapNumber.
2093  if (op_ != Token::SHR) {
2094  __ bind(&non_smi_result);
2095  // Allocate a heap number if needed.
2096  __ mov(ebx, eax); // ebx: result
2097  Label skip_allocation;
2098  switch (mode_) {
2099  case OVERWRITE_LEFT:
2100  case OVERWRITE_RIGHT:
2101  // If the operand was an object, we skip the
2102  // allocation of a heap number.
2103  __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
2104  1 * kPointerSize : 2 * kPointerSize));
2105  __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
2106  // Fall through!
2107  case NO_OVERWRITE:
2108  __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
2109  __ bind(&skip_allocation);
2110  break;
2111  default: UNREACHABLE();
2112  }
2113  // Store the result in the HeapNumber and return.
2114  if (CpuFeatures::IsSupported(SSE2)) {
2115  CpuFeatures::Scope use_sse2(SSE2);
2116  __ cvtsi2sd(xmm0, ebx);
2117  __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
2118  } else {
2119  __ mov(Operand(esp, 1 * kPointerSize), ebx);
2120  __ fild_s(Operand(esp, 1 * kPointerSize));
2121  __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2122  }
2123  __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
2124  }
2125 
2126  __ bind(&not_floats);
2127  GenerateTypeTransitionWithSavedArgs(masm);
2128  break;
2129  }
2130  default: UNREACHABLE(); break;
2131  }
2132 
2133  // If an allocation fails, or SHR or MOD hit a hard case,
2134  // use the runtime system to get the correct result.
2135  __ bind(&call_runtime);
2136 
2137  switch (op_) {
2138  case Token::ADD:
2139  GenerateRegisterArgsPush(masm);
2140  __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
2141  break;
2142  case Token::SUB:
2143  GenerateRegisterArgsPush(masm);
2144  __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
2145  break;
2146  case Token::MUL:
2147  GenerateRegisterArgsPush(masm);
2148  __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
2149  break;
2150  case Token::DIV:
2151  GenerateRegisterArgsPush(masm);
2152  __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
2153  break;
2154  case Token::MOD:
2155  GenerateRegisterArgsPush(masm);
2156  __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
2157  break;
2158  case Token::BIT_OR:
2159  __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
2160  break;
2161  case Token::BIT_AND:
2162  __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
2163  break;
2164  case Token::BIT_XOR:
2165  __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
2166  break;
2167  case Token::SAR:
2168  __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
2169  break;
2170  case Token::SHL:
2171  __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
2172  break;
2173  case Token::SHR:
2174  __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
2175  break;
2176  default:
2177  UNREACHABLE();
2178  }
2179 }
2180 
2181 
2182 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
2183  Label call_runtime;
2184 
2185  Counters* counters = masm->isolate()->counters();
2186  __ IncrementCounter(counters->generic_binary_stub_calls(), 1);
2187 
2188  switch (op_) {
2189  case Token::ADD:
2190  case Token::SUB:
2191  case Token::MUL:
2192  case Token::DIV:
2193  break;
2194  case Token::MOD:
2195  case Token::BIT_OR:
2196  case Token::BIT_AND:
2197  case Token::BIT_XOR:
2198  case Token::SAR:
2199  case Token::SHL:
2200  case Token::SHR:
2201  GenerateRegisterArgsPush(masm);
2202  break;
2203  default:
2204  UNREACHABLE();
2205  }
2206 
2207  GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
2208 
2209  // Floating point case.
2210  switch (op_) {
2211  case Token::ADD:
2212  case Token::SUB:
2213  case Token::MUL:
2214  case Token::DIV: {
2215  Label not_floats;
2216  if (CpuFeatures::IsSupported(SSE2)) {
2217  CpuFeatures::Scope use_sse2(SSE2);
2218  FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
2219 
2220  switch (op_) {
2221  case Token::ADD: __ addsd(xmm0, xmm1); break;
2222  case Token::SUB: __ subsd(xmm0, xmm1); break;
2223  case Token::MUL: __ mulsd(xmm0, xmm1); break;
2224  case Token::DIV: __ divsd(xmm0, xmm1); break;
2225  default: UNREACHABLE();
2226  }
2227  GenerateHeapResultAllocation(masm, &call_runtime);
2228  __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
2229  __ ret(0);
2230  } else { // SSE2 not available, use FPU.
2231  FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
2232  FloatingPointHelper::LoadFloatOperands(
2233  masm,
2234  ecx,
2235  FloatingPointHelper::ARGS_IN_REGISTERS);
2236  switch (op_) {
2237  case Token::ADD: __ faddp(1); break;
2238  case Token::SUB: __ fsubp(1); break;
2239  case Token::MUL: __ fmulp(1); break;
2240  case Token::DIV: __ fdivp(1); break;
2241  default: UNREACHABLE();
2242  }
2243  Label after_alloc_failure;
2244  GenerateHeapResultAllocation(masm, &after_alloc_failure);
2245  __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2246  __ ret(0);
2247  __ bind(&after_alloc_failure);
2248  __ fstp(0); // Pop FPU stack before calling runtime.
2249  __ jmp(&call_runtime);
2250  }
2251  __ bind(&not_floats);
2252  break;
2253  }
2254  case Token::MOD: {
2255  // For MOD we go directly to runtime in the non-smi case.
2256  break;
2257  }
2258  case Token::BIT_OR:
2259  case Token::BIT_AND:
2260  case Token::BIT_XOR:
2261  case Token::SAR:
2262  case Token::SHL:
2263  case Token::SHR: {
2264  Label non_smi_result;
2265  FloatingPointHelper::LoadUnknownsAsIntegers(masm,
2266  use_sse3_,
2267  &call_runtime);
2268  switch (op_) {
2269  case Token::BIT_OR: __ or_(eax, ecx); break;
2270  case Token::BIT_AND: __ and_(eax, ecx); break;
2271  case Token::BIT_XOR: __ xor_(eax, ecx); break;
2272  case Token::SAR: __ sar_cl(eax); break;
2273  case Token::SHL: __ shl_cl(eax); break;
2274  case Token::SHR: __ shr_cl(eax); break;
2275  default: UNREACHABLE();
2276  }
2277  if (op_ == Token::SHR) {
2278  // Check if result is non-negative and fits in a smi.
2279  __ test(eax, Immediate(0xc0000000));
2280  __ j(not_zero, &call_runtime);
2281  } else {
2282  // Check if result fits in a smi.
2283  __ cmp(eax, 0xc0000000);
2284  __ j(negative, &non_smi_result, Label::kNear);
2285  }
2286  // Tag smi result and return.
2287  __ SmiTag(eax);
2288  __ ret(2 * kPointerSize); // Drop the arguments from the stack.
2289 
2290  // All ops except SHR return a signed int32 that we store in
2291  // a HeapNumber.
2292  if (op_ != Token::SHR) {
2293  __ bind(&non_smi_result);
2294  // Allocate a heap number if needed.
2295  __ mov(ebx, eax); // ebx: result
2296  Label skip_allocation;
2297  switch (mode_) {
2298  case OVERWRITE_LEFT:
2299  case OVERWRITE_RIGHT:
2300  // If the operand was an object, we skip the
2301  // allocation of a heap number.
2302  __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
2303  1 * kPointerSize : 2 * kPointerSize));
2304  __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
2305  // Fall through!
2306  case NO_OVERWRITE:
2307  __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
2308  __ bind(&skip_allocation);
2309  break;
2310  default: UNREACHABLE();
2311  }
2312  // Store the result in the HeapNumber and return.
2313  if (CpuFeatures::IsSupported(SSE2)) {
2314  CpuFeatures::Scope use_sse2(SSE2);
2315  __ cvtsi2sd(xmm0, ebx);
2316  __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
2317  } else {
2318  __ mov(Operand(esp, 1 * kPointerSize), ebx);
2319  __ fild_s(Operand(esp, 1 * kPointerSize));
2320  __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2321  }
2322  __ ret(2 * kPointerSize);
2323  }
2324  break;
2325  }
2326  default: UNREACHABLE(); break;
2327  }
2328 
2329  // If all else fails, use the runtime system to get the correct
2330  // result.
2331  __ bind(&call_runtime);
2332  switch (op_) {
2333  case Token::ADD: {
2334  GenerateAddStrings(masm);
2335  GenerateRegisterArgsPush(masm);
2336  __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
2337  break;
2338  }
2339  case Token::SUB:
2340  GenerateRegisterArgsPush(masm);
2341  __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
2342  break;
2343  case Token::MUL:
2344  GenerateRegisterArgsPush(masm);
2345  __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
2346  break;
2347  case Token::DIV:
2348  GenerateRegisterArgsPush(masm);
2349  __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
2350  break;
2351  case Token::MOD:
2352  __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
2353  break;
2354  case Token::BIT_OR:
2355  __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
2356  break;
2357  case Token::BIT_AND:
2358  __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
2359  break;
2360  case Token::BIT_XOR:
2361  __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
2362  break;
2363  case Token::SAR:
2364  __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
2365  break;
2366  case Token::SHL:
2367  __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
2368  break;
2369  case Token::SHR:
2370  __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
2371  break;
2372  default:
2373  UNREACHABLE();
2374  }
2375 }
2376 
2377 
2378 void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
2379  ASSERT(op_ == Token::ADD);
2380  Label left_not_string, call_runtime;
2381 
2382  // Registers containing left and right operands respectively.
2383  Register left = edx;
2384  Register right = eax;
2385 
2386  // Test if left operand is a string.
2387  __ JumpIfSmi(left, &left_not_string, Label::kNear);
2388  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
2389  __ j(above_equal, &left_not_string, Label::kNear);
2390 
2391  StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
2392  GenerateRegisterArgsPush(masm);
2393  __ TailCallStub(&string_add_left_stub);
2394 
2395  // Left operand is not a string, test right.
2396  __ bind(&left_not_string);
2397  __ JumpIfSmi(right, &call_runtime, Label::kNear);
2398  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
2399  __ j(above_equal, &call_runtime, Label::kNear);
2400 
2401  StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
2402  GenerateRegisterArgsPush(masm);
2403  __ TailCallStub(&string_add_right_stub);
2404 
2405  // Neither argument is a string.
2406  __ bind(&call_runtime);
2407 }
2408 
2409 
2410 void BinaryOpStub::GenerateHeapResultAllocation(
2411  MacroAssembler* masm,
2412  Label* alloc_failure) {
2413  Label skip_allocation;
2414  OverwriteMode mode = mode_;
2415  switch (mode) {
2416  case OVERWRITE_LEFT: {
2417  // If the argument in edx is already an object, we skip the
2418  // allocation of a heap number.
2419  __ JumpIfNotSmi(edx, &skip_allocation, Label::kNear);
2420  // Allocate a heap number for the result. Keep eax and edx intact
2421  // for the possible runtime call.
2422  __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
2423  // Now edx can be overwritten, losing one of the arguments, as we are
2424  // done with it and will not need it any more.
2425  __ mov(edx, ebx);
2426  __ bind(&skip_allocation);
2427  // Use the object in edx as the result holder.
2428  __ mov(eax, edx);
2429  break;
2430  }
2431  case OVERWRITE_RIGHT:
2432  // If the argument in eax is already an object, we skip the
2433  // allocation of a heap number.
2434  __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
2435  // Fall through!
2436  case NO_OVERWRITE:
2437  // Allocate a heap number for the result. Keep eax and edx intact
2438  // for the possible runtime call.
2439  __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
2440  // Now eax can be overwritten, losing one of the arguments, as we are
2441  // done with it and will not need it any more.
2442  __ mov(eax, ebx);
2443  __ bind(&skip_allocation);
2444  break;
2445  default: UNREACHABLE();
2446  }
2447 }
2448 
2449 
2450 void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
2451  __ pop(ecx);
2452  __ push(edx);
2453  __ push(eax);
2454  __ push(ecx);
2455 }
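// ---- Editor's note: not part of the original file ----
// The pop/push sequence above rewrites the stack from [ret, ...] to
// [ret, eax, edx, ...] (top of stack first): the two operand registers
// become stack arguments while the return address stays on top.
// -------------------------------------------------------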
2456 
2457 
2458 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
2459  // TAGGED case:
2460  // Input:
2461  // esp[4]: tagged number input argument (should be number).
2462  // esp[0]: return address.
2463  // Output:
2464  // eax: tagged double result.
2465  // UNTAGGED case:
2466 // Input:
2467  // esp[0]: return address.
2468  // xmm1: untagged double input argument
2469  // Output:
2470  // xmm1: untagged double result.
2471 
2472  Label runtime_call;
2473  Label runtime_call_clear_stack;
2474  Label skip_cache;
2475  const bool tagged = (argument_type_ == TAGGED);
2476  if (tagged) {
2477  // Test that eax is a number.
2478  Label input_not_smi;
2479  Label loaded;
2480  __ mov(eax, Operand(esp, kPointerSize));
2481  __ JumpIfNotSmi(eax, &input_not_smi, Label::kNear);
2482  // Input is a smi. Untag and load it onto the FPU stack.
2483  // Then load the low and high words of the double into ebx, edx.
2484  STATIC_ASSERT(kSmiTagSize == 1);
2485  __ sar(eax, 1);
2486  __ sub(esp, Immediate(2 * kPointerSize));
2487  __ mov(Operand(esp, 0), eax);
2488  __ fild_s(Operand(esp, 0));
2489  __ fst_d(Operand(esp, 0));
2490  __ pop(edx);
2491  __ pop(ebx);
2492  __ jmp(&loaded, Label::kNear);
2493  __ bind(&input_not_smi);
2494  // Check if input is a HeapNumber.
2495  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
2496  Factory* factory = masm->isolate()->factory();
2497  __ cmp(ebx, Immediate(factory->heap_number_map()));
2498  __ j(not_equal, &runtime_call);
2499  // Input is a HeapNumber. Push it on the FPU stack and load its
2500  // low and high words into ebx, edx.
2501  __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
2502  __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
2503  __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));
2504 
2505  __ bind(&loaded);
2506  } else { // UNTAGGED.
2507  if (CpuFeatures::IsSupported(SSE4_1)) {
2508  CpuFeatures::Scope sse4_scope(SSE4_1);
2509  __ pextrd(edx, xmm1, 0x1); // copy xmm1[63..32] to edx.
2510  } else {
2511  __ pshufd(xmm0, xmm1, 0x1);
2512  __ movd(edx, xmm0);
2513  }
2514  __ movd(ebx, xmm1);
2515  }
2516 
2517  // ST[0] or xmm1 == double value
2518  // ebx = low 32 bits of double value
2519  // edx = high 32 bits of double value
2520  // Compute hash (the shifts are arithmetic):
2521  // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
2522  __ mov(ecx, ebx);
2523  __ xor_(ecx, edx);
2524  __ mov(eax, ecx);
2525  __ sar(eax, 16);
2526  __ xor_(ecx, eax);
2527  __ mov(eax, ecx);
2528  __ sar(eax, 8);
2529  __ xor_(ecx, eax);
2530  ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
2531  __ and_(ecx,
2532  Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
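// ---- Editor's note: illustrative sketch, not part of the original file ----
// The mov/xor/sar sequence above, restated as plain C++; the sar shifts are
// arithmetic, hence the int32_t casts:
static inline int TranscendentalCacheHash(uint32_t low, uint32_t high) {
  uint32_t h = low ^ high;
  h ^= static_cast<uint32_t>(static_cast<int32_t>(h) >> 16);
  h ^= static_cast<uint32_t>(static_cast<int32_t>(h) >> 8);
  return static_cast<int>(h & (TranscendentalCache::SubCache::kCacheSize - 1));
}
// -------------------------------------------------------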
2533 
2534  // ST[0] or xmm1 == double value.
2535  // ebx = low 32 bits of double value.
2536  // edx = high 32 bits of double value.
2537  // ecx = TranscendentalCache::hash(double value).
2538  ExternalReference cache_array =
2539  ExternalReference::transcendental_cache_array_address(masm->isolate());
2540  __ mov(eax, Immediate(cache_array));
2541  int cache_array_index =
2542  type_ * sizeof(masm->isolate()->transcendental_cache()->caches_[0]);
2543  __ mov(eax, Operand(eax, cache_array_index));
2544  // eax now points to the cache for the type type_.
2545  // If NULL, the cache hasn't been initialized yet, so go through runtime.
2546  __ test(eax, eax);
2547  __ j(zero, &runtime_call_clear_stack);
2548 #ifdef DEBUG
2549  // Check that the layout of cache elements matches expectations.
2550  { TranscendentalCache::SubCache::Element test_elem[2];
2551  char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
2552  char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
2553  char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
2554  char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
2555  char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
2556  CHECK_EQ(12, elem2_start - elem_start); // Two uint32_t's and a pointer.
2557  CHECK_EQ(0, elem_in0 - elem_start);
2558  CHECK_EQ(kIntSize, elem_in1 - elem_start);
2559  CHECK_EQ(2 * kIntSize, elem_out - elem_start);
2560  }
2561 #endif
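// ---- Editor's note: illustrative sketch, not part of the original file ----
// The element layout the DEBUG block above verifies, as a struct (field
// names follow the test_elem accesses). Each entry is 12 bytes, which is
// why the two lea instructions below scale the hash by 3 and then by 4:
// struct Element {
//   uint32_t in[2];  // Low and high words of the input double.
//   Object* output;  // Cached result, or NULL.
// };
// -------------------------------------------------------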
2562  // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12].
2563  __ lea(ecx, Operand(ecx, ecx, times_2, 0));
2564  __ lea(ecx, Operand(eax, ecx, times_4, 0));
2565  // Check if the cache matches: the double value is stored as a uint32_t[2] array.
2566  Label cache_miss;
2567  __ cmp(ebx, Operand(ecx, 0));
2568  __ j(not_equal, &cache_miss, Label::kNear);
2569  __ cmp(edx, Operand(ecx, kIntSize));
2570  __ j(not_equal, &cache_miss, Label::kNear);
2571  // Cache hit!
2572  Counters* counters = masm->isolate()->counters();
2573  __ IncrementCounter(counters->transcendental_cache_hit(), 1);
2574  __ mov(eax, Operand(ecx, 2 * kIntSize));
2575  if (tagged) {
2576  __ fstp(0);
2577  __ ret(kPointerSize);
2578  } else { // UNTAGGED.
2579  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2580  __ Ret();
2581  }
2582 
2583  __ bind(&cache_miss);
2584  __ IncrementCounter(counters->transcendental_cache_miss(), 1);
2585  // Update cache with new value.
2586  // We are short on registers, so use no_reg as scratch.
2587  // This gives slightly larger code.
2588  if (tagged) {
2589  __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
2590  } else { // UNTAGGED.
2591  __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
2592  __ sub(esp, Immediate(kDoubleSize));
2593  __ movdbl(Operand(esp, 0), xmm1);
2594  __ fld_d(Operand(esp, 0));
2595  __ add(esp, Immediate(kDoubleSize));
2596  }
2597  GenerateOperation(masm, type_);
2598  __ mov(Operand(ecx, 0), ebx);
2599  __ mov(Operand(ecx, kIntSize), edx);
2600  __ mov(Operand(ecx, 2 * kIntSize), eax);
2601  __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2602  if (tagged) {
2603  __ ret(kPointerSize);
2604  } else { // UNTAGGED.
2605  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2606  __ Ret();
2607 
2608  // Skip cache and return answer directly, only in untagged case.
2609  __ bind(&skip_cache);
2610  __ sub(esp, Immediate(kDoubleSize));
2611  __ movdbl(Operand(esp, 0), xmm1);
2612  __ fld_d(Operand(esp, 0));
2613  GenerateOperation(masm, type_);
2614  __ fstp_d(Operand(esp, 0));
2615  __ movdbl(xmm1, Operand(esp, 0));
2616  __ add(esp, Immediate(kDoubleSize));
2617  // We return the value in xmm1 without adding it to the cache, but
2618  // we cause a scavenging GC so that future allocations will succeed.
2619  {
2620  FrameScope scope(masm, StackFrame::INTERNAL);
2621  // Allocate an unused object bigger than a HeapNumber.
2622  __ push(Immediate(Smi::FromInt(2 * kDoubleSize)));
2623  __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
2624  }
2625  __ Ret();
2626  }
2627 
2628  // Call runtime, doing whatever allocation and cleanup is necessary.
2629  if (tagged) {
2630  __ bind(&runtime_call_clear_stack);
2631  __ fstp(0);
2632  __ bind(&runtime_call);
2633  ExternalReference runtime =
2634  ExternalReference(RuntimeFunction(), masm->isolate());
2635  __ TailCallExternalReference(runtime, 1, 1);
2636  } else { // UNTAGGED.
2637  __ bind(&runtime_call_clear_stack);
2638  __ bind(&runtime_call);
2639  __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
2640  __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
2641  {
2642  FrameScope scope(masm, StackFrame::INTERNAL);
2643  __ push(eax);
2644  __ CallRuntime(RuntimeFunction(), 1);
2645  }
2646  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2647  __ Ret();
2648  }
2649 }
2650 
2651 
2652 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
2653  switch (type_) {
2654  case TranscendentalCache::SIN: return Runtime::kMath_sin;
2655  case TranscendentalCache::COS: return Runtime::kMath_cos;
2656  case TranscendentalCache::TAN: return Runtime::kMath_tan;
2657  case TranscendentalCache::LOG: return Runtime::kMath_log;
2658  default:
2659  UNIMPLEMENTED();
2660  return Runtime::kAbort;
2661  }
2662 }
2663 
2664 
2665 void TranscendentalCacheStub::GenerateOperation(
2666  MacroAssembler* masm, TranscendentalCache::Type type) {
2667  // Only free register is edi.
2668  // Input value is on FP stack, and also in ebx/edx.
2669  // Input value is possibly in xmm1.
2670  // Address of result (a newly allocated HeapNumber) may be in eax.
2671  if (type == TranscendentalCache::SIN ||
2672  type == TranscendentalCache::COS ||
2673  type == TranscendentalCache::TAN) {
2674  // Both fsin and fcos require arguments in the range +/-2^63 and
2675  // return NaN for infinities and NaN. They can share all code except
2676  // the actual fsin/fcos operation.
2677  Label in_range, done;
2678  // If argument is outside the range -2^63..2^63, fsin/cos doesn't
2679  // work. We must reduce it to the appropriate range.
2680  __ mov(edi, edx);
2681  __ and_(edi, Immediate(0x7ff00000)); // Exponent only.
2682  int supported_exponent_limit =
2683  (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
2684  __ cmp(edi, Immediate(supported_exponent_limit));
2685  __ j(below, &in_range, Label::kNear);
2686  // Check for infinity and NaN. Both return NaN for sin.
2687  __ cmp(edi, Immediate(0x7ff00000));
2688  Label non_nan_result;
2689  __ j(not_equal, &non_nan_result, Label::kNear);
2690  // Input is +/-Infinity or NaN. Result is NaN.
2691  __ fstp(0);
2692  // NaN is represented by 0x7ff8000000000000.
2693  __ push(Immediate(0x7ff80000));
2694  __ push(Immediate(0));
2695  __ fld_d(Operand(esp, 0));
2696  __ add(esp, Immediate(2 * kPointerSize));
2697  __ jmp(&done, Label::kNear);
2698 
2699  __ bind(&non_nan_result);
2700 
2701  // Use fprem1 to restrict the argument to the range +/-2*PI.
2702  __ mov(edi, eax); // Save eax before using fnstsw_ax.
2703  __ fldpi();
2704  __ fadd(0);
2705  __ fld(1);
2706  // FPU Stack: input, 2*pi, input.
2707  {
2708  Label no_exceptions;
2709  __ fwait();
2710  __ fnstsw_ax();
2711  // Clear exceptions if the Invalid Operation or Zero Divide flags are set.
2712  __ test(eax, Immediate(5));
2713  __ j(zero, &no_exceptions, Label::kNear);
2714  __ fnclex();
2715  __ bind(&no_exceptions);
2716  }
2717 
2718  // Compute st(0) % st(1)
2719  {
2720  Label partial_remainder_loop;
2721  __ bind(&partial_remainder_loop);
2722  __ fprem1();
2723  __ fwait();
2724  __ fnstsw_ax();
2725  __ test(eax, Immediate(0x400 /* C2 */));
2726  // If C2 is set, computation only has partial result. Loop to
2727  // continue computation.
2728  __ j(not_zero, &partial_remainder_loop);
2729  }
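// ---- Editor's note: not part of the original file ----
// fprem1 computes the IEEE partial remainder st(0) REM st(1), reducing the
// exponent difference by a bounded amount per execution; it sets the C2
// status flag while the result is still partial, hence the loop above.
// -------------------------------------------------------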
2730  // FPU Stack: input, 2*pi, input % 2*pi
2731  __ fstp(2);
2732  __ fstp(0);
2733  __ mov(eax, edi); // Restore eax (allocated HeapNumber pointer).
2734 
2735  // FPU Stack: input % 2*pi
2736  __ bind(&in_range);
2737  switch (type) {
2738  case TranscendentalCache::SIN:
2739  __ fsin();
2740  break;
2741  case TranscendentalCache::COS:
2742  __ fcos();
2743  break;
2744  case TranscendentalCache::TAN:
2745  // FPTAN computes the tangent of st(0) in place and pushes 1.0 onto the
2746  // FP register stack.
2747  __ fptan();
2748  __ fstp(0); // Pop FP register stack.
2749  break;
2750  default:
2751  UNREACHABLE();
2752  }
2753  __ bind(&done);
2754  } else {
2755  ASSERT(type == TranscendentalCache::LOG);
2756  __ fldln2();
2757  __ fxch();
2758  __ fyl2x();
2759  }
2760 }
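// ---- Editor's note: illustrative sketch, not part of the original file ----
// The LOG path above uses ln(x) = ln(2) * log2(x): fldln2 pushes ln(2),
// fxch moves the input to st(0), and fyl2x computes st(1) * log2(st(0)).
// In C++ terms (assumes <cmath> with M_LN2):
//   double Log(double x) { return M_LN2 * std::log2(x); }
// -------------------------------------------------------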
2761 
2762 
2763 // Input: edx, eax are the left and right objects of a bit op.
2764 // Output: eax, ecx are left and right integers for a bit op.
2765 void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm,
2766  bool use_sse3,
2767  Label* conversion_failure) {
2768  // Check float operands.
2769  Label arg1_is_object, check_undefined_arg1;
2770  Label arg2_is_object, check_undefined_arg2;
2771  Label load_arg2, done;
2772 
2773  // Test if arg1 is a Smi.
2774  __ JumpIfNotSmi(edx, &arg1_is_object, Label::kNear);
2775 
2776  __ SmiUntag(edx);
2777  __ jmp(&load_arg2);
2778 
2779  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
2780  __ bind(&check_undefined_arg1);
2781  Factory* factory = masm->isolate()->factory();
2782  __ cmp(edx, factory->undefined_value());
2783  __ j(not_equal, conversion_failure);
2784  __ mov(edx, Immediate(0));
2785  __ jmp(&load_arg2);
2786 
2787  __ bind(&arg1_is_object);
2788  __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
2789  __ cmp(ebx, factory->heap_number_map());
2790  __ j(not_equal, &check_undefined_arg1);
2791 
2792  // Get the untagged integer version of the edx heap number in ecx.
2793  IntegerConvert(masm, edx, use_sse3, conversion_failure);
2794  __ mov(edx, ecx);
2795 
2796  // Here edx has the untagged integer, eax has a Smi or a heap number.
2797  __ bind(&load_arg2);
2798 
2799  // Test if arg2 is a Smi.
2800  __ JumpIfNotSmi(eax, &arg2_is_object, Label::kNear);
2801 
2802  __ SmiUntag(eax);
2803  __ mov(ecx, eax);
2804  __ jmp(&done);
2805 
2806  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
2807  __ bind(&check_undefined_arg2);
2808  __ cmp(eax, factory->undefined_value());
2809  __ j(not_equal, conversion_failure);
2810  __ mov(ecx, Immediate(0));
2811  __ jmp(&done);
2812 
2813  __ bind(&arg2_is_object);
2814  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
2815  __ cmp(ebx, factory->heap_number_map());
2816  __ j(not_equal, &check_undefined_arg2);
2817 
2818  // Get the untagged integer version of the eax heap number in ecx.
2819  IntegerConvert(masm, eax, use_sse3, conversion_failure);
2820  __ bind(&done);
2821  __ mov(eax, edx);
2822 }
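// ---- Editor's note: illustrative sketch, not part of the original file ----
// The conversion above in pseudocode (ToInt32ForBitOp is a hypothetical
// name, not a V8 function):
//   int32_t ToInt32ForBitOp(value) {
//     if (value is smi)        return SmiUntag(value);
//     if (value is undefined)  return 0;                      // ECMA-262 9.5.
//     if (value is HeapNumber) return IntegerConvert(value);  // Truncate.
//     goto conversion_failure;                                // Anything else.
//   }
// -------------------------------------------------------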
2823 
2824 
2825 void FloatingPointHelper::CheckLoadedIntegersWereInt32(MacroAssembler* masm,
2826  bool use_sse3,
2827  Label* not_int32) {
2828  return;
2829 }
2830 
2831 
2832 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
2833  Register number) {
2834  Label load_smi, done;
2835 
2836  __ JumpIfSmi(number, &load_smi, Label::kNear);
2837  __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
2838  __ jmp(&done, Label::kNear);
2839 
2840  __ bind(&load_smi);
2841  __ SmiUntag(number);
2842  __ push(number);
2843  __ fild_s(Operand(esp, 0));
2844  __ pop(number);
2845 
2846  __ bind(&done);
2847 }
2848 
2849 
2850 void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
2851  Label load_smi_edx, load_eax, load_smi_eax, done;
2852  // Load operand in edx into xmm0.
2853  __ JumpIfSmi(edx, &load_smi_edx, Label::kNear);
2854  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
2855 
2856  __ bind(&load_eax);
2857  // Load operand in eax into xmm1.
2858  __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
2859  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2860  __ jmp(&done, Label::kNear);
2861 
2862  __ bind(&load_smi_edx);
2863  __ SmiUntag(edx); // Untag smi before converting to float.
2864  __ cvtsi2sd(xmm0, edx);
2865  __ SmiTag(edx); // Retag smi for heap number overwriting test.
2866  __ jmp(&load_eax);
2867 
2868  __ bind(&load_smi_eax);
2869  __ SmiUntag(eax); // Untag smi before converting to float.
2870  __ cvtsi2sd(xmm1, eax);
2871  __ SmiTag(eax); // Retag smi for heap number overwriting test.
2872 
2873  __ bind(&done);
2874 }
2875 
2876 
2877 void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
2878  Label* not_numbers) {
2879  Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
2880  // Load operand in edx into xmm0, or branch to not_numbers.
2881  __ JumpIfSmi(edx, &load_smi_edx, Label::kNear);
2882  Factory* factory = masm->isolate()->factory();
2883  __ cmp(FieldOperand(edx, HeapObject::kMapOffset), factory->heap_number_map());
2884  __ j(not_equal, not_numbers); // Argument in edx is not a number.
2885  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
2886  __ bind(&load_eax);
2887  // Load operand in eax into xmm1, or branch to not_numbers.
2888  __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
2889  __ cmp(FieldOperand(eax, HeapObject::kMapOffset), factory->heap_number_map());
2890  __ j(equal, &load_float_eax, Label::kNear);
2891  __ jmp(not_numbers); // Argument in eax is not a number.
2892  __ bind(&load_smi_edx);
2893  __ SmiUntag(edx); // Untag smi before converting to float.
2894  __ cvtsi2sd(xmm0, edx);
2895  __ SmiTag(edx); // Retag smi for heap number overwriting test.
2896  __ jmp(&load_eax);
2897  __ bind(&load_smi_eax);
2898  __ SmiUntag(eax); // Untag smi before converting to float.
2899  __ cvtsi2sd(xmm1, eax);
2900  __ SmiTag(eax); // Retag smi for heap number overwriting test.
2901  __ jmp(&done, Label::kNear);
2902  __ bind(&load_float_eax);
2903  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2904  __ bind(&done);
2905 }
2906 
2907 
2908 void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
2909  Register scratch) {
2910  const Register left = edx;
2911  const Register right = eax;
2912  __ mov(scratch, left);
2913  ASSERT(!scratch.is(right)); // We're about to clobber scratch.
2914  __ SmiUntag(scratch);
2915  __ cvtsi2sd(xmm0, scratch);
2916 
2917  __ mov(scratch, right);
2918  __ SmiUntag(scratch);
2919  __ cvtsi2sd(xmm1, scratch);
2920 }
2921 
2922 
2923 void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm,
2924  Label* non_int32,
2925  Register scratch) {
2926  __ cvttsd2si(scratch, Operand(xmm0));
2927  __ cvtsi2sd(xmm2, scratch);
2928  __ ucomisd(xmm0, xmm2);
2929  __ j(not_zero, non_int32);
2930  __ j(carry, non_int32);
2931  __ cvttsd2si(scratch, Operand(xmm1));
2932  __ cvtsi2sd(xmm2, scratch);
2933  __ ucomisd(xmm1, xmm2);
2934  __ j(not_zero, non_int32);
2935  __ j(carry, non_int32);
2936 }
2937 
2938 
2939 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
2940  Register scratch,
2941  ArgLocation arg_location) {
2942  Label load_smi_1, load_smi_2, done_load_1, done;
2943  if (arg_location == ARGS_IN_REGISTERS) {
2944  __ mov(scratch, edx);
2945  } else {
2946  __ mov(scratch, Operand(esp, 2 * kPointerSize));
2947  }
2948  __ JumpIfSmi(scratch, &load_smi_1, Label::kNear);
2949  __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
2950  __ bind(&done_load_1);
2951 
2952  if (arg_location == ARGS_IN_REGISTERS) {
2953  __ mov(scratch, eax);
2954  } else {
2955  __ mov(scratch, Operand(esp, 1 * kPointerSize));
2956  }
2957  __ JumpIfSmi(scratch, &load_smi_2, Label::kNear);
2958  __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
2959  __ jmp(&done, Label::kNear);
2960 
2961  __ bind(&load_smi_1);
2962  __ SmiUntag(scratch);
2963  __ push(scratch);
2964  __ fild_s(Operand(esp, 0));
2965  __ pop(scratch);
2966  __ jmp(&done_load_1);
2967 
2968  __ bind(&load_smi_2);
2969  __ SmiUntag(scratch);
2970  __ push(scratch);
2971  __ fild_s(Operand(esp, 0));
2972  __ pop(scratch);
2973 
2974  __ bind(&done);
2975 }
2976 
2977 
2978 void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
2979  Register scratch) {
2980  const Register left = edx;
2981  const Register right = eax;
2982  __ mov(scratch, left);
2983  ASSERT(!scratch.is(right)); // We're about to clobber scratch.
2984  __ SmiUntag(scratch);
2985  __ push(scratch);
2986  __ fild_s(Operand(esp, 0));
2987 
2988  __ mov(scratch, right);
2989  __ SmiUntag(scratch);
2990  __ mov(Operand(esp, 0), scratch);
2991  __ fild_s(Operand(esp, 0));
2992  __ pop(scratch);
2993 }
2994 
2995 
2996 void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
2997  Label* non_float,
2998  Register scratch) {
2999  Label test_other, done;
3000  // Check that both operands are numbers (heap numbers or smis);
3001  // jump to non_float if either operand is anything else.
3002  __ JumpIfSmi(edx, &test_other, Label::kNear);
3003  __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
3004  Factory* factory = masm->isolate()->factory();
3005  __ cmp(scratch, factory->heap_number_map());
3006  __ j(not_equal, non_float); // argument in edx is not a number -> NaN
3007 
3008  __ bind(&test_other);
3009  __ JumpIfSmi(eax, &done, Label::kNear);
3010  __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
3011  __ cmp(scratch, factory->heap_number_map());
3012  __ j(not_equal, non_float); // argument in eax is not a number -> NaN
3013 
3014  // Fall-through: Both operands are numbers.
3015  __ bind(&done);
3016 }
3017 
3018 
3019 void FloatingPointHelper::CheckFloatOperandsAreInt32(MacroAssembler* masm,
3020  Label* non_int32) {
3021  return;
3022 }
3023 
3024 
3025 void MathPowStub::Generate(MacroAssembler* masm) {
3026  CpuFeatures::Scope use_sse2(SSE2);
3027  Factory* factory = masm->isolate()->factory();
3028  const Register exponent = eax;
3029  const Register base = edx;
3030  const Register scratch = ecx;
3031  const XMMRegister double_result = xmm3;
3032  const XMMRegister double_base = xmm2;
3033  const XMMRegister double_exponent = xmm1;
3034  const XMMRegister double_scratch = xmm4;
3035 
3036  Label call_runtime, done, exponent_not_smi, int_exponent;
3037 
3038  // Save 1 in double_result - we need this several times later on.
3039  __ mov(scratch, Immediate(1));
3040  __ cvtsi2sd(double_result, scratch);
3041 
3042  if (exponent_type_ == ON_STACK) {
3043  Label base_is_smi, unpack_exponent;
3044  // The exponent and base are supplied as arguments on the stack.
3045  // This can only happen if the stub is called from non-optimized code.
3046  // Load input parameters from stack.
3047  __ mov(base, Operand(esp, 2 * kPointerSize));
3048  __ mov(exponent, Operand(esp, 1 * kPointerSize));
3049 
3050  __ JumpIfSmi(base, &base_is_smi, Label::kNear);
3051  __ cmp(FieldOperand(base, HeapObject::kMapOffset),
3052  factory->heap_number_map());
3053  __ j(not_equal, &call_runtime);
3054 
3055  __ movdbl(double_base, FieldOperand(base, HeapNumber::kValueOffset));
3056  __ jmp(&unpack_exponent, Label::kNear);
3057 
3058  __ bind(&base_is_smi);
3059  __ SmiUntag(base);
3060  __ cvtsi2sd(double_base, base);
3061 
3062  __ bind(&unpack_exponent);
3063  __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
3064  __ SmiUntag(exponent);
3065  __ jmp(&int_exponent);
3066 
3067  __ bind(&exponent_not_smi);
3068  __ cmp(FieldOperand(exponent, HeapObject::kMapOffset),
3069  factory->heap_number_map());
3070  __ j(not_equal, &call_runtime);
3071  __ movdbl(double_exponent,
3072  FieldOperand(exponent, HeapNumber::kValueOffset));
3073  } else if (exponent_type_ == TAGGED) {
3074  __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
3075  __ SmiUntag(exponent);
3076  __ jmp(&int_exponent);
3077 
3078  __ bind(&exponent_not_smi);
3079  __ movdbl(double_exponent,
3080  FieldOperand(exponent, HeapNumber::kValueOffset));
3081  }
3082 
3083  if (exponent_type_ != INTEGER) {
3084  Label fast_power;
3085  // Detect integer exponents stored as double.
3086  __ cvttsd2si(exponent, Operand(double_exponent));
3087  // Skip to runtime if possibly NaN (indicated by the indefinite integer).
3088  __ cmp(exponent, Immediate(0x80000000u));
3089  __ j(equal, &call_runtime);
3090  __ cvtsi2sd(double_scratch, exponent);
3091  // Already ruled out NaNs for exponent.
3092  __ ucomisd(double_exponent, double_scratch);
3093  __ j(equal, &int_exponent);
3094 
3095  if (exponent_type_ == ON_STACK) {
3096  // Detect square root case. Crankshaft detects constant +/-0.5 at
3097  // compile time and uses DoMathPowHalf instead. We then skip this check
3098  // for non-constant cases of +/-0.5 as these hardly occur.
3099  Label continue_sqrt, continue_rsqrt, not_plus_half;
3100  // Test for 0.5.
3101  // Load double_scratch with 0.5.
3102  __ mov(scratch, Immediate(0x3F000000u));
3103  __ movd(double_scratch, scratch);
3104  __ cvtss2sd(double_scratch, double_scratch);
3105  // Already ruled out NaNs for exponent.
3106  __ ucomisd(double_scratch, double_exponent);
3107  __ j(not_equal, &not_plus_half, Label::kNear);
3108 
3109  // Calculates square root of base. Check for the special case of
3110  // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
3111  // According to IEEE-754, single-precision -Infinity has the highest
3112  // 9 bits set and the lowest 23 bits cleared.
3113  __ mov(scratch, 0xFF800000u);
3114  __ movd(double_scratch, scratch);
3115  __ cvtss2sd(double_scratch, double_scratch);
3116  __ ucomisd(double_base, double_scratch);
3117  // Comparing -Infinity with NaN results in "unordered", which sets the
3118  // zero flag as if both were equal. However, it also sets the carry flag.
3119  __ j(not_equal, &continue_sqrt, Label::kNear);
3120  __ j(carry, &continue_sqrt, Label::kNear);
3121 
3122  // Set result to Infinity in the special case.
3123  __ xorps(double_result, double_result);
3124  __ subsd(double_result, double_scratch);
3125  __ jmp(&done);
3126 
3127  __ bind(&continue_sqrt);
3128  // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
3129  __ xorps(double_scratch, double_scratch);
3130  __ addsd(double_scratch, double_base); // Convert -0 to +0.
3131  __ sqrtsd(double_result, double_scratch);
3132  __ jmp(&done);
3133 
3134  // Test for -0.5.
3135  __ bind(&not_plus_half);
3136  // Load double_exponent with -0.5 by subtracting 1.
3137  __ subsd(double_scratch, double_result);
3138  // Already ruled out NaNs for exponent.
3139  __ ucomisd(double_scratch, double_exponent);
3140  __ j(not_equal, &fast_power, Label::kNear);
3141 
3142  // Calculates reciprocal of square root of base. Check for the special
3143  // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
3144  // According to IEEE-754, single-precision -Infinity has the highest
3145  // 9 bits set and the lowest 23 bits cleared.
3146  __ mov(scratch, 0xFF800000u);
3147  __ movd(double_scratch, scratch);
3148  __ cvtss2sd(double_scratch, double_scratch);
3149  __ ucomisd(double_base, double_scratch);
3150  // Comparing -Infinity with NaN results in "unordered", which sets the
3151  // zero flag as if both were equal. However, it also sets the carry flag.
3152  __ j(not_equal, &continue_rsqrt, Label::kNear);
3153  __ j(carry, &continue_rsqrt, Label::kNear);
3154 
3155  // Set result to 0 in the special case.
3156  __ xorps(double_result, double_result);
3157  __ jmp(&done);
3158 
3159  __ bind(&continue_rsqrt);
3160  // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
3161  __ xorps(double_exponent, double_exponent);
3162  __ addsd(double_exponent, double_base); // Convert -0 to +0.
3163  __ sqrtsd(double_exponent, double_exponent);
3164  __ divsd(double_result, double_exponent);
3165  __ jmp(&done);
3166  }
3167 
3168  // Using FPU instructions to calculate power.
3169  Label fast_power_failed;
3170  __ bind(&fast_power);
3171  __ fnclex(); // Clear flags to catch exceptions later.
3172  // Transfer (B)ase and (E)xponent onto the FPU register stack.
3173  __ sub(esp, Immediate(kDoubleSize));
3174  __ movdbl(Operand(esp, 0), double_exponent);
3175  __ fld_d(Operand(esp, 0)); // E
3176  __ movdbl(Operand(esp, 0), double_base);
3177  __ fld_d(Operand(esp, 0)); // B, E
3178 
3179  // Exponent is in st(1) and base is in st(0)
3180  // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
3181  // FYL2X calculates st(1) * log2(st(0))
3182  __ fyl2x(); // X
3183  __ fld(0); // X, X
3184  __ frndint(); // rnd(X), X
3185  __ fsub(1); // rnd(X), X-rnd(X)
3186  __ fxch(1); // X - rnd(X), rnd(X)
3187  // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
3188  __ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
3189  __ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
3190  __ faddp(1); // 1, 2^(X-rnd(X)), rnd(X)
3191  // FSCALE calculates st(0) * 2^st(1)
3192  __ fscale(); // 2^X, rnd(X)
3193  __ fstp(1);
3194  // Bail out to runtime in case of exceptions in the status word.
3195  __ fnstsw_ax();
3196  __ test_b(eax, 0x5F); // We check for all but precision exception.
3197  __ j(not_zero, &fast_power_failed, Label::kNear);
3198  __ fstp_d(Operand(esp, 0));
3199  __ movdbl(double_result, Operand(esp, 0));
3200  __ add(esp, Immediate(kDoubleSize));
3201  __ jmp(&done);
3202 
3203  __ bind(&fast_power_failed);
3204  __ fninit();
3205  __ add(esp, Immediate(kDoubleSize));
3206  __ jmp(&call_runtime);
3207  }
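// ---- Editor's note: illustrative sketch, not part of the original file ----
// The fyl2x/frndint/f2xm1/fscale sequence above, restated in C++ using the
// same identity B^E = 2^(E * log2(B)) (assumes <cmath>; nearbyint mirrors
// frndint under the default round-to-nearest mode):
//   double FastPow(double b, double e) {
//     double x = e * std::log2(b);             // fyl2x
//     double r = std::nearbyint(x);            // frndint
//     return std::exp2(x - r) * std::exp2(r);  // f2xm1 (+1.0), then fscale
//   }
// -------------------------------------------------------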
3208 
3209  // Calculate power with integer exponent.
3210  __ bind(&int_exponent);
3211  const XMMRegister double_scratch2 = double_exponent;
3212  __ mov(scratch, exponent); // Back up exponent.
3213  __ movsd(double_scratch, double_base); // Back up base.
3214  __ movsd(double_scratch2, double_result); // Load double_exponent with 1.
3215 
3216  // Get absolute value of exponent.
3217  Label no_neg, while_true, while_false;
3218  __ test(scratch, scratch);
3219  __ j(positive, &no_neg, Label::kNear);
3220  __ neg(scratch);
3221  __ bind(&no_neg);
3222 
3223  __ j(zero, &while_false, Label::kNear);
3224  __ shr(scratch, 1);
3225  // Above condition means CF==0 && ZF==0. This means that the
3226  // bit that has been shifted out is 0 and the result is not 0.
3227  __ j(above, &while_true, Label::kNear);
3228  __ movsd(double_result, double_scratch);
3229  __ j(zero, &while_false, Label::kNear);
3230 
3231  __ bind(&while_true);
3232  __ shr(scratch, 1);
3233  __ mulsd(double_scratch, double_scratch);
3234  __ j(above, &while_true, Label::kNear);
3235  __ mulsd(double_result, double_scratch);
3236  __ j(not_zero, &while_true);
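// ---- Editor's note: illustrative sketch, not part of the original file ----
// The shr/mulsd loop above is binary exponentiation (square-and-multiply),
// shown here in its standard C++ form:
static double IntPow(double base, uint32_t n) {  // n = |exponent|
  double result = 1.0;
  while (n != 0) {
    if (n & 1) result *= base;  // The bit shifted out by shr was set.
    base *= base;               // mulsd double_scratch, double_scratch.
    n >>= 1;
  }
  return result;
}
// -------------------------------------------------------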
3237 
3238  __ bind(&while_false);
3239  // scratch has the original value of the exponent - if the exponent is
3240  // negative, return 1/result.
3241  __ test(exponent, exponent);
3242  __ j(positive, &done);
3243  __ divsd(double_scratch2, double_result);
3244  __ movsd(double_result, double_scratch2);
3245  // Test whether result is zero. Bail out to check for subnormal result.
3246  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
3247  __ xorps(double_scratch2, double_scratch2);
3248  __ ucomisd(double_scratch2, double_result); // Result cannot be NaN.
3249  // double_exponent aliased as double_scratch2 has already been overwritten
3250  // and may not have contained the exponent value in the first place when the
3251  // exponent is a smi. We reset it with exponent value before bailing out.
3252  __ j(not_equal, &done);
3253  __ cvtsi2sd(double_exponent, exponent);
3254 
3255  // Returning or bailing out.
3256  Counters* counters = masm->isolate()->counters();
3257  if (exponent_type_ == ON_STACK) {
3258  // The arguments are still on the stack.
3259  __ bind(&call_runtime);
3260  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
3261 
3262  // The stub is called from non-optimized code, which expects the result
3263  // as heap number in exponent.
3264  __ bind(&done);
3265  __ AllocateHeapNumber(eax, scratch, base, &call_runtime);
3266  __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
3267  __ IncrementCounter(counters->math_pow(), 1);
3268  __ ret(2 * kPointerSize);
3269  } else {
3270  __ bind(&call_runtime);
3271  {
3272  AllowExternalCallThatCantCauseGC scope(masm);
3273  __ PrepareCallCFunction(4, scratch);
3274  __ movdbl(Operand(esp, 0 * kDoubleSize), double_base);
3275  __ movdbl(Operand(esp, 1 * kDoubleSize), double_exponent);
3276  __ CallCFunction(
3277  ExternalReference::power_double_double_function(masm->isolate()), 4);
3278  }
3279  // Return value is in st(0) on ia32.
3280  // Store it into the (fixed) result register.
3281  __ sub(esp, Immediate(kDoubleSize));
3282  __ fstp_d(Operand(esp, 0));
3283  __ movdbl(double_result, Operand(esp, 0));
3284  __ add(esp, Immediate(kDoubleSize));
3285 
3286  __ bind(&done);
3287  __ IncrementCounter(counters->math_pow(), 1);
3288  __ ret(0);
3289  }
3290 }
3291 
3292 
3293 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
3294  // The key is in edx and the parameter count is in eax.
3295 
3296  // The displacement is used for skipping the frame pointer on the
3297  // stack. It is the offset of the last parameter (if any) relative
3298  // to the frame pointer.
3299  static const int kDisplacement = 1 * kPointerSize;
3300 
3301  // Check that the key is a smi.
3302  Label slow;
3303  __ JumpIfNotSmi(edx, &slow, Label::kNear);
3304 
3305  // Check if the calling frame is an arguments adaptor frame.
3306  Label adaptor;
3307  __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3308  __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
3309  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3310  __ j(equal, &adaptor, Label::kNear);
3311 
3312  // Check index against formal parameters count limit passed in
3313  // through register eax. Use unsigned comparison to get negative
3314  // check for free.
3315  __ cmp(edx, eax);
3316  __ j(above_equal, &slow, Label::kNear);
3317 
3318  // Read the argument from the stack and return it.
3319  STATIC_ASSERT(kSmiTagSize == 1);
3320  STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
3321  __ lea(ebx, Operand(ebp, eax, times_2, 0));
3322  __ neg(edx);
3323  __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
3324  __ ret(0);
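// ---- Editor's note: not part of the original file ----
// Address math above: eax (parameter count) and edx (key) are tagged smis,
// i.e. value << 1, so the times_2 scales multiply each value by
// kPointerSize. The load reads ebp + count*4 - key*4 + kDisplacement,
// indexing the parameters down from the high end of the caller's frame.
// -------------------------------------------------------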
3325 
3326  // Arguments adaptor case: Check index against actual arguments
3327  // limit found in the arguments adaptor frame. Use unsigned
3328  // comparison to get negative check for free.
3329  __ bind(&adaptor);
3330  __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
3331  __ cmp(edx, ecx);
3332  __ j(above_equal, &slow, Label::kNear);
3333 
3334  // Read the argument from the stack and return it.
3335  STATIC_ASSERT(kSmiTagSize == 1);
3336  STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
3337  __ lea(ebx, Operand(ebx, ecx, times_2, 0));
3338  __ neg(edx);
3339  __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
3340  __ ret(0);
3341 
3342  // Slow-case: Handle non-smi or out-of-bounds access to arguments
3343  // by calling the runtime system.
3344  __ bind(&slow);
3345  __ pop(ebx); // Return address.
3346  __ push(edx);
3347  __ push(ebx);
3348  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
3349 }
3350 
3351 
3352 void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
3353  // esp[0] : return address
3354  // esp[4] : number of parameters
3355  // esp[8] : receiver displacement
3356  // esp[12] : function
3357 
3358  // Check if the calling frame is an arguments adaptor frame.
3359  Label runtime;
3360  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3361  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
3362  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3363  __ j(not_equal, &runtime, Label::kNear);
3364 
3365  // Patch the arguments.length and the parameters pointer.
3366  __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
3367  __ mov(Operand(esp, 1 * kPointerSize), ecx);
3368  __ lea(edx, Operand(edx, ecx, times_2,
3369  StandardFrameConstants::kCallerSPOffset));
3370  __ mov(Operand(esp, 2 * kPointerSize), edx);
3371 
3372  __ bind(&runtime);
3373  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
3374 }
3375 
3376 
3377 void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
3378  // esp[0] : return address
3379  // esp[4] : number of parameters (tagged)
3380  // esp[8] : receiver displacement
3381  // esp[12] : function
3382 
3383  // ebx = parameter count (tagged)
3384  __ mov(ebx, Operand(esp, 1 * kPointerSize));
3385 
3386  // Check if the calling frame is an arguments adaptor frame.
3387  // TODO(rossberg): Factor out some of the bits that are shared with the other
3388  // Generate* functions.
3389  Label runtime;
3390  Label adaptor_frame, try_allocate;
3391  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3392  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
3393  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3394  __ j(equal, &adaptor_frame, Label::kNear);
3395 
3396  // No adaptor, parameter count = argument count.
3397  __ mov(ecx, ebx);
3398  __ jmp(&try_allocate, Label::kNear);
3399 
3400  // We have an adaptor frame. Patch the parameters pointer.
3401  __ bind(&adaptor_frame);
3402  __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
3403  __ lea(edx, Operand(edx, ecx, times_2,
3404  StandardFrameConstants::kCallerSPOffset));
3405  __ mov(Operand(esp, 2 * kPointerSize), edx);
3406 
3407  // ebx = parameter count (tagged)
3408  // ecx = argument count (tagged)
3409  // esp[4] = parameter count (tagged)
3410  // esp[8] = address of receiver argument
3411  // Compute the mapped parameter count = min(ebx, ecx) in ebx.
3412  __ cmp(ebx, ecx);
3413  __ j(less_equal, &try_allocate, Label::kNear);
3414  __ mov(ebx, ecx);
3415 
3416  __ bind(&try_allocate);
3417 
3418  // Save mapped parameter count.
3419  __ push(ebx);
3420 
3421  // Compute the sizes of backing store, parameter map, and arguments object.
3422  // 1. Parameter map: has 2 extra words containing context and backing store.
3423  const int kParameterMapHeaderSize =
3424  FixedArray::kHeaderSize + 2 * kPointerSize;
3425  Label no_parameter_map;
3426  __ test(ebx, ebx);
3427  __ j(zero, &no_parameter_map, Label::kNear);
3428  __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
3429  __ bind(&no_parameter_map);
3430 
3431  // 2. Backing store.
3432  __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
3433 
3434  // 3. Arguments object.
3435  __ add(ebx, Immediate(Heap::kArgumentsObjectSize));
3436 
3437  // Do the allocation of all three objects in one go.
3438  __ AllocateInNewSpace(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
3439 
3440  // eax = address of new object(s) (tagged)
3441  // ecx = argument count (tagged)
3442  // esp[0] = mapped parameter count (tagged)
3443  // esp[8] = parameter count (tagged)
3444  // esp[12] = address of receiver argument
3445  // Get the arguments boilerplate from the current native context into edi.
3446  Label has_mapped_parameters, copy;
3447  __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
3448  __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
3449  __ mov(ebx, Operand(esp, 0 * kPointerSize));
3450  __ test(ebx, ebx);
3451  __ j(not_zero, &has_mapped_parameters, Label::kNear);
3452  __ mov(edi, Operand(edi,
3453  Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX)));
3454  __ jmp(&copy, Label::kNear);
3455 
3456  __ bind(&has_mapped_parameters);
3457  __ mov(edi, Operand(edi,
3458  Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX)));
3459  __ bind(&copy);
3460 
3461  // eax = address of new object (tagged)
3462  // ebx = mapped parameter count (tagged)
3463  // ecx = argument count (tagged)
3464  // edi = address of boilerplate object (tagged)
3465  // esp[0] = mapped parameter count (tagged)
3466  // esp[8] = parameter count (tagged)
3467  // esp[12] = address of receiver argument
3468  // Copy the JS object part.
3469  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
3470  __ mov(edx, FieldOperand(edi, i));
3471  __ mov(FieldOperand(eax, i), edx);
3472  }
3473 
3474  // Set up the callee in-object property.
3475  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
3476  __ mov(edx, Operand(esp, 4 * kPointerSize));
3477  __ mov(FieldOperand(eax, JSObject::kHeaderSize +
3478  Heap::kArgumentsCalleeIndex * kPointerSize),
3479  edx);
3480 
3481  // Use the length (smi tagged) and set that as an in-object property too.
3482  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
3483  __ mov(FieldOperand(eax, JSObject::kHeaderSize +
3484  Heap::kArgumentsLengthIndex * kPointerSize),
3485  ecx);
3486 
3487  // Set up the elements pointer in the allocated arguments object.
3488  // If we allocated a parameter map, edi will point there, otherwise to the
3489  // backing store.
3490  __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
3491  __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
3492 
3493  // eax = address of new object (tagged)
3494  // ebx = mapped parameter count (tagged)
3495  // ecx = argument count (tagged)
3496  // edi = address of parameter map or backing store (tagged)
3497  // esp[0] = mapped parameter count (tagged)
3498  // esp[8] = parameter count (tagged)
3499  // esp[12] = address of receiver argument
3500  // Free a register.
3501  __ push(eax);
3502 
3503  // Initialize parameter map. If there are no mapped arguments, we're done.
3504  Label skip_parameter_map;
3505  __ test(ebx, ebx);
3506  __ j(zero, &skip_parameter_map);
3507 
3508  __ mov(FieldOperand(edi, HeapObject::kMapOffset),
3509  Immediate(FACTORY->non_strict_arguments_elements_map()));
3510  __ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
3511  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
3512  __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
3513  __ lea(eax, Operand(edi, ebx, times_2, kParameterMapHeaderSize));
3514  __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 1 * kPointerSize), eax);
3515 
3516  // Copy the parameter slots and the holes in the arguments.
3517  // We need to fill in mapped_parameter_count slots. They index the context,
3518  // where parameters are stored in reverse order, at
3519  // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
3520  // The mapped parameters thus need to get indices
3521  // MIN_CONTEXT_SLOTS+parameter_count-1 ..
3522  // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
3523  // We loop from right to left.
3524  Label parameters_loop, parameters_test;
3525  __ push(ecx);
3526  __ mov(eax, Operand(esp, 2 * kPointerSize));
3527  __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
3528  __ add(ebx, Operand(esp, 4 * kPointerSize));
3529  __ sub(ebx, eax);
3530  __ mov(ecx, FACTORY->the_hole_value());
3531  __ mov(edx, edi);
3532  __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
3533  // eax = loop variable (tagged)
3534  // ebx = mapping index (tagged)
3535  // ecx = the hole value
3536  // edx = address of parameter map (tagged)
3537  // edi = address of backing store (tagged)
3538  // esp[0] = argument count (tagged)
3539  // esp[4] = address of new object (tagged)
3540  // esp[8] = mapped parameter count (tagged)
3541  // esp[16] = parameter count (tagged)
3542  // esp[20] = address of receiver argument
3543  __ jmp(&parameters_test, Label::kNear);
3544 
3545  __ bind(&parameters_loop);
3546  __ sub(eax, Immediate(Smi::FromInt(1)));
3547  __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
3548  __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
3549  __ add(ebx, Immediate(Smi::FromInt(1)));
3550  __ bind(&parameters_test);
3551  __ test(eax, eax);
3552  __ j(not_zero, &parameters_loop, Label::kNear);
3553  __ pop(ecx);
3554 
3555  __ bind(&skip_parameter_map);
3556 
3557  // ecx = argument count (tagged)
3558  // edi = address of backing store (tagged)
3559  // esp[0] = address of new object (tagged)
3560  // esp[4] = mapped parameter count (tagged)
3561  // esp[12] = parameter count (tagged)
3562  // esp[16] = address of receiver argument
3563  // Copy arguments header and remaining slots (if there are any).
3564  __ mov(FieldOperand(edi, HeapObject::kMapOffset),
3565  Immediate(FACTORY->fixed_array_map()));
3566  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
3567 
3568  Label arguments_loop, arguments_test;
3569  __ mov(ebx, Operand(esp, 1 * kPointerSize));
3570  __ mov(edx, Operand(esp, 4 * kPointerSize));
3571  __ sub(edx, ebx); // Is there a smarter way to do negative scaling?
3572  __ sub(edx, ebx);
3573  __ jmp(&arguments_test, Label::kNear);
3574 
3575  __ bind(&arguments_loop);
3576  __ sub(edx, Immediate(kPointerSize));
3577  __ mov(eax, Operand(edx, 0));
3578  __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
3579  __ add(ebx, Immediate(Smi::FromInt(1)));
3580 
3581  __ bind(&arguments_test);
3582  __ cmp(ebx, ecx);
3583  __ j(less, &arguments_loop, Label::kNear);
3584 
3585  // Restore.
3586  __ pop(eax); // Address of arguments object.
3587  __ pop(ebx); // Parameter count.
3588 
3589  // Return and remove the on-stack parameters.
3590  __ ret(3 * kPointerSize);
3591 
3592  // Do the runtime call to allocate the arguments object.
3593  __ bind(&runtime);
3594  __ pop(eax); // Remove saved parameter count.
3595  __ mov(Operand(esp, 1 * kPointerSize), ecx); // Patch argument count.
3596  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
3597 }
3598 
3599 
3600 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
3601  // esp[0] : return address
3602  // esp[4] : number of parameters
3603  // esp[8] : receiver displacement
3604  // esp[12] : function
3605 
3606  // Check if the calling frame is an arguments adaptor frame.
3607  Label adaptor_frame, try_allocate, runtime;
3608  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3609  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
3610  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3611  __ j(equal, &adaptor_frame, Label::kNear);
3612 
3613  // Get the length from the frame.
3614  __ mov(ecx, Operand(esp, 1 * kPointerSize));
3615  __ jmp(&try_allocate, Label::kNear);
3616 
3617  // Patch the arguments.length and the parameters pointer.
3618  __ bind(&adaptor_frame);
3619  __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
3620  __ mov(Operand(esp, 1 * kPointerSize), ecx);
3621  __ lea(edx, Operand(edx, ecx, times_2,
3622  StandardFrameConstants::kCallerSPOffset));
3623  __ mov(Operand(esp, 2 * kPointerSize), edx);
3624 
3625  // Try the new space allocation. Start out with computing the size of
3626  // the arguments object and the elements array.
3627  Label add_arguments_object;
3628  __ bind(&try_allocate);
3629  __ test(ecx, ecx);
3630  __ j(zero, &add_arguments_object, Label::kNear);
3631  __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
3632  __ bind(&add_arguments_object);
3633  __ add(ecx, Immediate(Heap::kArgumentsObjectSizeStrict));
3634 
3635  // Do the allocation of both objects in one go.
3636  __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
3637 
3638  // Get the arguments boilerplate from the current native context.
3639  __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
3640  __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
3641  const int offset =
3642  Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
3643  __ mov(edi, Operand(edi, offset));
3644 
3645  // Copy the JS object part.
3646  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
3647  __ mov(ebx, FieldOperand(edi, i));
3648  __ mov(FieldOperand(eax, i), ebx);
3649  }
3650 
3651  // Get the length (smi tagged) and set that as an in-object property too.
3652  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
3653  __ mov(ecx, Operand(esp, 1 * kPointerSize));
3654  __ mov(FieldOperand(eax, JSObject::kHeaderSize +
3655  Heap::kArgumentsLengthIndex * kPointerSize),
3656  ecx);
3657 
3658  // If there are no actual arguments, we're done.
3659  Label done;
3660  __ test(ecx, ecx);
3661  __ j(zero, &done, Label::kNear);
3662 
3663  // Get the parameters pointer from the stack.
3664  __ mov(edx, Operand(esp, 2 * kPointerSize));
3665 
3666  // Set up the elements pointer in the allocated arguments object and
3667  // initialize the header in the elements fixed array.
3668  __ lea(edi, Operand(eax, Heap::kArgumentsObjectSizeStrict));
3669  __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
3670  __ mov(FieldOperand(edi, HeapObject::kMapOffset),
3671  Immediate(FACTORY->fixed_array_map()));
3672  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
3673 
3674  // Untag the length for the loop below.
3675  __ SmiUntag(ecx);
3676 
3677  // Copy the fixed array slots.
3678  Label loop;
3679  __ bind(&loop);
3680  __ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver.
3681  __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
3682  __ add(edi, Immediate(kPointerSize));
3683  __ sub(edx, Immediate(kPointerSize));
3684  __ dec(ecx);
3685  __ j(not_zero, &loop);
3686 
3687  // Return and remove the on-stack parameters.
3688  __ bind(&done);
3689  __ ret(3 * kPointerSize);
3690 
3691  // Do the runtime call to allocate the arguments object.
3692  __ bind(&runtime);
3693  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
3694 }
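// Worked example for the strict case above, assuming ia32's
// kPointerSize == 4: for n == 3 arguments the requested size is
//
//   FixedArray::kHeaderSize + 3 * kPointerSize
//       + Heap::kArgumentsObjectSizeStrict
//
// and for n == 0 the elements array is skipped entirely, so only
// Heap::kArgumentsObjectSizeStrict bytes are allocated. (Illustration
// only, not part of the original file.)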
3695 
3696 
3697 void RegExpExecStub::Generate(MacroAssembler* masm) {
3698  // Just jump directly to runtime if native RegExp is not selected at compile
3699  // time or if the regexp entry in generated code is turned off by a runtime
3700  // switch or at compilation.
3701 #ifdef V8_INTERPRETED_REGEXP
3702  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
3703 #else // V8_INTERPRETED_REGEXP
3704 
3705  // Stack frame on entry.
3706  // esp[0]: return address
3707  // esp[4]: last_match_info (expected JSArray)
3708  // esp[8]: previous index
3709  // esp[12]: subject string
3710  // esp[16]: JSRegExp object
3711 
3712  static const int kLastMatchInfoOffset = 1 * kPointerSize;
3713  static const int kPreviousIndexOffset = 2 * kPointerSize;
3714  static const int kSubjectOffset = 3 * kPointerSize;
3715  static const int kJSRegExpOffset = 4 * kPointerSize;
3716 
3717  Label runtime, invoke_regexp;
3718 
3719  // Ensure that a RegExp stack is allocated.
3720  ExternalReference address_of_regexp_stack_memory_address =
3721  ExternalReference::address_of_regexp_stack_memory_address(
3722  masm->isolate());
3723  ExternalReference address_of_regexp_stack_memory_size =
3724  ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
3725  __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
3726  __ test(ebx, ebx);
3727  __ j(zero, &runtime);
3728 
3729  // Check that the first argument is a JSRegExp object.
3730  __ mov(eax, Operand(esp, kJSRegExpOffset));
3731  STATIC_ASSERT(kSmiTag == 0);
3732  __ JumpIfSmi(eax, &runtime);
3733  __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
3734  __ j(not_equal, &runtime);
3735  // Check that the RegExp has been compiled (data contains a fixed array).
3736  __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
3737  if (FLAG_debug_code) {
3738  __ test(ecx, Immediate(kSmiTagMask));
3739  __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected");
3740  __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
3741  __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
3742  }
3743 
3744  // ecx: RegExp data (FixedArray)
3745  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
3746  __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
3747  __ cmp(ebx, Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
3748  __ j(not_equal, &runtime);
3749 
3750  // ecx: RegExp data (FixedArray)
3751  // Check that the number of captures fits in the static offsets vector buffer.
3752  __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
3753  // Calculate number of capture registers (number_of_captures + 1) * 2. This
3754  // uses the assumption that smis are 2 * their untagged value.
3755  STATIC_ASSERT(kSmiTag == 0);
3756  STATIC_ASSERT(kSmiTagSize == 1);
3757  __ add(edx, Immediate(2)); // edx was a smi.
3758  // Check that the static offsets vector buffer is large enough.
3759  __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
3760  __ j(above, &runtime);
3761 
3762  // ecx: RegExp data (FixedArray)
3763  // edx: Number of capture registers
3764  // Check that the second argument is a string.
3765  __ mov(eax, Operand(esp, kSubjectOffset));
3766  __ JumpIfSmi(eax, &runtime);
3767  Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
3768  __ j(NegateCondition(is_string), &runtime);
3769  // Get the length of the string to ebx.
3770  __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
3771 
3772  // ebx: Length of subject string as a smi
3773  // ecx: RegExp data (FixedArray)
3774  // edx: Number of capture registers
3775  // Check that the third argument is a positive smi less than the subject
3776  // string length. A negative value will be greater (unsigned comparison).
3777  __ mov(eax, Operand(esp, kPreviousIndexOffset));
3778  __ JumpIfNotSmi(eax, &runtime);
3779  __ cmp(eax, ebx);
3780  __ j(above_equal, &runtime);
3781 
3782  // ecx: RegExp data (FixedArray)
3783  // edx: Number of capture registers
3784  // Check that the fourth object is a JSArray object.
3785  __ mov(eax, Operand(esp, kLastMatchInfoOffset));
3786  __ JumpIfSmi(eax, &runtime);
3787  __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
3788  __ j(not_equal, &runtime);
3789  // Check that the JSArray is in fast case.
3790  __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
3791  __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
3792  Factory* factory = masm->isolate()->factory();
3793  __ cmp(eax, factory->fixed_array_map());
3794  __ j(not_equal, &runtime);
3795  // Check that the last match info has space for the capture registers and the
3796  // additional information.
3797  __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
3798  __ SmiUntag(eax);
3799  __ add(edx, Immediate(RegExpImpl::kLastMatchOverhead));
3800  __ cmp(edx, eax);
3801  __ j(greater, &runtime);
3802 
3803  // Reset offset for possibly sliced string.
3804  __ Set(edi, Immediate(0));
3805  // ecx: RegExp data (FixedArray)
3806  // Check the representation and encoding of the subject string.
3807  Label seq_ascii_string, seq_two_byte_string, check_code;
3808  __ mov(eax, Operand(esp, kSubjectOffset));
3809  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
3810  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
3811  // First check for flat two byte string.
3812  __ and_(ebx, kIsNotStringMask |
3813  kStringRepresentationMask |
3814  kStringEncodingMask |
3815  kShortExternalStringMask);
3816  STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
3817  __ j(zero, &seq_two_byte_string, Label::kNear);
3818  // Any other flat string must be a flat ASCII string. None of the following
3819  // string type tests will succeed if subject is not a string or a short
3820  // external string.
3821  __ and_(ebx, Immediate(kIsNotStringMask |
3822  kStringRepresentationMask |
3823  kShortExternalStringMask));
3824  __ j(zero, &seq_ascii_string, Label::kNear);
3825 
3826  // ebx: whether subject is a string and if yes, its string representation
3827  // Check for flat cons string or sliced string.
3828  // A flat cons string is a cons string where the second part is the empty
3829  // string. In that case the subject string is just the first part of the cons
3830  // string. Also in this case the first part of the cons string is known to be
3831  // a sequential string or an external string.
3832  // In the case of a sliced string its offset has to be taken into account.
3833  Label cons_string, external_string, check_encoding;
3834  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
3835  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
3836  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
3837  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
3838  __ cmp(ebx, Immediate(kExternalStringTag));
3839  __ j(less, &cons_string);
3840  __ j(equal, &external_string);
3841 
3842  // Catch non-string subject or short external string.
3843  STATIC_ASSERT((kNotStringTag != 0) && (kShortExternalStringTag != 0));
3844  __ test(ebx, Immediate(kIsNotStringMask | kShortExternalStringTag));
3845  __ j(not_zero, &runtime);
3846 
3847  // String is sliced.
3848  __ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset));
3849  __ mov(eax, FieldOperand(eax, SlicedString::kParentOffset));
3850  // edi: offset of sliced string, smi-tagged.
3851  // eax: parent string.
3852  __ jmp(&check_encoding, Label::kNear);
3853  // String is a cons string, check whether it is flat.
3854  __ bind(&cons_string);
3855  __ cmp(FieldOperand(eax, ConsString::kSecondOffset), factory->empty_string());
3856  __ j(not_equal, &runtime);
3857  __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
3858  __ bind(&check_encoding);
3859  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
3860  // eax: first part of cons string or parent of sliced string.
3861  // ebx: map of first part of cons string or map of parent of sliced string.
3862  // Is first part of cons or parent of slice a flat two byte string?
3863  __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
3864  kStringRepresentationMask | kStringEncodingMask);
3865  STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
3866  __ j(zero, &seq_two_byte_string, Label::kNear);
3867  // Any other flat string must be sequential ASCII or external.
3868  __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
3869  kStringRepresentationMask);
3870  __ j(not_zero, &external_string);
3871 
3872  __ bind(&seq_ascii_string);
3873  // eax: subject string (flat ASCII)
3874  // ecx: RegExp data (FixedArray)
3875  __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
3876  __ Set(ecx, Immediate(1)); // Type is ASCII.
3877  __ jmp(&check_code, Label::kNear);
3878 
3879  __ bind(&seq_two_byte_string);
3880  // eax: subject string (flat two byte)
3881  // ecx: RegExp data (FixedArray)
3882  __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
3883  __ Set(ecx, Immediate(0)); // Type is two byte.
3884 
3885  __ bind(&check_code);
3886  // Check that the irregexp code has been generated for the actual string
3887  // encoding. If it has, the field contains a code object; otherwise it contains
3888  // a smi (code flushing support).
3889  __ JumpIfSmi(edx, &runtime);
3890 
3891  // eax: subject string
3892  // edx: code
3893  // ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
3894  // Load used arguments before starting to push arguments for call to native
3895  // RegExp code to avoid handling changing stack height.
3896  __ mov(ebx, Operand(esp, kPreviousIndexOffset));
3897  __ SmiUntag(ebx); // Previous index from smi.
3898 
3899  // eax: subject string
3900  // ebx: previous index
3901  // edx: code
3902  // ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
3903  // All checks done. Now push arguments for native regexp code.
3904  Counters* counters = masm->isolate()->counters();
3905  __ IncrementCounter(counters->regexp_entry_native(), 1);
3906 
3907  // Isolates: note we add an additional parameter here (isolate pointer).
3908  static const int kRegExpExecuteArguments = 9;
3909  __ EnterApiExitFrame(kRegExpExecuteArguments);
3910 
3911  // Argument 9: Pass current isolate address.
3912  __ mov(Operand(esp, 8 * kPointerSize),
3913  Immediate(ExternalReference::isolate_address()));
3914 
3915  // Argument 8: Indicate that this is a direct call from JavaScript.
3916  __ mov(Operand(esp, 7 * kPointerSize), Immediate(1));
3917 
3918  // Argument 7: Start (high end) of backtracking stack memory area.
3919  __ mov(esi, Operand::StaticVariable(address_of_regexp_stack_memory_address));
3920  __ add(esi, Operand::StaticVariable(address_of_regexp_stack_memory_size));
3921  __ mov(Operand(esp, 6 * kPointerSize), esi);
3922 
3923  // Argument 6: Set the number of capture registers to zero to force global
3924  // regexps to behave as non-global. This does not affect non-global regexps.
3925  __ mov(Operand(esp, 5 * kPointerSize), Immediate(0));
3926 
3927  // Argument 5: static offsets vector buffer.
3928  __ mov(Operand(esp, 4 * kPointerSize),
3929  Immediate(ExternalReference::address_of_static_offsets_vector(
3930  masm->isolate())));
3931 
3932  // Argument 2: Previous index.
3933  __ mov(Operand(esp, 1 * kPointerSize), ebx);
3934 
3935  // Argument 1: Original subject string.
3936  // The original subject is in the previous stack frame. Therefore we have to
3937  // use ebp, which points exactly to one pointer size below the previous esp.
3938  // (Because creating a new stack frame pushes the previous ebp onto the stack
3939  // and thereby moves up esp by one kPointerSize.)
3940  __ mov(esi, Operand(ebp, kSubjectOffset + kPointerSize));
3941  __ mov(Operand(esp, 0 * kPointerSize), esi);
3942 
3943  // esi: original subject string
3944  // eax: underlying subject string
3945  // ebx: previous index
3946  // ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
3947  // edx: code
3948  // Argument 4: End of string data
3949  // Argument 3: Start of string data
3950  // Prepare start and end index of the input.
3951  // Load the length from the original sliced string if that is the case.
3952  __ mov(esi, FieldOperand(esi, String::kLengthOffset));
3953  __ add(esi, edi); // Calculate input end wrt offset.
3954  __ SmiUntag(edi);
3955  __ add(ebx, edi); // Calculate input start wrt offset.
3956 
3957  // ebx: start index of the input string
3958  // esi: end index of the input string
3959  Label setup_two_byte, setup_rest;
3960  __ test(ecx, ecx);
3961  __ j(zero, &setup_two_byte, Label::kNear);
3962  __ SmiUntag(esi);
3963  __ lea(ecx, FieldOperand(eax, esi, times_1, SeqAsciiString::kHeaderSize));
3964  __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
3965  __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
3966  __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
3967  __ jmp(&setup_rest, Label::kNear);
3968 
3969  __ bind(&setup_two_byte);
3970  STATIC_ASSERT(kSmiTag == 0);
3971  STATIC_ASSERT(kSmiTagSize == 1); // esi is a smi (value scaled by 2).
3972  __ lea(ecx, FieldOperand(eax, esi, times_1, SeqTwoByteString::kHeaderSize));
3973  __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
3974  __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
3975  __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
3976 
3977  __ bind(&setup_rest);
3978 
3979  // Locate the code entry and call it.
3980  __ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
3981  __ call(edx);
3982 
3983  // Drop arguments and come back to JS mode.
3984  __ LeaveApiExitFrame();
3985 
3986  // Check the result.
3987  Label success;
3988  __ cmp(eax, 1);
3989  // We expect exactly one result since we force the called regexp to behave
3990  // as non-global.
3991  __ j(equal, &success);
3992  Label failure;
3993  __ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
3994  __ j(equal, &failure);
3995  __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION);
3996  // If not exception, it can only be retry. Handle that in the runtime system.
3997  __ j(not_equal, &runtime);
3998  // Result must now be exception. If there is no pending exception, a stack
3999  // overflow (on the backtrack stack) was detected in RegExp code, but the
4000  // exception has not been created yet. Handle that in the runtime system.
4001  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
4002  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
4003  masm->isolate());
4004  __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
4005  __ mov(eax, Operand::StaticVariable(pending_exception));
4006  __ cmp(edx, eax);
4007  __ j(equal, &runtime);
4008  // For exception, throw the exception again.
4009 
4010  // Clear the pending exception variable.
4011  __ mov(Operand::StaticVariable(pending_exception), edx);
4012 
4013  // Special handling of termination exceptions which are uncatchable
4014  // by javascript code.
4015  __ cmp(eax, factory->termination_exception());
4016  Label throw_termination_exception;
4017  __ j(equal, &throw_termination_exception, Label::kNear);
4018 
4019  // Handle normal exception by following handler chain.
4020  __ Throw(eax);
4021 
4022  __ bind(&throw_termination_exception);
4023  __ ThrowUncatchable(eax);
4024 
4025  __ bind(&failure);
4026  // For failure to match, return null.
4027  __ mov(eax, factory->null_value());
4028  __ ret(4 * kPointerSize);
4029 
4030  // Load RegExp data.
4031  __ bind(&success);
4032  __ mov(eax, Operand(esp, kJSRegExpOffset));
4033  __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
4034  __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
4035  // Calculate number of capture registers (number_of_captures + 1) * 2.
4036  STATIC_ASSERT(kSmiTag == 0);
4037  STATIC_ASSERT(kSmiTagSize == 1);
4038  __ add(edx, Immediate(2)); // edx was a smi.
4039 
4040  // edx: Number of capture registers
4041  // Load last_match_info which is still known to be a fast case JSArray.
4042  __ mov(eax, Operand(esp, kLastMatchInfoOffset));
4043  __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
4044 
4045  // ebx: last_match_info backing store (FixedArray)
4046  // edx: number of capture registers
4047  // Store the capture count.
4048  __ SmiTag(edx); // Number of capture registers to smi.
4049  __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
4050  __ SmiUntag(edx); // Number of capture registers back from smi.
4051  // Store last subject and last input.
4052  __ mov(eax, Operand(esp, kSubjectOffset));
4053  __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
4054  __ RecordWriteField(ebx,
4055  RegExpImpl::kLastSubjectOffset,
4056  eax,
4057  edi,
4058  kDontSaveFPRegs);
4059  __ mov(eax, Operand(esp, kSubjectOffset));
4060  __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
4061  __ RecordWriteField(ebx,
4062  RegExpImpl::kLastInputOffset,
4063  eax,
4064  edi,
4065  kDontSaveFPRegs);
4066 
4067  // Get the static offsets vector filled by the native regexp code.
4068  ExternalReference address_of_static_offsets_vector =
4069  ExternalReference::address_of_static_offsets_vector(masm->isolate());
4070  __ mov(ecx, Immediate(address_of_static_offsets_vector));
4071 
4072  // ebx: last_match_info backing store (FixedArray)
4073  // ecx: offsets vector
4074  // edx: number of capture registers
4075  Label next_capture, done;
4076  // Capture register counter starts from number of capture registers and
4077  // counts down until wrapping after zero.
4078  __ bind(&next_capture);
4079  __ sub(edx, Immediate(1));
4080  __ j(negative, &done, Label::kNear);
4081  // Read the value from the static offsets vector buffer.
4082  __ mov(edi, Operand(ecx, edx, times_int_size, 0));
4083  __ SmiTag(edi);
4084  // Store the smi value in the last match info.
4085  __ mov(FieldOperand(ebx,
4086  edx,
4087  times_pointer_size,
4088  RegExpImpl::kFirstCaptureOffset),
4089  edi);
4090  __ jmp(&next_capture);
4091  __ bind(&done);
4092 
4093  // Return last match info.
4094  __ mov(eax, Operand(esp, kLastMatchInfoOffset));
4095  __ ret(4 * kPointerSize);
4096 
4097  // External string. Short external strings have already been ruled out.
4098  // eax: subject string (expected to be external)
4099  // ebx: scratch
4100  __ bind(&external_string);
4101  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
4102  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
4103  if (FLAG_debug_code) {
4104  // Assert that we do not have a cons or slice (indirect strings) here.
4105  // Sequential strings have already been ruled out.
4106  __ test_b(ebx, kIsIndirectStringMask);
4107  __ Assert(zero, "external string expected, but not found");
4108  }
4109  __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
4110  // Move the pointer so that offset-wise, it looks like a sequential string.
4111  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
4112  __ sub(eax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
4113  STATIC_ASSERT(kTwoByteStringTag == 0);
4114  __ test_b(ebx, kStringEncodingMask);
4115  __ j(not_zero, &seq_ascii_string);
4116  __ jmp(&seq_two_byte_string);
4117 
4118  // Do the runtime call to execute the regexp.
4119  __ bind(&runtime);
4120  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4121 #endif // V8_INTERPRETED_REGEXP
4122 }
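// Summary of the outgoing frame built for the native RegExp call above
// (esp-relative, one slot per argument; the per-argument comments in the
// code are the source of truth):
//
//   esp[0]  Argument 1: original subject string
//   esp[4]  Argument 2: previous index
//   esp[8]  Argument 3: start of string data
//   esp[12] Argument 4: end of string data
//   esp[16] Argument 5: static offsets vector buffer
//   esp[20] Argument 6: capture register count (0 forces non-global mode)
//   esp[24] Argument 7: start (high end) of backtracking stack memory
//   esp[28] Argument 8: direct call flag (1 == called from JavaScript)
//   esp[32] Argument 9: isolate address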
4123 
4124 
4125 void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
4126  const int kMaxInlineLength = 100;
4127  Label slowcase;
4128  Label done;
4129  __ mov(ebx, Operand(esp, kPointerSize * 3));
4130  __ JumpIfNotSmi(ebx, &slowcase);
4131  __ cmp(ebx, Immediate(Smi::FromInt(kMaxInlineLength)));
4132  __ j(above, &slowcase);
4133  // Smi-tagging is equivalent to multiplying by 2.
4134  STATIC_ASSERT(kSmiTag == 0);
4135  STATIC_ASSERT(kSmiTagSize == 1);
4136  // Allocate RegExpResult followed by FixedArray with size in ebx.
4137  // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
4138  // Elements: [Map][Length][..elements..]
4139  __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
4141  ebx, // In: Number of elements (times 2, being a smi)
4142  eax, // Out: Start of allocation (tagged).
4143  ecx, // Out: End of allocation.
4144  edx, // Scratch register
4145  &slowcase,
4146  TAG_OBJECT);
4147  // eax: Start of allocated area, object-tagged.
4148 
4149  // Set JSArray map to global.regexp_result_map().
4150  // Set empty properties FixedArray.
4151  // Set elements to point to FixedArray allocated right after the JSArray.
4152  // Interleave operations for better latency.
4153  __ mov(edx, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX));
4154  Factory* factory = masm->isolate()->factory();
4155  __ mov(ecx, Immediate(factory->empty_fixed_array()));
4156  __ lea(ebx, Operand(eax, JSRegExpResult::kSize));
4157  __ mov(edx, FieldOperand(edx, GlobalObject::kNativeContextOffset));
4158  __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
4159  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
4160  __ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX));
4161  __ mov(FieldOperand(eax, HeapObject::kMapOffset), edx);
4162 
4163  // Set input, index and length fields from arguments.
4164  __ mov(ecx, Operand(esp, kPointerSize * 1));
4165  __ mov(FieldOperand(eax, JSRegExpResult::kInputOffset), ecx);
4166  __ mov(ecx, Operand(esp, kPointerSize * 2));
4167  __ mov(FieldOperand(eax, JSRegExpResult::kIndexOffset), ecx);
4168  __ mov(ecx, Operand(esp, kPointerSize * 3));
4169  __ mov(FieldOperand(eax, JSArray::kLengthOffset), ecx);
4170 
4171  // Fill out the elements FixedArray.
4172  // eax: JSArray.
4173  // ebx: FixedArray.
4174  // ecx: Number of elements in array, as smi.
4175 
4176  // Set map.
4177  __ mov(FieldOperand(ebx, HeapObject::kMapOffset),
4178  Immediate(factory->fixed_array_map()));
4179  // Set length.
4180  __ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
4181  // Fill contents of fixed-array with undefined.
4182  __ SmiUntag(ecx);
4183  __ mov(edx, Immediate(factory->undefined_value()));
4184  __ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
4185  // Fill fixed array elements with undefined.
4186  // eax: JSArray.
4187  // ecx: Number of elements to fill.
4188  // ebx: Start of elements in FixedArray.
4189  // edx: undefined.
4190  Label loop;
4191  __ test(ecx, ecx);
4192  __ bind(&loop);
4193  __ j(less_equal, &done, Label::kNear); // Jump if ecx is negative or zero.
4194  __ sub(ecx, Immediate(1));
4195  __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
4196  __ jmp(&loop);
4197 
4198  __ bind(&done);
4199  __ ret(3 * kPointerSize);
4200 
4201  __ bind(&slowcase);
4202  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
4203 }
4204 
4205 
4206 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
4207  Register object,
4208  Register result,
4209  Register scratch1,
4210  Register scratch2,
4211  bool object_is_smi,
4212  Label* not_found) {
4213  // Use of registers. Register result is used as a temporary.
4214  Register number_string_cache = result;
4215  Register mask = scratch1;
4216  Register scratch = scratch2;
4217 
4218  // Load the number string cache.
4219  ExternalReference roots_array_start =
4220  ExternalReference::roots_array_start(masm->isolate());
4221  __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
4222  __ mov(number_string_cache,
4223  Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
4224  // Make the hash mask from the length of the number string cache. It
4225  // contains two elements (number and string) for each cache entry.
4226  __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
4227  __ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
4228  __ sub(mask, Immediate(1)); // Make mask.
4229 
4230  // Calculate the entry in the number string cache. The hash value in the
4231  // number string cache for smis is just the smi value, and the hash for
4232  // doubles is the xor of the upper and lower words. See
4233  // Heap::GetNumberStringCache.
4234  Label smi_hash_calculated;
4235  Label load_result_from_cache;
4236  if (object_is_smi) {
4237  __ mov(scratch, object);
4238  __ SmiUntag(scratch);
4239  } else {
4240  Label not_smi;
4241  STATIC_ASSERT(kSmiTag == 0);
4242  __ JumpIfNotSmi(object, &not_smi, Label::kNear);
4243  __ mov(scratch, object);
4244  __ SmiUntag(scratch);
4245  __ jmp(&smi_hash_calculated, Label::kNear);
4246  __ bind(&not_smi);
4247  __ cmp(FieldOperand(object, HeapObject::kMapOffset),
4248  masm->isolate()->factory()->heap_number_map());
4249  __ j(not_equal, not_found);
4250  STATIC_ASSERT(8 == kDoubleSize);
4251  __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
4252  __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
4253  // Object is heap number and hash is now in scratch. Calculate cache index.
4254  __ and_(scratch, mask);
4255  Register index = scratch;
4256  Register probe = mask;
4257  __ mov(probe,
4258  FieldOperand(number_string_cache,
4259  index,
4260  times_twice_pointer_size,
4261  FixedArray::kHeaderSize));
4262  __ JumpIfSmi(probe, not_found);
4263  if (CpuFeatures::IsSupported(SSE2)) {
4264  CpuFeatures::Scope fscope(SSE2);
4265  __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
4266  __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
4267  __ ucomisd(xmm0, xmm1);
4268  } else {
4269  __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
4270  __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
4271  __ FCmp();
4272  }
4273  __ j(parity_even, not_found); // Bail out if NaN is involved.
4274  __ j(not_equal, not_found); // The cache did not contain this value.
4275  __ jmp(&load_result_from_cache, Label::kNear);
4276  }
4277 
4278  __ bind(&smi_hash_calculated);
4279  // Object is smi and hash is now in scratch. Calculate cache index.
4280  __ and_(scratch, mask);
4281  Register index = scratch;
4282  // Check if the entry is the smi we are looking for.
4283  __ cmp(object,
4284  FieldOperand(number_string_cache,
4285  index,
4286  times_twice_pointer_size,
4287  FixedArray::kHeaderSize));
4288  __ j(not_equal, not_found);
4289 
4290  // Get the result from the cache.
4291  __ bind(&load_result_from_cache);
4292  __ mov(result,
4293  FieldOperand(number_string_cache,
4294  index,
4295  times_twice_pointer_size,
4296  FixedArray::kHeaderSize + kPointerSize));
4297  Counters* counters = masm->isolate()->counters();
4298  __ IncrementCounter(counters->number_to_string_native(), 1);
4299 }
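// The probing above mirrors the hash used by Heap::GetNumberStringCache.
// A C++ sketch, with double_to_bits() as a hypothetical stand-in for
// reinterpreting the ieee754 bits (illustration only, not part of the
// original file):
//
//   uint32_t hash;
//   if (object->IsSmi()) {
//     hash = Smi::cast(object)->value();               // smi hash: the value
//   } else {
//     uint64_t bits = double_to_bits(number);          // hypothetical helper
//     hash = (uint32_t)bits ^ (uint32_t)(bits >> 32);  // xor of the words
//   }
//   int index = hash & mask;   // mask == (cache length / 2) - 1
//   // cache[2 * index] holds the number, cache[2 * index + 1] the string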
4300 
4301 
4302 void NumberToStringStub::Generate(MacroAssembler* masm) {
4303  Label runtime;
4304 
4305  __ mov(ebx, Operand(esp, kPointerSize));
4306 
4307  // Generate code to lookup number in the number string cache.
4308  GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, false, &runtime);
4309  __ ret(1 * kPointerSize);
4310 
4311  __ bind(&runtime);
4312  // Handle number to string in the runtime system if not found in the cache.
4313  __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
4314 }
4315 
4316 
4317 static int NegativeComparisonResult(Condition cc) {
4318  ASSERT(cc != equal);
4319  ASSERT((cc == less) || (cc == less_equal)
4320  || (cc == greater) || (cc == greater_equal));
4321  return (cc == greater || cc == greater_equal) ? LESS : GREATER;
4322 }
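// Returning the negated result makes relational comparisons involving NaN
// or undefined come out false for both operand orders: e.g. for cc == less
// the stub materializes GREATER (not LESS), so both (NaN < x) and (x < NaN)
// are reported as false to the caller. Concretely,
// NegativeComparisonResult(less) == GREATER and
// NegativeComparisonResult(greater_equal) == LESS.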
4323 
4324 void CompareStub::Generate(MacroAssembler* masm) {
4325  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
4326 
4327  Label check_unequal_objects;
4328 
4329  // Compare two smis if required.
4330  if (include_smi_compare_) {
4331  Label non_smi, smi_done;
4332  __ mov(ecx, edx);
4333  __ or_(ecx, eax);
4334  __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
4335  __ sub(edx, eax); // Return on the result of the subtraction.
4336  __ j(no_overflow, &smi_done, Label::kNear);
4337  __ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
4338  __ bind(&smi_done);
4339  __ mov(eax, edx);
4340  __ ret(0);
4341  __ bind(&non_smi);
4342  } else if (FLAG_debug_code) {
4343  __ mov(ecx, edx);
4344  __ or_(ecx, eax);
4345  __ test(ecx, Immediate(kSmiTagMask));
4346  __ Assert(not_zero, "Unexpected smi operands.");
4347  }
4348 
4349  // NOTICE! This code is only reached after a smi-fast-case check, so
4350  // it is certain that at least one operand isn't a smi.
4351 
4352  // Identical objects can be compared fast, but there are some tricky cases
4353  // for NaN and undefined.
4354  {
4355  Label not_identical;
4356  __ cmp(eax, edx);
4357  __ j(not_equal, &not_identical);
4358 
4359  if (cc_ != equal) {
4360  // Check for undefined. undefined OP undefined is false even though
4361  // undefined == undefined.
4362  Label check_for_nan;
4363  __ cmp(edx, masm->isolate()->factory()->undefined_value());
4364  __ j(not_equal, &check_for_nan, Label::kNear);
4365  __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
4366  __ ret(0);
4367  __ bind(&check_for_nan);
4368  }
4369 
4370  // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
4371  // so we do the second best thing - test it ourselves.
4372  // Note: if cc_ != equal, never_nan_nan_ is not used.
4373  if (never_nan_nan_ && (cc_ == equal)) {
4374  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
4375  __ ret(0);
4376  } else {
4377  Label heap_number;
4378  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
4379  Immediate(masm->isolate()->factory()->heap_number_map()));
4380  __ j(equal, &heap_number, Label::kNear);
4381  if (cc_ != equal) {
4382  // Call runtime on identical JSObjects. Otherwise return equal.
4383  __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
4384  __ j(above_equal, &not_identical);
4385  }
4386  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
4387  __ ret(0);
4388 
4389  __ bind(&heap_number);
4390  // It is a heap number, so return non-equal if it's NaN and equal if
4391  // it's not NaN.
4392  // The representation of NaN values has all exponent bits (52..62) set,
4393  // and not all mantissa bits (0..51) clear.
4394  // We only accept QNaNs, which have bit 51 set.
4395  // Read top bits of double representation (second word of value).
4396 
4397  // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
4398  // all bits in the mask are set. We only need to check the word
4399  // that contains the exponent and high bit of the mantissa.
4400  STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
4401  __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
4402  __ Set(eax, Immediate(0));
4403  // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
4404  // bits.
4405  __ add(edx, edx);
4406  __ cmp(edx, kQuietNaNHighBitsMask << 1);
4407  if (cc_ == equal) {
4408  STATIC_ASSERT(EQUAL != 1);
4409  __ setcc(above_equal, eax);
4410  __ ret(0);
4411  } else {
4412  Label nan;
4413  __ j(above_equal, &nan, Label::kNear);
4414  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
4415  __ ret(0);
4416  __ bind(&nan);
4417  __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
4418  __ ret(0);
4419  }
4420  }
4421 
4422  __ bind(&not_identical);
4423  }
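// The QNaN test above inspects only the high 32 bits of the double. An
// equivalent C++ sketch, with high_word() as a stand-in for the load from
// HeapNumber::kExponentOffset (not part of the original file):
//
//   uint32_t hi = high_word(value);     // sign, exponent, mantissa top
//   bool is_quiet_nan =
//       (uint32_t)(hi << 1) >= (uint32_t)(kQuietNaNHighBitsMask << 1);
//
// Shifting left by one discards the sign bit, so a single unsigned compare
// handles both +NaN and -NaN.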
4424 
4425  // Strict equality can quickly decide whether objects are equal.
4426  // Non-strict object equality is slower, so it is handled later in the stub.
4427  if (cc_ == equal && strict_) {
4428  Label slow; // Fallthrough label.
4429  Label not_smis;
4430  // If we're doing a strict equality comparison, we don't have to do
4431  // type conversion, so we generate code to do fast comparison for objects
4432  // and oddballs. Non-smi numbers and strings still go through the usual
4433  // slow-case code.
4434  // If either is a Smi (we know that not both are), then they can only
4435  // be equal if the other is a HeapNumber. If so, use the slow case.
4436  STATIC_ASSERT(kSmiTag == 0);
4437  ASSERT_EQ(0, Smi::FromInt(0));
4438  __ mov(ecx, Immediate(kSmiTagMask));
4439  __ and_(ecx, eax);
4440  __ test(ecx, edx);
4441  __ j(not_zero, &not_smis, Label::kNear);
4442  // One operand is a smi.
4443 
4444  // Check whether the non-smi is a heap number.
4445  STATIC_ASSERT(kSmiTagMask == 1);
4446  // ecx still holds eax & kSmiTag, which is either zero or one.
4447  __ sub(ecx, Immediate(0x01));
4448  __ mov(ebx, edx);
4449  __ xor_(ebx, eax);
4450  __ and_(ebx, ecx); // ebx holds either 0 or eax ^ edx.
4451  __ xor_(ebx, eax);
4452  // if eax was smi, ebx is now edx, else eax.
4453 
4454  // Check if the non-smi operand is a heap number.
4455  __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
4456  Immediate(masm->isolate()->factory()->heap_number_map()));
4457  // If heap number, handle it in the slow case.
4458  __ j(equal, &slow, Label::kNear);
4459  // Return non-equal (ebx is not zero)
4460  __ mov(eax, ebx);
4461  __ ret(0);
4462 
4463  __ bind(&not_smis);
4464  // If either operand is a JSObject or an oddball value, then they are not
4465  // equal, since their pointers are different.
4466  // There is no test for undetectability in strict equality.
4467 
4468  // Get the type of the first operand.
4469  // If the first object is a JS object, we have done pointer comparison.
4470  Label first_non_object;
4471  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
4472  __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
4473  __ j(below, &first_non_object, Label::kNear);
4474 
4475  // Return non-zero (eax is not zero)
4476  Label return_not_equal;
4477  STATIC_ASSERT(kHeapObjectTag != 0);
4478  __ bind(&return_not_equal);
4479  __ ret(0);
4480 
4481  __ bind(&first_non_object);
4482  // Check for oddballs: true, false, null, undefined.
4483  __ CmpInstanceType(ecx, ODDBALL_TYPE);
4484  __ j(equal, &return_not_equal);
4485 
4486  __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ecx);
4487  __ j(above_equal, &return_not_equal);
4488 
4489  // Check for oddballs: true, false, null, undefined.
4490  __ CmpInstanceType(ecx, ODDBALL_TYPE);
4491  __ j(equal, &return_not_equal);
4492 
4493  // Fall through to the general case.
4494  __ bind(&slow);
4495  }
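// The branch-free selection above ends with ebx holding the operand that
// may be a heap number. With mask == (eax & kSmiTagMask) - 1, i.e. -1 when
// eax is a smi and 0 when it is not, the sequence computes (a sketch, not
// part of the original file):
//
//   ebx = ((edx ^ eax) & mask) ^ eax;
//   // mask == -1 (eax is the smi):     ebx == edx
//   // mask ==  0 (eax is the non-smi): ebx == eax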
4496 
4497  // Generate the number comparison code.
4498  if (include_number_compare_) {
4499  Label non_number_comparison;
4500  Label unordered;
4501  if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) {
4502  CpuFeatures::Scope use_sse2(SSE2);
4503  CpuFeatures::Scope use_cmov(CMOV);
4504 
4505  FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
4506  __ ucomisd(xmm0, xmm1);
4507 
4508  // Don't base result on EFLAGS when a NaN is involved.
4509  __ j(parity_even, &unordered, Label::kNear);
4510  // Return a result of -1, 0, or 1, based on EFLAGS.
4511  __ mov(eax, 0); // equal
4512  __ mov(ecx, Immediate(Smi::FromInt(1)));
4513  __ cmov(above, eax, ecx);
4514  __ mov(ecx, Immediate(Smi::FromInt(-1)));
4515  __ cmov(below, eax, ecx);
4516  __ ret(0);
4517  } else {
4518  FloatingPointHelper::CheckFloatOperands(
4519  masm, &non_number_comparison, ebx);
4520  FloatingPointHelper::LoadFloatOperand(masm, eax);
4521  FloatingPointHelper::LoadFloatOperand(masm, edx);
4522  __ FCmp();
4523 
4524  // Don't base result on EFLAGS when a NaN is involved.
4525  __ j(parity_even, &unordered, Label::kNear);
4526 
4527  Label below_label, above_label;
4528  // Return a result of -1, 0, or 1, based on EFLAGS.
4529  __ j(below, &below_label, Label::kNear);
4530  __ j(above, &above_label, Label::kNear);
4531 
4532  __ Set(eax, Immediate(0));
4533  __ ret(0);
4534 
4535  __ bind(&below_label);
4536  __ mov(eax, Immediate(Smi::FromInt(-1)));
4537  __ ret(0);
4538 
4539  __ bind(&above_label);
4540  __ mov(eax, Immediate(Smi::FromInt(1)));
4541  __ ret(0);
4542  }
4543 
4544  // If one of the numbers was NaN, then the result is always false.
4545  // The cc is never not-equal.
4546  __ bind(&unordered);
4547  ASSERT(cc_ != not_equal);
4548  if (cc_ == less || cc_ == less_equal) {
4549  __ mov(eax, Immediate(Smi::FromInt(1)));
4550  } else {
4551  __ mov(eax, Immediate(Smi::FromInt(-1)));
4552  }
4553  __ ret(0);
4554 
4555  // The number comparison code did not provide a valid result.
4556  __ bind(&non_number_comparison);
4557  }
4558 
4559  // Fast negative check for symbol-to-symbol equality.
4560  Label check_for_strings;
4561  if (cc_ == equal) {
4562  BranchIfNonSymbol(masm, &check_for_strings, eax, ecx);
4563  BranchIfNonSymbol(masm, &check_for_strings, edx, ecx);
4564 
4565  // We've already checked for object identity, so if both operands
4566  // are symbols they aren't equal. Register eax already holds a
4567  // non-zero value, which indicates not equal, so just return.
4568  __ ret(0);
4569  }
4570 
4571  __ bind(&check_for_strings);
4572 
4573  __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
4574  &check_unequal_objects);
4575 
4576  // Inline comparison of ASCII strings.
4577  if (cc_ == equal) {
4578  StringCompareStub::GenerateFlatAsciiStringEquals(masm,
4579  edx,
4580  eax,
4581  ecx,
4582  ebx);
4583  } else {
4584  StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
4585  edx,
4586  eax,
4587  ecx,
4588  ebx,
4589  edi);
4590  }
4591 #ifdef DEBUG
4592  __ Abort("Unexpected fall-through from string comparison");
4593 #endif
4594 
4595  __ bind(&check_unequal_objects);
4596  if (cc_ == equal && !strict_) {
4597  // Non-strict equality. Objects are unequal if
4598  // they are both JSObjects and not undetectable,
4599  // and their pointers are different.
4600  Label not_both_objects;
4601  Label return_unequal;
4602  // At most one is a smi, so we can test for smi by adding the two.
4603  // A smi plus a heap object has the low bit set, a heap object plus
4604  // a heap object has the low bit clear.
4605  STATIC_ASSERT(kSmiTag == 0);
4606  STATIC_ASSERT(kSmiTagMask == 1);
4607  __ lea(ecx, Operand(eax, edx, times_1, 0));
4608  __ test(ecx, Immediate(kSmiTagMask));
4609  __ j(not_zero, &not_both_objects, Label::kNear);
4610  __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
4611  __ j(below, &not_both_objects, Label::kNear);
4612  __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ebx);
4613  __ j(below, &not_both_objects, Label::kNear);
4614  // We do not bail out after this point. Both are JSObjects, and
4615  // they are equal if and only if both are undetectable.
4616  // The and of the undetectable flags is 1 if and only if they are equal.
4617  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
4618  1 << Map::kIsUndetectable);
4619  __ j(zero, &return_unequal, Label::kNear);
4620  __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
4621  1 << Map::kIsUndetectable);
4622  __ j(zero, &return_unequal, Label::kNear);
4623  // The objects are both undetectable, so they both compare as the value
4624  // undefined, and are equal.
4625  __ Set(eax, Immediate(EQUAL));
4626  __ bind(&return_unequal);
4627  // Return non-equal by returning the non-zero object pointer in eax,
4628  // or return equal if we fell through to here.
4629  __ ret(0);
4630  __ bind(&not_both_objects);
4631  }
4632 
4633  // Push arguments below the return address.
4634  __ pop(ecx);
4635  __ push(edx);
4636  __ push(eax);
4637 
4638  // Figure out which native to call and setup the arguments.
4639  Builtins::JavaScript builtin;
4640  if (cc_ == equal) {
4641  builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
4642  } else {
4643  builtin = Builtins::COMPARE;
4644  __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
4645  }
4646 
4647  // Restore return address on the stack.
4648  __ push(ecx);
4649 
4650  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
4651  // tagged as a small integer.
4652  __ InvokeBuiltin(builtin, JUMP_FUNCTION);
4653 }
4654 
4655 
4656 void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
4657  Label* label,
4658  Register object,
4659  Register scratch) {
4660  __ JumpIfSmi(object, label);
4661  __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
4662  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
4663  __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
4664  __ cmp(scratch, kSymbolTag | kStringTag);
4665  __ j(not_equal, label);
4666 }
4667 
4668 
4669 void StackCheckStub::Generate(MacroAssembler* masm) {
4670  __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
4671 }
4672 
4673 
4674 void InterruptStub::Generate(MacroAssembler* masm) {
4675  __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
4676 }
4677 
4678 
4679 static void GenerateRecordCallTarget(MacroAssembler* masm) {
4680  // Cache the called function in a global property cell. Cache states
4681  // are uninitialized, monomorphic (indicated by a JSFunction), and
4682  // megamorphic.
4683  // ebx : cache cell for call target
4684  // edi : the function to call
4685  Isolate* isolate = masm->isolate();
4686  Label initialize, done;
4687 
4688  // Load the cache state into ecx.
4690 
4691  // A monomorphic cache hit or an already megamorphic state: invoke the
4692  // function without changing the state.
4693  __ cmp(ecx, edi);
4694  __ j(equal, &done, Label::kNear);
4695  __ cmp(ecx, Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
4696  __ j(equal, &done, Label::kNear);
4697 
4698  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
4699  // megamorphic.
4700  __ cmp(ecx, Immediate(TypeFeedbackCells::UninitializedSentinel(isolate)));
4701  __ j(equal, &initialize, Label::kNear);
4702  // MegamorphicSentinel is an immortal immovable object (undefined) so no
4703  // write-barrier is needed.
4704  __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
4705  Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
4706  __ jmp(&done, Label::kNear);
4707 
4708  // An uninitialized cache is patched with the function.
4709  __ bind(&initialize);
4710  __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), edi);
4711  // No need for a write barrier here - cells are rescanned.
4712 
4713  __ bind(&done);
4714 }
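// State transitions implemented above for the call target cell, as a
// summary (not part of the original file):
//
//   uninitialized --(first call)------> monomorphic(function)
//   monomorphic   --(same function)---> monomorphic (no write)
//   monomorphic   --(other function)--> megamorphic
//   megamorphic   --(any call)--------> megamorphic (terminal; the
//                  sentinel is immortal, so no write barrier is needed)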
4715 
4716 
4717 void CallFunctionStub::Generate(MacroAssembler* masm) {
4718  // ebx : cache cell for call target
4719  // edi : the function to call
4720  Isolate* isolate = masm->isolate();
4721  Label slow, non_function;
4722 
4723  // The receiver might implicitly be the global object. This is
4724  // indicated by passing the hole as the receiver to the call
4725  // function stub.
4726  if (ReceiverMightBeImplicit()) {
4727  Label receiver_ok;
4728  // Get the receiver from the stack.
4729  // +1 ~ return address
4730  __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
4731  // Call as function is indicated with the hole.
4732  __ cmp(eax, isolate->factory()->the_hole_value());
4733  __ j(not_equal, &receiver_ok, Label::kNear);
4734  // Patch the receiver on the stack with the global receiver object.
4735  __ mov(ecx, GlobalObjectOperand());
4736  __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
4737  __ mov(Operand(esp, (argc_ + 1) * kPointerSize), ecx);
4738  __ bind(&receiver_ok);
4739  }
4740 
4741  // Check that the function really is a JavaScript function.
4742  __ JumpIfSmi(edi, &non_function);
4743  // Goto slow case if we do not have a function.
4744  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
4745  __ j(not_equal, &slow);
4746 
4747  if (RecordCallTarget()) {
4748  GenerateRecordCallTarget(masm);
4749  }
4750 
4751  // Fast-case: Just invoke the function.
4752  ParameterCount actual(argc_);
4753 
4754  if (ReceiverMightBeImplicit()) {
4755  Label call_as_function;
4756  __ cmp(eax, isolate->factory()->the_hole_value());
4757  __ j(equal, &call_as_function);
4758  __ InvokeFunction(edi,
4759  actual,
4760  JUMP_FUNCTION,
4761  NullCallWrapper(),
4762  CALL_AS_METHOD);
4763  __ bind(&call_as_function);
4764  }
4765  __ InvokeFunction(edi,
4766  actual,
4767  JUMP_FUNCTION,
4768  NullCallWrapper(),
4769  CALL_AS_METHOD);
4770 
4771  // Slow-case: Non-function called.
4772  __ bind(&slow);
4773  if (RecordCallTarget()) {
4774  // If there is a call target cache, mark it megamorphic in the
4775  // non-function case. MegamorphicSentinel is an immortal immovable
4776  // object (undefined) so no write barrier is needed.
4777  __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
4778  Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
4779  }
4780  // Check for function proxy.
4781  __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
4782  __ j(not_equal, &non_function);
4783  __ pop(ecx);
4784  __ push(edi); // put proxy as additional argument under return address
4785  __ push(ecx);
4786  __ Set(eax, Immediate(argc_ + 1));
4787  __ Set(ebx, Immediate(0));
4788  __ SetCallKind(ecx, CALL_AS_FUNCTION);
4789  __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
4790  {
4791  Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
4792  __ jmp(adaptor, RelocInfo::CODE_TARGET);
4793  }
4794 
4795  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
4796  // of the original receiver from the call site).
4797  __ bind(&non_function);
4798  __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi);
4799  __ Set(eax, Immediate(argc_));
4800  __ Set(ebx, Immediate(0));
4801  __ SetCallKind(ecx, CALL_AS_METHOD);
4802  __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
4803  Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
4804  __ jmp(adaptor, RelocInfo::CODE_TARGET);
4805 }
4806 
4807 
4808 void CallConstructStub::Generate(MacroAssembler* masm) {
4809  // eax : number of arguments
4810  // ebx : cache cell for call target
4811  // edi : constructor function
4812  Label slow, non_function_call;
4813 
4814  // Check that function is not a smi.
4815  __ JumpIfSmi(edi, &non_function_call);
4816  // Check that function is a JSFunction.
4817  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
4818  __ j(not_equal, &slow);
4819 
4820  if (RecordCallTarget()) {
4821  GenerateRecordCallTarget(masm);
4822  }
4823 
4824  // Jump to the function-specific construct stub.
4825  __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
4826  __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset));
4827  __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
4828  __ jmp(ebx);
4829 
4830  // edi: called object
4831  // eax: number of arguments
4832  // ecx: object map
4833  Label do_call;
4834  __ bind(&slow);
4835  __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
4836  __ j(not_equal, &non_function_call);
4837  __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
4838  __ jmp(&do_call);
4839 
4840  __ bind(&non_function_call);
4841  __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
4842  __ bind(&do_call);
4843  // Set expected number of arguments to zero (not changing eax).
4844  __ Set(ebx, Immediate(0));
4845  Handle<Code> arguments_adaptor =
4846  masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
4847  __ SetCallKind(ecx, CALL_AS_METHOD);
4848  __ jmp(arguments_adaptor, RelocInfo::CODE_TARGET);
4849 }
4850 
4851 
4852 bool CEntryStub::NeedsImmovableCode() {
4853  return false;
4854 }
4855 
4856 
4857 bool CEntryStub::IsPregenerated() {
4858  return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
4859  result_size_ == 1;
4860 }
4861 
4862 
4863 void CodeStub::GenerateStubsAheadOfTime() {
4864  CEntryStub::GenerateAheadOfTime();
4865  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
4866  // It is important that the store buffer overflow stubs are generated first.
4867  RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
4868 }
4869 
4870 
4871 void CodeStub::GenerateFPStubs() {
4872  CEntryStub save_doubles(1, kSaveFPRegs);
4873  Handle<Code> code = save_doubles.GetCode();
4874  code->set_is_pregenerated(true);
4875  code->GetIsolate()->set_fp_stubs_generated(true);
4876 }
4877 
4878 
4879 void CEntryStub::GenerateAheadOfTime() {
4880  CEntryStub stub(1, kDontSaveFPRegs);
4881  Handle<Code> code = stub.GetCode();
4882  code->set_is_pregenerated(true);
4883 }
4884 
4885 
4886 void CEntryStub::GenerateCore(MacroAssembler* masm,
4887  Label* throw_normal_exception,
4888  Label* throw_termination_exception,
4889  Label* throw_out_of_memory_exception,
4890  bool do_gc,
4891  bool always_allocate_scope) {
4892  // eax: result parameter for PerformGC, if any
4893  // ebx: pointer to C function (C callee-saved)
4894  // ebp: frame pointer (restored after C call)
4895  // esp: stack pointer (restored after C call)
4896  // edi: number of arguments including receiver (C callee-saved)
4897  // esi: pointer to the first argument (C callee-saved)
4898 
4899  // Result returned in eax, or eax+edx if result_size_ is 2.
4900 
4901  // Check stack alignment.
4902  if (FLAG_debug_code) {
4903  __ CheckStackAlignment();
4904  }
4905 
4906  if (do_gc) {
4907  // Pass failure code returned from last attempt as first argument to
4908  // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
4909  // stack alignment is known to be correct. This function takes one argument
4910  // which is passed on the stack, and we know that the stack has been
4911  // prepared to pass at least one argument.
4912  __ mov(Operand(esp, 0 * kPointerSize), eax); // Result.
4913  __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
4914  }
4915 
4916  ExternalReference scope_depth =
4917  ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
4918  if (always_allocate_scope) {
4919  __ inc(Operand::StaticVariable(scope_depth));
4920  }
4921 
4922  // Call C function.
4923  __ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
4924  __ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
4925  __ mov(Operand(esp, 2 * kPointerSize),
4926  Immediate(ExternalReference::isolate_address()));
4927  __ call(ebx);
4928  // Result is in eax or edx:eax - do not destroy these registers!
4929 
4930  if (always_allocate_scope) {
4931  __ dec(Operand::StaticVariable(scope_depth));
4932  }
4933 
4934  // Make sure we're not trying to return 'the hole' from the runtime
4935  // call as this may lead to crashes in the IC code later.
4936  if (FLAG_debug_code) {
4937  Label okay;
4938  __ cmp(eax, masm->isolate()->factory()->the_hole_value());
4939  __ j(not_equal, &okay, Label::kNear);
4940  __ int3();
4941  __ bind(&okay);
4942  }
4943 
4944  // Check for failure result.
4945  Label failure_returned;
4946  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
4947  __ lea(ecx, Operand(eax, 1));
4948  // Lower 2 bits of ecx are 0 iff eax has failure tag.
4949  __ test(ecx, Immediate(kFailureTagMask));
4950  __ j(zero, &failure_returned);
4951 
4952  ExternalReference pending_exception_address(
4953  Isolate::kPendingExceptionAddress, masm->isolate());
4954 
4955  // Check that there is no pending exception, otherwise we
4956  // should have returned some failure value.
4957  if (FLAG_debug_code) {
4958  __ push(edx);
4959  __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
4960  Label okay;
4961  __ cmp(edx, Operand::StaticVariable(pending_exception_address));
4962  // Cannot use check here as it attempts to generate call into runtime.
4963  __ j(equal, &okay, Label::kNear);
4964  __ int3();
4965  __ bind(&okay);
4966  __ pop(edx);
4967  }
4968 
4969  // Exit the JavaScript to C++ exit frame.
4970  __ LeaveExitFrame(save_doubles_ == kSaveFPRegs);
4971  __ ret(0);
4972 
4973  // Handling of failure.
4974  __ bind(&failure_returned);
4975 
4976  Label retry;
4977  // If the returned failure is RETRY_AFTER_GC, continue at the retry label.
4978  STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
4979  __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
4980  __ j(zero, &retry, Label::kNear);
4981 
4982  // Special handling of out of memory exceptions.
4983  __ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
4984  __ j(equal, throw_out_of_memory_exception);
4985 
4986  // Retrieve the pending exception and clear the variable.
4987  __ mov(eax, Operand::StaticVariable(pending_exception_address));
4988  __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
4989  __ mov(Operand::StaticVariable(pending_exception_address), edx);
4990 
4991  // Special handling of termination exceptions which are uncatchable
4992  // by javascript code.
4993  __ cmp(eax, masm->isolate()->factory()->termination_exception());
4994  __ j(equal, throw_termination_exception);
4995 
4996  // Handle normal exception.
4997  __ jmp(throw_normal_exception);
4998 
4999  // Retry.
5000  __ bind(&retry);
5001 }
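// The failure check above relies on failure objects carrying kFailureTag
// (0b11) in their low two bits, so adding one clears those bits exactly
// for failures. A sketch (not part of the original file):
//
//   bool is_failure = (((uintptr_t)result + 1) & kFailureTagMask) == 0;
//   // failure:     ...11 + 1 -> ...00  (branch to failure_returned)
//   // heap object: ...01 + 1 -> ...10  (falls through)
//   // smi:         ...x0 + 1 -> ...x1  (falls through)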
5002 
5003 
5004 void CEntryStub::Generate(MacroAssembler* masm) {
5005  // eax: number of arguments including receiver
5006  // ebx: pointer to C function (C callee-saved)
5007  // ebp: frame pointer (restored after C call)
5008  // esp: stack pointer (restored after C call)
5009  // esi: current context (C callee-saved)
5010  // edi: JS function of the caller (C callee-saved)
5011 
5012  // NOTE: Invocations of builtins may return failure objects instead
5013  // of a proper result. The builtin entry handles this by performing
5014  // a garbage collection and retrying the builtin (twice).
5015 
5016  // Enter the exit frame that transitions from JavaScript to C++.
5017  __ EnterExitFrame(save_doubles_ == kSaveFPRegs);
5018 
5019  // eax: result parameter for PerformGC, if any (setup below)
5020  // ebx: pointer to builtin function (C callee-saved)
5021  // ebp: frame pointer (restored after C call)
5022  // esp: stack pointer (restored after C call)
5023  // edi: number of arguments including receiver (C callee-saved)
5024  // esi: argv pointer (C callee-saved)
5025 
5026  Label throw_normal_exception;
5027  Label throw_termination_exception;
5028  Label throw_out_of_memory_exception;
5029 
5030  // Call into the runtime system.
5031  GenerateCore(masm,
5032  &throw_normal_exception,
5033  &throw_termination_exception,
5034  &throw_out_of_memory_exception,
5035  false,
5036  false);
5037 
5038  // Do space-specific GC and retry runtime call.
5039  GenerateCore(masm,
5040  &throw_normal_exception,
5041  &throw_termination_exception,
5042  &throw_out_of_memory_exception,
5043  true,
5044  false);
5045 
5046  // Do full GC and retry runtime call one final time.
5047  Failure* failure = Failure::InternalError();
5048  __ mov(eax, Immediate(reinterpret_cast<int32_t>(failure)));
5049  GenerateCore(masm,
5050  &throw_normal_exception,
5051  &throw_termination_exception,
5052  &throw_out_of_memory_exception,
5053  true,
5054  true);
5055 
5056  __ bind(&throw_out_of_memory_exception);
5057  // Set external caught exception to false.
5058  Isolate* isolate = masm->isolate();
5059  ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
5060  isolate);
5061  __ mov(Operand::StaticVariable(external_caught), Immediate(false));
5062 
5063  // Set pending exception and eax to out of memory exception.
5064  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
5065  isolate);
5066  __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
5067  __ mov(Operand::StaticVariable(pending_exception), eax);
5068  // Fall through to the next label.
5069 
5070  __ bind(&throw_termination_exception);
5071  __ ThrowUncatchable(eax);
5072 
5073  __ bind(&throw_normal_exception);
5074  __ Throw(eax);
5075 }
5076 
5077 
5078 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
5079  Label invoke, handler_entry, exit;
5080  Label not_outermost_js, not_outermost_js_2;
5081 
5082  // Set up frame.
5083  __ push(ebp);
5084  __ mov(ebp, esp);
5085 
5086  // Push marker in two places.
5087  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
5088  __ push(Immediate(Smi::FromInt(marker))); // context slot
5089  __ push(Immediate(Smi::FromInt(marker))); // function slot
5090  // Save callee-saved registers (C calling conventions).
5091  __ push(edi);
5092  __ push(esi);
5093  __ push(ebx);
5094 
5095  // Save copies of the top frame descriptor on the stack.
5096  ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, masm->isolate());
5097  __ push(Operand::StaticVariable(c_entry_fp));
5098 
5099  // If this is the outermost JS call, set js_entry_sp value.
5100  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress,
5101  masm->isolate());
5102  __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
5103  __ j(not_equal, &not_outermost_js, Label::kNear);
5104  __ mov(Operand::StaticVariable(js_entry_sp), ebp);
5105  __ push(Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
5106  __ jmp(&invoke, Label::kNear);
5107  __ bind(&not_outermost_js);
5108  __ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
5109 
5110  // Jump to a faked try block that does the invoke, with a faked catch
5111  // block that sets the pending exception.
5112  __ jmp(&invoke);
5113  __ bind(&handler_entry);
5114  handler_offset_ = handler_entry.pos();
5115  // Caught exception: Store result (exception) in the pending exception
5116  // field in the JSEnv and return a failure sentinel.
5117  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
5118  masm->isolate());
5119  __ mov(Operand::StaticVariable(pending_exception), eax);
5120  __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
5121  __ jmp(&exit);
5122 
5123  // Invoke: Link this frame into the handler chain. There's only one
5124  // handler block in this code object, so its index is 0.
5125  __ bind(&invoke);
5126  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
5127 
5128  // Clear any pending exceptions.
5129  __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
5130  __ mov(Operand::StaticVariable(pending_exception), edx);
5131 
5132  // Fake a receiver (NULL).
5133  __ push(Immediate(0)); // receiver
5134 
5135  // Invoke the function by calling through JS entry trampoline builtin and
5136  // pop the faked function when we return. Notice that we cannot store a
5137  // reference to the trampoline code directly in this stub, because the
5138  // builtin stubs may not have been generated yet.
5139  if (is_construct) {
5140  ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
5141  masm->isolate());
5142  __ mov(edx, Immediate(construct_entry));
5143  } else {
5144  ExternalReference entry(Builtins::kJSEntryTrampoline,
5145  masm->isolate());
5146  __ mov(edx, Immediate(entry));
5147  }
5148  __ mov(edx, Operand(edx, 0)); // deref address
5150  __ call(edx);
5151 
5152  // Unlink this frame from the handler chain.
5153  __ PopTryHandler();
5154 
5155  __ bind(&exit);
5156  // Check if the current stack frame is marked as the outermost JS frame.
5157  __ pop(ebx);
5158  __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
5159  __ j(not_equal, &not_outermost_js_2);
5160  __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
5161  __ bind(&not_outermost_js_2);
5162 
5163  // Restore the top frame descriptor from the stack.
5164  __ pop(Operand::StaticVariable(ExternalReference(
5165  Isolate::kCEntryFPAddress,
5166  masm->isolate())));
5167 
5168  // Restore callee-saved registers (C calling conventions).
5169  __ pop(ebx);
5170  __ pop(esi);
5171  __ pop(edi);
5172  __ add(esp, Immediate(2 * kPointerSize)); // remove markers
5173 
5174  // Restore frame pointer and return.
5175  __ pop(ebp);
5176  __ ret(0);
5177 }
5178 
5179 
5180 // Generate stub code for instanceof.
5181 // This code can patch a call-site inline cache of the instanceof check,
5182 // which looks like this:
5183 //
5184 // 81 ff XX XX XX XX cmp edi, <the hole, patched to a map>
5185 // 75 0a jne <some near label>
5186 // b8 XX XX XX XX mov eax, <the hole, patched to either true or false>
5187 //
5188 // If call site patching is requested the stack will have the delta from the
5189 // return address to the cmp instruction just below the return address. This
5190 // also means that call site patching can only take place with arguments in
5191 // registers. TOS looks like this when call site patching is requested
5192 //
5193 // esp[0] : return address
5194 // esp[4] : delta from return address to cmp instruction
5195 //
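// ---------------------------------------------------------------------------
// Editor's sketch (not part of the V8 source): how the deltas used by the
// stub below fall out of the byte layout above. "cmp edi, imm32" is 81 ff
// plus a 4-byte immediate (6 bytes) and "jne rel8" is 2 bytes, so the
// "mov eax, imm32" opcode sits at offset 8 and its immediate at offset 9.
// The struct is a hypothetical illustration, not a V8 type.

#include <cstdint>

struct InlineCheckSiteSketch {
  uint8_t* cmp_start;  // address of the "cmp edi, <map>" instruction
  uint8_t* cmp_immediate() const { return cmp_start + 2; }  // kDeltaToCmpImmediate
  uint8_t* mov_opcode() const { return cmp_start + 8; }     // kDeltaToMov
  uint8_t* mov_immediate() const { return cmp_start + 9; }  // kDeltaToMovImmediate
};
// ---------------------------------------------------------------------------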
5196 void InstanceofStub::Generate(MacroAssembler* masm) {
5197  // Call site inlining and patching implies arguments in registers.
5198  ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
5199 
5200  // Fixed register usage throughout the stub.
5201  Register object = eax; // Object (lhs).
5202  Register map = ebx; // Map of the object.
5203  Register function = edx; // Function (rhs).
5204  Register prototype = edi; // Prototype of the function.
5205  Register scratch = ecx;
5206 
5207  // Constants describing the call site code to patch.
5208  static const int kDeltaToCmpImmediate = 2;
5209  static const int kDeltaToMov = 8;
5210  static const int kDeltaToMovImmediate = 9;
5211  static const int8_t kCmpEdiOperandByte1 = BitCast<int8_t, uint8_t>(0x3b);
5212  static const int8_t kCmpEdiOperandByte2 = BitCast<int8_t, uint8_t>(0x3d);
5213  static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
5214 
5215  ExternalReference roots_array_start =
5216  ExternalReference::roots_array_start(masm->isolate());
5217 
5218  ASSERT_EQ(object.code(), InstanceofStub::left().code());
5219  ASSERT_EQ(function.code(), InstanceofStub::right().code());
5220 
5221  // Get the object and function - they are always both needed.
5222  Label slow, not_js_object;
5223  if (!HasArgsInRegisters()) {
5224  __ mov(object, Operand(esp, 2 * kPointerSize));
5225  __ mov(function, Operand(esp, 1 * kPointerSize));
5226  }
5227 
5228  // Check that the left hand is a JS object.
5229  __ JumpIfSmi(object, &not_js_object);
5230  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
5231 
5232  // If there is a call site cache don't look in the global cache, but do the
5233  // real lookup and update the call site cache.
5234  if (!HasCallSiteInlineCheck()) {
5235  // Look up the function and the map in the instanceof cache.
5236  Label miss;
5237  __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
5238  __ cmp(function, Operand::StaticArray(scratch,
5239  times_pointer_size,
5240  roots_array_start));
5241  __ j(not_equal, &miss, Label::kNear);
5242  __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
5243  __ cmp(map, Operand::StaticArray(
5244  scratch, times_pointer_size, roots_array_start));
5245  __ j(not_equal, &miss, Label::kNear);
5246  __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
5247  __ mov(eax, Operand::StaticArray(
5248  scratch, times_pointer_size, roots_array_start));
5249  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5250  __ bind(&miss);
5251  }
5252 
5253  // Get the prototype of the function.
5254  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
5255 
5256  // Check that the function prototype is a JS object.
5257  __ JumpIfSmi(prototype, &slow);
5258  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
5259 
5260  // Update the global instanceof or call site inlined cache with the current
5261  // map and function. The cached answer will be set when it is known below.
5262  if (!HasCallSiteInlineCheck()) {
5263  __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
5264  __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
5265  map);
5266  __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
5267  __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
5268  function);
5269  } else {
5270  // The constants for the code patching are based on no push instructions
5271  // at the call site.
5272  ASSERT(HasArgsInRegisters());
5273  // Get return address and delta to inlined map check.
5274  __ mov(scratch, Operand(esp, 0 * kPointerSize));
5275  __ sub(scratch, Operand(esp, 1 * kPointerSize));
5276  if (FLAG_debug_code) {
5277  __ cmpb(Operand(scratch, 0), kCmpEdiOperandByte1);
5278  __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 1)");
5279  __ cmpb(Operand(scratch, 1), kCmpEdiOperandByte2);
5280  __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 2)");
5281  }
5282  __ mov(scratch, Operand(scratch, kDeltaToCmpImmediate));
5283  __ mov(Operand(scratch, 0), map);
5284  }
5285 
5286  // Loop through the prototype chain of the object looking for the function
5287  // prototype.
5288  __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
5289  Label loop, is_instance, is_not_instance;
5290  __ bind(&loop);
5291  __ cmp(scratch, prototype);
5292  __ j(equal, &is_instance, Label::kNear);
5293  Factory* factory = masm->isolate()->factory();
5294  __ cmp(scratch, Immediate(factory->null_value()));
5295  __ j(equal, &is_not_instance, Label::kNear);
5296  __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
5297  __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
5298  __ jmp(&loop);
5299 
5300  __ bind(&is_instance);
5301  if (!HasCallSiteInlineCheck()) {
5302  __ Set(eax, Immediate(0));
5303  __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
5304  __ mov(Operand::StaticArray(scratch,
5305  times_pointer_size, roots_array_start), eax);
5306  } else {
5307  // Get return address and delta to inlined map check.
5308  __ mov(eax, factory->true_value());
5309  __ mov(scratch, Operand(esp, 0 * kPointerSize));
5310  __ sub(scratch, Operand(esp, 1 * kPointerSize));
5311  if (FLAG_debug_code) {
5312  __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
5313  __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
5314  }
5315  __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
5316  if (!ReturnTrueFalseObject()) {
5317  __ Set(eax, Immediate(0));
5318  }
5319  }
5320  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5321 
5322  __ bind(&is_not_instance);
5323  if (!HasCallSiteInlineCheck()) {
5324  __ Set(eax, Immediate(Smi::FromInt(1)));
5325  __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
5326  __ mov(Operand::StaticArray(
5327  scratch, times_pointer_size, roots_array_start), eax);
5328  } else {
5329  // Get return address and delta to inlined map check.
5330  __ mov(eax, factory->false_value());
5331  __ mov(scratch, Operand(esp, 0 * kPointerSize));
5332  __ sub(scratch, Operand(esp, 1 * kPointerSize));
5333  if (FLAG_debug_code) {
5334  __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
5335  __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
5336  }
5337  __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
5338  if (!ReturnTrueFalseObject()) {
5339  __ Set(eax, Immediate(Smi::FromInt(1)));
5340  }
5341  }
5342  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5343 
5344  Label object_not_null, object_not_null_or_smi;
5345  __ bind(&not_js_object);
5346  // Before the null, smi, and string value checks, check that the rhs is a
5347  // function, as for a non-function rhs an exception needs to be thrown.
5348  __ JumpIfSmi(function, &slow, Label::kNear);
5349  __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
5350  __ j(not_equal, &slow, Label::kNear);
5351 
5352  // Null is not instance of anything.
5353  __ cmp(object, factory->null_value());
5354  __ j(not_equal, &object_not_null, Label::kNear);
5355  __ Set(eax, Immediate(Smi::FromInt(1)));
5356  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5357 
5358  __ bind(&object_not_null);
5359  // Smi values are not instances of anything.
5360  __ JumpIfNotSmi(object, &object_not_null_or_smi, Label::kNear);
5361  __ Set(eax, Immediate(Smi::FromInt(1)));
5362  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5363 
5364  __ bind(&object_not_null_or_smi);
5365  // String values are not instances of anything.
5366  Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
5367  __ j(NegateCondition(is_string), &slow, Label::kNear);
5368  __ Set(eax, Immediate(Smi::FromInt(1)));
5369  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5370 
5371  // Slow-case: Go through the JavaScript implementation.
5372  __ bind(&slow);
5373  if (!ReturnTrueFalseObject()) {
5374  // Tail call the builtin which returns 0 or 1.
5375  if (HasArgsInRegisters()) {
5376  // Push arguments below return address.
5377  __ pop(scratch);
5378  __ push(object);
5379  __ push(function);
5380  __ push(scratch);
5381  }
5382  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
5383  } else {
5384  // Call the builtin and convert 0/1 to true/false.
5385  {
5386  FrameScope scope(masm, StackFrame::INTERNAL);
5387  __ push(object);
5388  __ push(function);
5389  __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
5390  }
5391  Label true_value, done;
5392  __ test(eax, eax);
5393  __ j(zero, &true_value, Label::kNear);
5394  __ mov(eax, factory->false_value());
5395  __ jmp(&done, Label::kNear);
5396  __ bind(&true_value);
5397  __ mov(eax, factory->true_value());
5398  __ bind(&done);
5399  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5400  }
5401 }
5402 
5403 
5404 Register InstanceofStub::left() { return eax; }
5405 
5406 
5407 Register InstanceofStub::right() { return edx; }
5408 
5409 
5410 int CompareStub::MinorKey() {
5411  // Encode the three parameters in a unique 16 bit value. To avoid duplicate
5412  // stubs, the never-NaN-NaN condition is only taken into account if the
5413  // condition is equal.
5414  ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
5415  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
5416  return ConditionField::encode(static_cast<unsigned>(cc_))
5417  | RegisterField::encode(false) // lhs_ and rhs_ are not used
5418  | StrictField::encode(strict_)
5419  | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
5420  | IncludeNumberCompareField::encode(include_number_compare_)
5421  | IncludeSmiCompareField::encode(include_smi_compare_);
5422 }
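// ---------------------------------------------------------------------------
// Editor's sketch (not part of the V8 source): the *Field::encode calls above
// are instances of v8's BitField template, which places a value at a fixed
// shift inside an integer key. A minimal stand-in; the shift/size values
// below are assumptions for illustration only.

#include <cstdint>

template <class T, int kShift, int kSize>
struct BitFieldSketch {
  static const uint32_t kMask = ((1u << kSize) - 1) << kShift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;  // caller ORs fields together
  }
  static T decode(uint32_t key) {
    return static_cast<T>((key & kMask) >> kShift);
  }
};

// e.g. a 1-bit strictness flag at bit 13 (position assumed):
typedef BitFieldSketch<bool, 13, 1> StrictFieldSketch;
// ---------------------------------------------------------------------------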
5423 
5424 
5425 // Unfortunately you have to run without snapshots to see most of these
5426 // names in the profile since most compare stubs end up in the snapshot.
5427 void CompareStub::PrintName(StringStream* stream) {
5428  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
5429  const char* cc_name;
5430  switch (cc_) {
5431  case less: cc_name = "LT"; break;
5432  case greater: cc_name = "GT"; break;
5433  case less_equal: cc_name = "LE"; break;
5434  case greater_equal: cc_name = "GE"; break;
5435  case equal: cc_name = "EQ"; break;
5436  case not_equal: cc_name = "NE"; break;
5437  default: cc_name = "UnknownCondition"; break;
5438  }
5439  bool is_equality = cc_ == equal || cc_ == not_equal;
5440  stream->Add("CompareStub_%s", cc_name);
5441  if (strict_ && is_equality) stream->Add("_STRICT");
5442  if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
5443  if (!include_number_compare_) stream->Add("_NO_NUMBER");
5444  if (!include_smi_compare_) stream->Add("_NO_SMI");
5445 }
5446 
5447 
5448 // -------------------------------------------------------------------------
5449 // StringCharCodeAtGenerator
5450 
5451 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
5452  // If the receiver is a smi trigger the non-string case.
5453  STATIC_ASSERT(kSmiTag == 0);
5454  __ JumpIfSmi(object_, receiver_not_string_);
5455 
5456  // Fetch the instance type of the receiver into result register.
5457  __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
5458  __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
5459  // If the receiver is not a string trigger the non-string case.
5460  __ test(result_, Immediate(kIsNotStringMask));
5461  __ j(not_zero, receiver_not_string_);
5462 
5463  // If the index is non-smi trigger the non-smi case.
5464  STATIC_ASSERT(kSmiTag == 0);
5465  __ JumpIfNotSmi(index_, &index_not_smi_);
5466  __ bind(&got_smi_index_);
5467 
5468  // Check for index out of range.
5469  __ cmp(index_, FieldOperand(object_, String::kLengthOffset));
5470  __ j(above_equal, index_out_of_range_);
5471 
5472  __ SmiUntag(index_);
5473 
5474  Factory* factory = masm->isolate()->factory();
5475  StringCharLoadGenerator::Generate(
5476  masm, factory, object_, index_, result_, &call_runtime_);
5477 
5478  __ SmiTag(result_);
5479  __ bind(&exit_);
5480 }
5481 
5482 
5483 void StringCharCodeAtGenerator::GenerateSlow(
5484  MacroAssembler* masm,
5485  const RuntimeCallHelper& call_helper) {
5486  __ Abort("Unexpected fallthrough to CharCodeAt slow case");
5487 
5488  // Index is not a smi.
5489  __ bind(&index_not_smi_);
5490  // If index is a heap number, try converting it to an integer.
5491  __ CheckMap(index_,
5492  masm->isolate()->factory()->heap_number_map(),
5493  index_not_number_,
5494  DONT_DO_SMI_CHECK);
5495  call_helper.BeforeCall(masm);
5496  __ push(object_);
5497  __ push(index_); // Consumed by runtime conversion function.
5498  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
5499  __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
5500  } else {
5501  ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
5502  // NumberToSmi discards numbers that are not exact integers.
5503  __ CallRuntime(Runtime::kNumberToSmi, 1);
5504  }
5505  if (!index_.is(eax)) {
5506  // Save the conversion result before the pop instructions below
5507  // have a chance to overwrite it.
5508  __ mov(index_, eax);
5509  }
5510  __ pop(object_);
5511  // Reload the instance type.
5512  __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
5513  __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
5514  call_helper.AfterCall(masm);
5515  // If index is still not a smi, it must be out of range.
5516  STATIC_ASSERT(kSmiTag == 0);
5517  __ JumpIfNotSmi(index_, index_out_of_range_);
5518  // Otherwise, return to the fast path.
5519  __ jmp(&got_smi_index_);
5520 
5521  // Call runtime. We get here when the receiver is a string and the
5522  // index is a number, but the code of getting the actual character
5523  // is too complex (e.g., when the string needs to be flattened).
5524  __ bind(&call_runtime_);
5525  call_helper.BeforeCall(masm);
5526  __ push(object_);
5527  __ SmiTag(index_);
5528  __ push(index_);
5529  __ CallRuntime(Runtime::kStringCharCodeAt, 2);
5530  if (!result_.is(eax)) {
5531  __ mov(result_, eax);
5532  }
5533  call_helper.AfterCall(masm);
5534  __ jmp(&exit_);
5535 
5536  __ Abort("Unexpected fallthrough from CharCodeAt slow case");
5537 }
5538 
5539 
5540 // -------------------------------------------------------------------------
5541 // StringCharFromCodeGenerator
5542 
5543 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
5544  // Fast case of Heap::LookupSingleCharacterStringFromCode.
5545  STATIC_ASSERT(kSmiTag == 0);
5548  __ test(code_,
5549  Immediate(kSmiTagMask |
5550  ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
5551  __ j(not_zero, &slow_case_);
5552 
5553  Factory* factory = masm->isolate()->factory();
5554  __ Set(result_, Immediate(factory->single_character_string_cache()));
5555  STATIC_ASSERT(kSmiTag == 0);
5556  STATIC_ASSERT(kSmiTagSize == 1);
5558  // At this point code register contains smi tagged ASCII char code.
5559  __ mov(result_, FieldOperand(result_,
5560  code_, times_half_pointer_size,
5561  FixedArray::kHeaderSize));
5562  __ cmp(result_, factory->undefined_value());
5563  __ j(equal, &slow_case_);
5564  __ bind(&exit_);
5565 }
5566 
5567 
5568 void StringCharFromCodeGenerator::GenerateSlow(
5569  MacroAssembler* masm,
5570  const RuntimeCallHelper& call_helper) {
5571  __ Abort("Unexpected fallthrough to CharFromCode slow case");
5572 
5573  __ bind(&slow_case_);
5574  call_helper.BeforeCall(masm);
5575  __ push(code_);
5576  __ CallRuntime(Runtime::kCharFromCode, 1);
5577  if (!result_.is(eax)) {
5578  __ mov(result_, eax);
5579  }
5580  call_helper.AfterCall(masm);
5581  __ jmp(&exit_);
5582 
5583  __ Abort("Unexpected fallthrough from CharFromCode slow case");
5584 }
5585 
5586 
5587 // -------------------------------------------------------------------------
5588 // StringCharAtGenerator
5589 
5590 void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
5591  char_code_at_generator_.GenerateFast(masm);
5592  char_from_code_generator_.GenerateFast(masm);
5593 }
5594 
5595 
5596 void StringCharAtGenerator::GenerateSlow(
5597  MacroAssembler* masm,
5598  const RuntimeCallHelper& call_helper) {
5599  char_code_at_generator_.GenerateSlow(masm, call_helper);
5600  char_from_code_generator_.GenerateSlow(masm, call_helper);
5601 }
5602 
5603 
5604 void StringAddStub::Generate(MacroAssembler* masm) {
5605  Label call_runtime, call_builtin;
5606  Builtins::JavaScript builtin_id = Builtins::ADD;
5607 
5608  // Load the two arguments.
5609  __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
5610  __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
5611 
5612  // Make sure that both arguments are strings if not known in advance.
5613  if (flags_ == NO_STRING_ADD_FLAGS) {
5614  __ JumpIfSmi(eax, &call_runtime);
5615  __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
5616  __ j(above_equal, &call_runtime);
5617 
5618  // First argument is a string; test the second.
5619  __ JumpIfSmi(edx, &call_runtime);
5620  __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
5621  __ j(above_equal, &call_runtime);
5622  } else {
5623  // Here at least one of the arguments is definitely a string.
5624  // We convert the one that is not known to be a string.
5625  if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
5626  ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
5627  GenerateConvertArgument(masm, 2 * kPointerSize, eax, ebx, ecx, edi,
5628  &call_builtin);
5629  builtin_id = Builtins::STRING_ADD_RIGHT;
5630  } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
5631  ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
5632  GenerateConvertArgument(masm, 1 * kPointerSize, edx, ebx, ecx, edi,
5633  &call_builtin);
5634  builtin_id = Builtins::STRING_ADD_LEFT;
5635  }
5636  }
5637 
5638  // Both arguments are strings.
5639  // eax: first string
5640  // edx: second string
5641  // Check if either of the strings is empty. In that case, return the other.
5642  Label second_not_zero_length, both_not_zero_length;
5643  __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
5644  STATIC_ASSERT(kSmiTag == 0);
5645  __ test(ecx, ecx);
5646  __ j(not_zero, &second_not_zero_length, Label::kNear);
5647  // Second string is empty, result is first string which is already in eax.
5648  Counters* counters = masm->isolate()->counters();
5649  __ IncrementCounter(counters->string_add_native(), 1);
5650  __ ret(2 * kPointerSize);
5651  __ bind(&second_not_zero_length);
5652  __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
5653  STATIC_ASSERT(kSmiTag == 0);
5654  __ test(ebx, ebx);
5655  __ j(not_zero, &both_not_zero_length, Label::kNear);
5656  // First string is empty, result is second string which is in edx.
5657  __ mov(eax, edx);
5658  __ IncrementCounter(counters->string_add_native(), 1);
5659  __ ret(2 * kPointerSize);
5660 
5661  // Both strings are non-empty.
5662  // eax: first string
5663  // ebx: length of first string as a smi
5664  // ecx: length of second string as a smi
5665  // edx: second string
5666  // Look at the length of the result of adding the two strings.
5667  Label string_add_flat_result, longer_than_two;
5668  __ bind(&both_not_zero_length);
5669  __ add(ebx, ecx);
5670  STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
5671  // Handle exceptionally long strings in the runtime system.
5672  __ j(overflow, &call_runtime);
5673  // Use the symbol table when adding two one-character strings, as it
5674  // helps later optimizations to return a symbol here.
5675  __ cmp(ebx, Immediate(Smi::FromInt(2)));
5676  __ j(not_equal, &longer_than_two);
5677 
5678  // Check that both strings are non-external ASCII strings.
5679  __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx, &call_runtime);
5680 
5681  // Get the two characters forming the new string.
5682  __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
5683  __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
5684 
5685  // Try to look up the two-character string in the symbol table. If it is
5686  // not found, just allocate a new one.
5687  Label make_two_character_string, make_two_character_string_no_reload;
5688  StringHelper::GenerateTwoCharacterSymbolTableProbe(
5689  masm, ebx, ecx, eax, edx, edi,
5690  &make_two_character_string_no_reload, &make_two_character_string);
5691  __ IncrementCounter(counters->string_add_native(), 1);
5692  __ ret(2 * kPointerSize);
5693 
5694  // Allocate a two character string.
5695  __ bind(&make_two_character_string);
5696  // Reload the arguments.
5697  __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
5698  __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
5699  // Get the two characters forming the new string.
5700  __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
5701  __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
5702  __ bind(&make_two_character_string_no_reload);
5703  __ IncrementCounter(counters->string_add_make_two_char(), 1);
5704  __ AllocateAsciiString(eax, 2, edi, edx, &call_runtime);
5705  // Pack both characters in ebx.
5706  __ shl(ecx, kBitsPerByte);
5707  __ or_(ebx, ecx);
5708  // Set the characters in the new string.
5709  __ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx);
5710  __ IncrementCounter(counters->string_add_native(), 1);
5711  __ ret(2 * kPointerSize);
5712 
5713  __ bind(&longer_than_two);
5714  // Check if resulting string will be flat.
5715  __ cmp(ebx, Immediate(Smi::FromInt(ConsString::kMinLength)));
5716  __ j(below, &string_add_flat_result);
5717 
5718  // If the result is not supposed to be flat, allocate a cons string object.
5719  // If both strings are ASCII, the result is an ASCII cons string.
5720  Label non_ascii, allocated, ascii_data;
5725  __ and_(ecx, edi);
5728  __ test(ecx, Immediate(kStringEncodingMask));
5729  __ j(zero, &non_ascii);
5730  __ bind(&ascii_data);
5731  // Allocate an ASCII cons string.
5732  __ AllocateAsciiConsString(ecx, edi, no_reg, &call_runtime);
5733  __ bind(&allocated);
5734  // Fill the fields of the cons string.
5735  __ AssertSmi(ebx);
5736  __ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
5737  __ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
5738  Immediate(String::kEmptyHashField));
5739  __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
5740  __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
5741  __ mov(eax, ecx);
5742  __ IncrementCounter(counters->string_add_native(), 1);
5743  __ ret(2 * kPointerSize);
5744  __ bind(&non_ascii);
5745  // At least one of the strings is two-byte. Check whether it happens
5746  // to contain only ASCII characters.
5747  // ecx: first instance type AND second instance type.
5748  // edi: second instance type.
5749  __ test(ecx, Immediate(kAsciiDataHintMask));
5750  __ j(not_zero, &ascii_data);
5753  __ xor_(edi, ecx);
5754  STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
5755  __ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
5756  __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
5757  __ j(equal, &ascii_data);
5758  // Allocate a two byte cons string.
5759  __ AllocateTwoByteConsString(ecx, edi, no_reg, &call_runtime);
5760  __ jmp(&allocated);
5761 
5762  // We cannot encounter sliced strings or cons strings here since:
5763  STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
5764  // Handle creating a flat result from either external or sequential strings.
5765  // Locate the first characters' locations.
5766  // eax: first string
5767  // ebx: length of resulting flat string as a smi
5768  // edx: second string
5769  Label first_prepared, second_prepared;
5770  Label first_is_sequential, second_is_sequential;
5771  __ bind(&string_add_flat_result);
5774  // ecx: instance type of first string
5776  __ test_b(ecx, kStringRepresentationMask);
5777  __ j(zero, &first_is_sequential, Label::kNear);
5778  // Rule out short external string and load string resource.
5780  __ test_b(ecx, kShortExternalStringMask);
5781  __ j(not_zero, &call_runtime);
5784  __ jmp(&first_prepared, Label::kNear);
5785  __ bind(&first_is_sequential);
5786  __ add(eax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5787  __ bind(&first_prepared);
5788 
5791  // Check whether both strings have same encoding.
5792  // edi: instance type of second string
5793  __ xor_(ecx, edi);
5794  __ test_b(ecx, kStringEncodingMask);
5795  __ j(not_zero, &call_runtime);
5797  __ test_b(edi, kStringRepresentationMask);
5798  __ j(zero, &second_is_sequential, Label::kNear);
5799  // Rule out short external string and load string resource.
5801  __ test_b(edi, kShortExternalStringMask);
5802  __ j(not_zero, &call_runtime);
5805  __ jmp(&second_prepared, Label::kNear);
5806  __ bind(&second_is_sequential);
5807  __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5808  __ bind(&second_prepared);
5809 
5810  // Push the addresses of both strings' first characters onto the stack.
5811  __ push(edx);
5812  __ push(eax);
5813 
5814  Label non_ascii_string_add_flat_result, call_runtime_drop_two;
5815  // edi: instance type of second string
5816  // First string and second string have the same encoding.
5818  __ test_b(edi, kStringEncodingMask);
5819  __ j(zero, &non_ascii_string_add_flat_result);
5820 
5821  // Both strings are ASCII strings.
5822  // ebx: length of resulting flat string as a smi
5823  __ SmiUntag(ebx);
5824  __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &call_runtime_drop_two);
5825  // eax: result string
5826  __ mov(ecx, eax);
5827  // Locate first character of result.
5828  __ add(ecx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5829  // Load first argument's length and first character location. Account for
5830  // values currently on the stack when fetching arguments from it.
5831  __ mov(edx, Operand(esp, 4 * kPointerSize));
5833  __ SmiUntag(edi);
5834  __ pop(edx);
5835  // eax: result string
5836  // ecx: first character of result
5837  // edx: first char of first argument
5838  // edi: length of first argument
5839  StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
5840  // Load second argument's length and first character location. Account for
5841  // values currently on the stack when fetching arguments from it.
5842  __ mov(edx, Operand(esp, 2 * kPointerSize));
5844  __ SmiUntag(edi);
5845  __ pop(edx);
5846  // eax: result string
5847  // ecx: next character of result
5848  // edx: first char of second argument
5849  // edi: length of second argument
5850  StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
5851  __ IncrementCounter(counters->string_add_native(), 1);
5852  __ ret(2 * kPointerSize);
5853 
5854  // Handle creating a flat two byte result.
5855  // eax: first string - known to be two byte
5856  // ebx: length of resulting flat string as a smi
5857  // edx: second string
5858  __ bind(&non_ascii_string_add_flat_result);
5859  // Both strings are two byte strings.
5860  __ SmiUntag(ebx);
5861  __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &call_runtime_drop_two);
5862  // eax: result string
5863  __ mov(ecx, eax);
5864  // Locate first character of result.
5865  __ add(ecx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
5866  // Load first argument's length and first character location. Account for
5867  // values currently on the stack when fetching arguments from it.
5868  __ mov(edx, Operand(esp, 4 * kPointerSize));
5870  __ SmiUntag(edi);
5871  __ pop(edx);
5872  // eax: result string
5873  // ecx: first character of result
5874  // edx: first char of first argument
5875  // edi: length of first argument
5876  StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
5877  // Load second argument's length and first character location. Account for
5878  // values currently on the stack when fetching arguments from it.
5879  __ mov(edx, Operand(esp, 2 * kPointerSize));
5881  __ SmiUntag(edi);
5882  __ pop(edx);
5883  // eax: result string
5884  // ecx: next character of result
5885  // edx: first char of second argument
5886  // edi: length of second argument
5887  StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
5888  __ IncrementCounter(counters->string_add_native(), 1);
5889  __ ret(2 * kPointerSize);
5890 
5891  // Recover stack pointer before jumping to runtime.
5892  __ bind(&call_runtime_drop_two);
5893  __ Drop(2);
5894  // Just jump to runtime to add the two strings.
5895  __ bind(&call_runtime);
5896  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
5897 
5898  if (call_builtin.is_linked()) {
5899  __ bind(&call_builtin);
5900  __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
5901  }
5902 }
5903 
5904 
5905 void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
5906  int stack_offset,
5907  Register arg,
5908  Register scratch1,
5909  Register scratch2,
5910  Register scratch3,
5911  Label* slow) {
5912  // First check if the argument is already a string.
5913  Label not_string, done;
5914  __ JumpIfSmi(arg, &not_string);
5915  __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
5916  __ j(below, &done);
5917 
5918  // Check the number to string cache.
5919  Label not_cached;
5920  __ bind(&not_string);
5921  // Puts the cached result into scratch1.
5922  NumberToStringStub::GenerateLookupNumberStringCache(masm,
5923  arg,
5924  scratch1,
5925  scratch2,
5926  scratch3,
5927  false,
5928  &not_cached);
5929  __ mov(arg, scratch1);
5930  __ mov(Operand(esp, stack_offset), arg);
5931  __ jmp(&done);
5932 
5933  // Check if the argument is a safe string wrapper.
5934  __ bind(&not_cached);
5935  __ JumpIfSmi(arg, slow);
5936  __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1.
5937  __ j(not_equal, slow);
5938  __ test_b(FieldOperand(scratch1, Map::kBitField2Offset),
5939  1 << Map::kStringWrapperSafeForDefaultValueOf);
5940  __ j(zero, slow);
5941  __ mov(arg, FieldOperand(arg, JSValue::kValueOffset));
5942  __ mov(Operand(esp, stack_offset), arg);
5943 
5944  __ bind(&done);
5945 }
5946 
5947 
5948 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
5949  Register dest,
5950  Register src,
5951  Register count,
5952  Register scratch,
5953  bool ascii) {
5954  Label loop;
5955  __ bind(&loop);
5956  // This loop just copies one character at a time, as it is only used for very
5957  // short strings.
5958  if (ascii) {
5959  __ mov_b(scratch, Operand(src, 0));
5960  __ mov_b(Operand(dest, 0), scratch);
5961  __ add(src, Immediate(1));
5962  __ add(dest, Immediate(1));
5963  } else {
5964  __ mov_w(scratch, Operand(src, 0));
5965  __ mov_w(Operand(dest, 0), scratch);
5966  __ add(src, Immediate(2));
5967  __ add(dest, Immediate(2));
5968  }
5969  __ sub(count, Immediate(1));
5970  __ j(not_zero, &loop);
5971 }
5972 
5973 
5974 void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
5975  Register dest,
5976  Register src,
5977  Register count,
5978  Register scratch,
5979  bool ascii) {
5980  // Copy characters using rep movs of doublewords.
5981  // The destination is aligned on a 4 byte boundary because we are
5982  // copying to the beginning of a newly allocated string.
5983  ASSERT(dest.is(edi)); // rep movs destination
5984  ASSERT(src.is(esi)); // rep movs source
5985  ASSERT(count.is(ecx)); // rep movs count
5986  ASSERT(!scratch.is(dest));
5987  ASSERT(!scratch.is(src));
5988  ASSERT(!scratch.is(count));
5989 
5990  // Nothing to do for zero characters.
5991  Label done;
5992  __ test(count, count);
5993  __ j(zero, &done);
5994 
5995  // Make count the number of bytes to copy.
5996  if (!ascii) {
5997  __ shl(count, 1);
5998  }
5999 
6000  // Don't enter the rep movs if there are fewer than 4 bytes to copy.
6001  Label last_bytes;
6002  __ test(count, Immediate(~3));
6003  __ j(zero, &last_bytes, Label::kNear);
6004 
6005  // Copy from esi to edi using the rep movs instruction.
6006  __ mov(scratch, count);
6007  __ sar(count, 2); // Number of doublewords to copy.
6008  __ cld();
6009  __ rep_movs();
6010 
6011  // Find number of bytes left.
6012  __ mov(count, scratch);
6013  __ and_(count, 3);
6014 
6015  // Check if there are more bytes to copy.
6016  __ bind(&last_bytes);
6017  __ test(count, count);
6018  __ j(zero, &done);
6019 
6020  // Copy remaining characters.
6021  Label loop;
6022  __ bind(&loop);
6023  __ mov_b(scratch, Operand(src, 0));
6024  __ mov_b(Operand(dest, 0), scratch);
6025  __ add(src, Immediate(1));
6026  __ add(dest, Immediate(1));
6027  __ sub(count, Immediate(1));
6028  __ j(not_zero, &loop);
6029 
6030  __ bind(&done);
6031 }
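// ---------------------------------------------------------------------------
// Editor's sketch (not part of the V8 source): the copy strategy above in
// plain C++. Bulk-copy whole doublewords first (the "rep movs" part), then
// finish the 0-3 leftover bytes one at a time (the "last_bytes" loop).

#include <cstddef>
#include <cstdint>
#include <cstring>

inline void CopyCharsSketch(uint8_t* dest, const uint8_t* src,
                            size_t count, bool ascii) {
  size_t bytes = ascii ? count : count * 2;  // "shl count, 1" for two-byte
  size_t dwords = bytes / 4;                 // "sar count, 2"
  std::memcpy(dest, src, dwords * 4);        // stands in for rep movsd
  for (size_t i = dwords * 4; i < bytes; i++) {  // "and count, 3" tail
    dest[i] = src[i];
  }
}
// ---------------------------------------------------------------------------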
6032 
6033 
6034 void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
6035  Register c1,
6036  Register c2,
6037  Register scratch1,
6038  Register scratch2,
6039  Register scratch3,
6040  Label* not_probed,
6041  Label* not_found) {
6042  // Register scratch3 is the general scratch register in this function.
6043  Register scratch = scratch3;
6044 
6045  // Make sure that both characters are not digits, as such strings have a
6046  // different hash algorithm. Don't try to look for these in the symbol table.
6047  Label not_array_index;
6048  __ mov(scratch, c1);
6049  __ sub(scratch, Immediate(static_cast<int>('0')));
6050  __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
6051  __ j(above, &not_array_index, Label::kNear);
6052  __ mov(scratch, c2);
6053  __ sub(scratch, Immediate(static_cast<int>('0')));
6054  __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
6055  __ j(below_equal, not_probed);
6056 
6057  __ bind(&not_array_index);
6058  // Calculate the two character string hash.
6059  Register hash = scratch1;
6060  GenerateHashInit(masm, hash, c1, scratch);
6061  GenerateHashAddCharacter(masm, hash, c2, scratch);
6062  GenerateHashGetHash(masm, hash, scratch);
6063 
6064  // Collect the two characters in a register.
6065  Register chars = c1;
6066  __ shl(c2, kBitsPerByte);
6067  __ or_(chars, c2);
6068 
6069  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
6070  // hash: hash of two character string.
6071 
6072  // Load the symbol table.
6073  Register symbol_table = c2;
6074  ExternalReference roots_array_start =
6075  ExternalReference::roots_array_start(masm->isolate());
6076  __ mov(scratch, Immediate(Heap::kSymbolTableRootIndex));
6077  __ mov(symbol_table,
6078  Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
6079 
6080  // Calculate capacity mask from the symbol table capacity.
6081  Register mask = scratch2;
6082  __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
6083  __ SmiUntag(mask);
6084  __ sub(mask, Immediate(1));
6085 
6086  // Registers
6087  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
6088  // hash: hash of two character string
6089  // symbol_table: symbol table
6090  // mask: capacity mask
6091  // scratch: -
6092 
6093  // Perform a number of probes in the symbol table.
6094  static const int kProbes = 4;
6095  Label found_in_symbol_table;
6096  Label next_probe[kProbes], next_probe_pop_mask[kProbes];
6097  Register candidate = scratch; // Scratch register contains candidate.
6098  for (int i = 0; i < kProbes; i++) {
6099  // Calculate entry in symbol table.
6100  __ mov(scratch, hash);
6101  if (i > 0) {
6102  __ add(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
6103  }
6104  __ and_(scratch, mask);
6105 
6106  // Load the entry from the symbol table.
6108  __ mov(candidate,
6109  FieldOperand(symbol_table,
6110  scratch,
6111  times_pointer_size,
6112  SymbolTable::kElementsStartOffset));
6113 
6114  // If entry is undefined no string with this hash can be found.
6115  Factory* factory = masm->isolate()->factory();
6116  __ cmp(candidate, factory->undefined_value());
6117  __ j(equal, not_found);
6118  __ cmp(candidate, factory->the_hole_value());
6119  __ j(equal, &next_probe[i]);
6120 
6121  // If length is not 2 the string is not a candidate.
6122  __ cmp(FieldOperand(candidate, String::kLengthOffset),
6123  Immediate(Smi::FromInt(2)));
6124  __ j(not_equal, &next_probe[i]);
6125 
6126  // As we are out of registers save the mask on the stack and use that
6127  // register as a temporary.
6128  __ push(mask);
6129  Register temp = mask;
6130 
6131  // Check that the candidate is a non-external ASCII string.
6132  __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset));
6133  __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
6134  __ JumpIfInstanceTypeIsNotSequentialAscii(
6135  temp, temp, &next_probe_pop_mask[i]);
6136 
6137  // Check if the two characters match.
6138  __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
6139  __ and_(temp, 0x0000ffff);
6140  __ cmp(chars, temp);
6141  __ j(equal, &found_in_symbol_table);
6142  __ bind(&next_probe_pop_mask[i]);
6143  __ pop(mask);
6144  __ bind(&next_probe[i]);
6145  }
6146 
6147  // No matching 2 character string found by probing.
6148  __ jmp(not_found);
6149 
6150  // Scratch register contains result when we fall through to here.
6151  Register result = candidate;
6152  __ bind(&found_in_symbol_table);
6153  __ pop(mask); // Pop saved mask from the stack.
6154  if (!result.is(eax)) {
6155  __ mov(eax, result);
6156  }
6157 }
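// ---------------------------------------------------------------------------
// Editor's sketch (not part of the V8 source): the probe loop above restated
// in C++. The table is open-addressed: an undefined entry means the string is
// definitely absent, a hole or non-matching entry means "keep probing", and
// probing gives up after kProbes attempts (falling back to allocation). The
// probe-offset formula and the sentinel encodings here are assumptions.

#include <cstdint>
#include <string>
#include <vector>

inline int ProbeTwoCharStringSketch(const std::vector<std::string>& table,
                                    uint32_t hash, uint32_t capacity_mask,
                                    const std::string& two_chars) {
  const int kProbesSketch = 4;  // same bound as the stub
  for (int i = 0; i < kProbesSketch; i++) {
    // GetProbeOffset(i) is assumed to be i*(i+1)/2 here.
    uint32_t index = (hash + static_cast<uint32_t>(i * (i + 1) / 2))
                     & capacity_mask;
    const std::string& candidate = table[index];
    if (candidate.empty()) return -1;     // undefined: definitely not present
    if (candidate == "\x01") continue;    // deleted hole: try the next probe
    if (candidate.size() == 2 && candidate == two_chars) {
      return static_cast<int>(index);     // found_in_symbol_table
    }
  }
  return -1;  // no match within the probe bound
}
// ---------------------------------------------------------------------------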
6158 
6159 
6160 void StringHelper::GenerateHashInit(MacroAssembler* masm,
6161  Register hash,
6162  Register character,
6163  Register scratch) {
6164  // hash = (seed + character) + ((seed + character) << 10);
6165  if (Serializer::enabled()) {
6166  ExternalReference roots_array_start =
6167  ExternalReference::roots_array_start(masm->isolate());
6168  __ mov(scratch, Immediate(Heap::kHashSeedRootIndex));
6169  __ mov(scratch, Operand::StaticArray(scratch,
6170  times_pointer_size,
6171  roots_array_start));
6172  __ SmiUntag(scratch);
6173  __ add(scratch, character);
6174  __ mov(hash, scratch);
6175  __ shl(scratch, 10);
6176  __ add(hash, scratch);
6177  } else {
6178  int32_t seed = masm->isolate()->heap()->HashSeed();
6179  __ lea(scratch, Operand(character, seed));
6180  __ shl(scratch, 10);
6181  __ lea(hash, Operand(scratch, character, times_1, seed));
6182  }
6183  // hash ^= hash >> 6;
6184  __ mov(scratch, hash);
6185  __ shr(scratch, 6);
6186  __ xor_(hash, scratch);
6187 }
6188 
6189 
6190 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
6191  Register hash,
6192  Register character,
6193  Register scratch) {
6194  // hash += character;
6195  __ add(hash, character);
6196  // hash += hash << 10;
6197  __ mov(scratch, hash);
6198  __ shl(scratch, 10);
6199  __ add(hash, scratch);
6200  // hash ^= hash >> 6;
6201  __ mov(scratch, hash);
6202  __ shr(scratch, 6);
6203  __ xor_(hash, scratch);
6204 }
6205 
6206 
6207 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
6208  Register hash,
6209  Register scratch) {
6210  // hash += hash << 3;
6211  __ mov(scratch, hash);
6212  __ shl(scratch, 3);
6213  __ add(hash, scratch);
6214  // hash ^= hash >> 11;
6215  __ mov(scratch, hash);
6216  __ shr(scratch, 11);
6217  __ xor_(hash, scratch);
6218  // hash += hash << 15;
6219  __ mov(scratch, hash);
6220  __ shl(scratch, 15);
6221  __ add(hash, scratch);
6222 
6223  __ and_(hash, String::kHashBitMask);
6224 
6225  // if (hash == 0) hash = 27;
6226  Label hash_not_zero;
6227  __ j(not_zero, &hash_not_zero, Label::kNear);
6228  __ mov(hash, Immediate(StringHasher::kZeroHash));
6229  __ bind(&hash_not_zero);
6230 }
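// ---------------------------------------------------------------------------
// Editor's sketch (not part of the V8 source): the three hash helpers above
// compose into a seeded, Jenkins-style one-at-a-time string hash. A plain C++
// restatement; the 30-bit hash mask width is an assumption, and 27 stands in
// for StringHasher::kZeroHash per the "if (hash == 0) hash = 27" comment.

#include <cstddef>
#include <cstdint>

inline uint32_t StringHashSketch(const uint8_t* chars, size_t length,
                                 uint32_t seed) {
  uint32_t hash = seed;
  for (size_t i = 0; i < length; i++) {
    hash += chars[i];        // GenerateHashInit / GenerateHashAddCharacter
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  hash += hash << 3;         // GenerateHashGetHash: final avalanche steps
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= (1u << 30) - 1;    // String::kHashBitMask (width assumed)
  return hash == 0 ? 27 : hash;  // never return zero
}
// ---------------------------------------------------------------------------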
6231 
6232 
6233 void SubStringStub::Generate(MacroAssembler* masm) {
6234  Label runtime;
6235 
6236  // Stack frame on entry.
6237  // esp[0]: return address
6238  // esp[4]: to
6239  // esp[8]: from
6240  // esp[12]: string
6241 
6242  // Make sure first argument is a string.
6243  __ mov(eax, Operand(esp, 3 * kPointerSize));
6244  STATIC_ASSERT(kSmiTag == 0);
6245  __ JumpIfSmi(eax, &runtime);
6246  Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
6247  __ j(NegateCondition(is_string), &runtime);
6248 
6249  // eax: string
6250  // ebx: instance type
6251 
6252  // Calculate length of sub string using the smi values.
6253  __ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index.
6254  __ JumpIfNotSmi(ecx, &runtime);
6255  __ mov(edx, Operand(esp, 2 * kPointerSize)); // From index.
6256  __ JumpIfNotSmi(edx, &runtime);
6257  __ sub(ecx, edx);
6258  __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
6259  Label not_original_string;
6260  // Shorter than original string's length: an actual substring.
6261  __ j(below, &not_original_string, Label::kNear);
6262  // Longer than original string's length or negative: unsafe arguments.
6263  __ j(above, &runtime);
6264  // Return original string.
6265  Counters* counters = masm->isolate()->counters();
6266  __ IncrementCounter(counters->sub_string_native(), 1);
6267  __ ret(3 * kPointerSize);
6268  __ bind(&not_original_string);
6269 
6270  // eax: string
6271  // ebx: instance type
6272  // ecx: sub string length (smi)
6273  // edx: from index (smi)
6274  // Deal with different string types: update the index if necessary
6275  // and put the underlying string into edi.
6276  Label underlying_unpacked, sliced_string, seq_or_external_string;
6277  // If the string is not indirect, it can only be sequential or external.
6280  __ test(ebx, Immediate(kIsIndirectStringMask));
6281  __ j(zero, &seq_or_external_string, Label::kNear);
6282 
6283  Factory* factory = masm->isolate()->factory();
6284  __ test(ebx, Immediate(kSlicedNotConsMask));
6285  __ j(not_zero, &sliced_string, Label::kNear);
6286  // Cons string. Check whether it is flat, then fetch first part.
6287  // Flat cons strings have an empty second part.
6289  factory->empty_string());
6290  __ j(not_equal, &runtime);
6292  // Update instance type.
6295  __ jmp(&underlying_unpacked, Label::kNear);
6296 
6297  __ bind(&sliced_string);
6298  // Sliced string. Fetch parent and adjust start index by offset.
6301  // Update instance type.
6304  __ jmp(&underlying_unpacked, Label::kNear);
6305 
6306  __ bind(&seq_or_external_string);
6307  // Sequential or external string. Just move string to the expected register.
6308  __ mov(edi, eax);
6309 
6310  __ bind(&underlying_unpacked);
6311 
6312  if (FLAG_string_slices) {
6313  Label copy_routine;
6314  // edi: underlying subject string
6315  // ebx: instance type of underlying subject string
6316  // edx: adjusted start index (smi)
6317  // ecx: length (smi)
6318  __ cmp(ecx, Immediate(Smi::FromInt(SlicedString::kMinLength)));
6319  // Short slice. Copy instead of slicing.
6320  __ j(less, &copy_routine);
6321  // Allocate new sliced string. At this point we do not reload the instance
6322  // type including the string encoding because we simply rely on the info
6323  // provided by the original string. It does not matter if the original
6324  // string's encoding is wrong because we always have to recheck encoding of
6325  // the newly created string's parent anyway due to externalized strings.
6326  Label two_byte_slice, set_slice_header;
6329  __ test(ebx, Immediate(kStringEncodingMask));
6330  __ j(zero, &two_byte_slice, Label::kNear);
6331  __ AllocateAsciiSlicedString(eax, ebx, no_reg, &runtime);
6332  __ jmp(&set_slice_header, Label::kNear);
6333  __ bind(&two_byte_slice);
6334  __ AllocateTwoByteSlicedString(eax, ebx, no_reg, &runtime);
6335  __ bind(&set_slice_header);
6336  __ mov(FieldOperand(eax, SlicedString::kLengthOffset), ecx);
6337  __ mov(FieldOperand(eax, SlicedString::kHashFieldOffset),
6338  Immediate(String::kEmptyHashField));
6339  __ mov(FieldOperand(eax, SlicedString::kParentOffset), edi);
6340  __ mov(FieldOperand(eax, SlicedString::kOffsetOffset), edx);
6341  __ IncrementCounter(counters->sub_string_native(), 1);
6342  __ ret(3 * kPointerSize);
6343 
6344  __ bind(&copy_routine);
6345  }
6346 
6347  // edi: underlying subject string
6348  // ebx: instance type of underlying subject string
6349  // edx: adjusted start index (smi)
6350  // ecx: length (smi)
6351  // The subject string can only be external or sequential string of either
6352  // encoding at this point.
6353  Label two_byte_sequential, runtime_drop_two, sequential_string;
6356  __ test_b(ebx, kExternalStringTag);
6357  __ j(zero, &sequential_string);
6358 
6359  // Handle external string.
6360  // Rule out short external strings.
6362  __ test_b(ebx, kShortExternalStringMask);
6363  __ j(not_zero, &runtime);
6365  // Move the pointer so that offset-wise, it looks like a sequential string.
6368 
6369  __ bind(&sequential_string);
6370  // Stash away (adjusted) index and (underlying) string.
6371  __ push(edx);
6372  __ push(edi);
6373  __ SmiUntag(ecx);
6375  __ test_b(ebx, kStringEncodingMask);
6376  __ j(zero, &two_byte_sequential);
6377 
6378  // Sequential ASCII string. Allocate the result.
6379  __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
6380 
6381  // eax: result string
6382  // ecx: result string length
6383  __ mov(edx, esi); // esi used by following code.
6384  // Locate first character of result.
6385  __ mov(edi, eax);
6386  __ add(edi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6387  // Load string argument and locate character of sub string start.
6388  __ pop(esi);
6389  __ pop(ebx);
6390  __ SmiUntag(ebx);
6391  __ lea(esi, FieldOperand(esi, ebx, times_1, SeqAsciiString::kHeaderSize));
6392 
6393  // eax: result string
6394  // ecx: result length
6395  // edx: original value of esi
6396  // edi: first character of result
6397  // esi: character of sub string start
6398  StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
6399  __ mov(esi, edx); // Restore esi.
6400  __ IncrementCounter(counters->sub_string_native(), 1);
6401  __ ret(3 * kPointerSize);
6402 
6403  __ bind(&two_byte_sequential);
6404  // Sequential two-byte string. Allocate the result.
6405  __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
6406 
6407  // eax: result string
6408  // ecx: result string length
6409  __ mov(edx, esi); // esi used by following code.
6410  // Locate first character of result.
6411  __ mov(edi, eax);
6412  __ add(edi,
6413  Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6414  // Load string argument and locate character of sub string start.
6415  __ pop(esi);
6416  __ pop(ebx);
6417  // As the from index is a smi, it is two times the value, which matches
6418  // the size of a two-byte character.
6419  STATIC_ASSERT(kSmiTag == 0);
6420  STATIC_ASSERT(kSmiTagSize == 1);
6421  __ lea(esi, FieldOperand(esi, ebx, times_1, SeqTwoByteString::kHeaderSize));
6422 
6423  // eax: result string
6424  // ecx: result length
6425  // edx: original value of esi
6426  // edi: first character of result
6427  // esi: character of sub string start
6428  StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
6429  __ mov(esi, edx); // Restore esi.
6430  __ IncrementCounter(counters->sub_string_native(), 1);
6431  __ ret(3 * kPointerSize);
6432 
6433  // Drop pushed values on the stack before tail call.
6434  __ bind(&runtime_drop_two);
6435  __ Drop(2);
6436 
6437  // Just jump to runtime to create the sub string.
6438  __ bind(&runtime);
6439  __ TailCallRuntime(Runtime::kSubString, 3, 1);
6440 }
6441 
6442 
6443 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
6444  Register left,
6445  Register right,
6446  Register scratch1,
6447  Register scratch2) {
6448  Register length = scratch1;
6449 
6450  // Compare lengths.
6451  Label strings_not_equal, check_zero_length;
6452  __ mov(length, FieldOperand(left, String::kLengthOffset));
6453  __ cmp(length, FieldOperand(right, String::kLengthOffset));
6454  __ j(equal, &check_zero_length, Label::kNear);
6455  __ bind(&strings_not_equal);
6456  __ Set(eax, Immediate(Smi::FromInt(NOT_EQUAL)));
6457  __ ret(0);
6458 
6459  // Check if the length is zero.
6460  Label compare_chars;
6461  __ bind(&check_zero_length);
6462  STATIC_ASSERT(kSmiTag == 0);
6463  __ test(length, length);
6464  __ j(not_zero, &compare_chars, Label::kNear);
6465  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
6466  __ ret(0);
6467 
6468  // Compare characters.
6469  __ bind(&compare_chars);
6470  GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
6471  &strings_not_equal, Label::kNear);
6472 
6473  // Characters are equal.
6474  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
6475  __ ret(0);
6476 }
6477 
6478 
6479 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
6480  Register left,
6481  Register right,
6482  Register scratch1,
6483  Register scratch2,
6484  Register scratch3) {
6485  Counters* counters = masm->isolate()->counters();
6486  __ IncrementCounter(counters->string_compare_native(), 1);
6487 
6488  // Find minimum length.
6489  Label left_shorter;
6490  __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
6491  __ mov(scratch3, scratch1);
6492  __ sub(scratch3, FieldOperand(right, String::kLengthOffset));
6493 
6494  Register length_delta = scratch3;
6495 
6496  __ j(less_equal, &left_shorter, Label::kNear);
6497  // Right string is shorter. Change scratch1 to be length of right string.
6498  __ sub(scratch1, length_delta);
6499  __ bind(&left_shorter);
6500 
6501  Register min_length = scratch1;
6502 
6503  // If either length is zero, just compare lengths.
6504  Label compare_lengths;
6505  __ test(min_length, min_length);
6506  __ j(zero, &compare_lengths, Label::kNear);
6507 
6508  // Compare characters.
6509  Label result_not_equal;
6510  GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
6511  &result_not_equal, Label::kNear);
6512 
6513  // Compare lengths - strings up to min-length are equal.
6514  __ bind(&compare_lengths);
6515  __ test(length_delta, length_delta);
6516  __ j(not_zero, &result_not_equal, Label::kNear);
6517 
6518  // Result is EQUAL.
6519  STATIC_ASSERT(EQUAL == 0);
6520  STATIC_ASSERT(kSmiTag == 0);
6521  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
6522  __ ret(0);
6523 
6524  Label result_greater;
6525  __ bind(&result_not_equal);
6526  __ j(greater, &result_greater, Label::kNear);
6527 
6528  // Result is LESS.
6529  __ Set(eax, Immediate(Smi::FromInt(LESS)));
6530  __ ret(0);
6531 
6532  // Result is GREATER.
6533  __ bind(&result_greater);
6534  __ Set(eax, Immediate(Smi::FromInt(GREATER)));
6535  __ ret(0);
6536 }
6537 
6538 
6539 void StringCompareStub::GenerateAsciiCharsCompareLoop(
6540  MacroAssembler* masm,
6541  Register left,
6542  Register right,
6543  Register length,
6544  Register scratch,
6545  Label* chars_not_equal,
6546  Label::Distance chars_not_equal_near) {
6547  // Change the index to run from -length to -1 by adding length to the
6548  // string start. This means the loop ends when the index reaches zero,
6549  // so no additional compare is needed.
6550  __ SmiUntag(length);
6551  __ lea(left,
6552  FieldOperand(left, length, times_1, SeqAsciiString::kHeaderSize));
6553  __ lea(right,
6554  FieldOperand(right, length, times_1, SeqAsciiString::kHeaderSize));
6555  __ neg(length);
6556  Register index = length; // index = -length;
6557 
6558  // Compare loop.
6559  Label loop;
6560  __ bind(&loop);
6561  __ mov_b(scratch, Operand(left, index, times_1, 0));
6562  __ cmpb(scratch, Operand(right, index, times_1, 0));
6563  __ j(not_equal, chars_not_equal, chars_not_equal_near);
6564  __ inc(index);
6565  __ j(not_zero, &loop);
6566 }
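// ---------------------------------------------------------------------------
// Editor's sketch (not part of the V8 source): the negative-index loop above
// in C++. Both pointers are advanced to the end of their strings and the
// index counts up from -length, so the "inc; jnz" pair both advances and
// terminates the loop without a separate bounds compare.

#include <cstdint>

inline bool AsciiCharsEqualSketch(const uint8_t* left, const uint8_t* right,
                                  intptr_t length) {
  const uint8_t* left_end = left + length;    // "lea left, [left + length]"
  const uint8_t* right_end = right + length;  // "lea right, [right + length]"
  for (intptr_t index = -length; index != 0; index++) {  // "inc index; jnz"
    if (left_end[index] != right_end[index]) return false;
  }
  return true;
}
// ---------------------------------------------------------------------------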
6567 
6568 
6569 void StringCompareStub::Generate(MacroAssembler* masm) {
6570  Label runtime;
6571 
6572  // Stack frame on entry.
6573  // esp[0]: return address
6574  // esp[4]: right string
6575  // esp[8]: left string
6576 
6577  __ mov(edx, Operand(esp, 2 * kPointerSize)); // left
6578  __ mov(eax, Operand(esp, 1 * kPointerSize)); // right
6579 
6580  Label not_same;
6581  __ cmp(edx, eax);
6582  __ j(not_equal, &not_same, Label::kNear);
6583  STATIC_ASSERT(EQUAL == 0);
6584  STATIC_ASSERT(kSmiTag == 0);
6585  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
6586  __ IncrementCounter(masm->isolate()->counters()->string_compare_native(), 1);
6587  __ ret(2 * kPointerSize);
6588 
6589  __ bind(&not_same);
6590 
6591  // Check that both objects are sequential ASCII strings.
6592  __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
6593 
6594  // Compare flat ASCII strings.
6595  // Drop arguments from the stack.
6596  __ pop(ecx);
6597  __ add(esp, Immediate(2 * kPointerSize));
6598  __ push(ecx);
6599  GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
6600 
6601  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
6602  // tagged as a small integer.
6603  __ bind(&runtime);
6604  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
6605 }
6606 
6607 
6608 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
6609  ASSERT(state_ == CompareIC::SMIS);
6610  Label miss;
6611  __ mov(ecx, edx);
6612  __ or_(ecx, eax);
6613  __ JumpIfNotSmi(ecx, &miss, Label::kNear);
6614 
6615  if (GetCondition() == equal) {
6616  // For equality we do not care about the sign of the result.
6617  __ sub(eax, edx);
6618  } else {
6619  Label done;
6620  __ sub(edx, eax);
6621  __ j(no_overflow, &done, Label::kNear);
6622  // Correct sign of result in case of overflow.
6623  __ not_(edx);
6624  __ bind(&done);
6625  __ mov(eax, edx);
6626  }
6627  __ ret(0);
6628 
6629  __ bind(&miss);
6630  GenerateMiss(masm);
6631 }
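
// For the non-equality case the stub computes left - right and, when the
// subtraction overflows, recovers a correctly signed result with not_():
// on overflow the truncated difference has the wrong sign, and bitwise NOT
// flips the sign bit while never yielding zero. A sketch (assuming a
// compiler with __builtin_sub_overflow, e.g. GCC or Clang):

#include <cstdint>

// Returns a value whose sign matches the true ordering of two smis.
static int32_t SmiCompareSketch(int32_t left, int32_t right) {
  int32_t diff;
  if (!__builtin_sub_overflow(left, right, &diff)) return diff;
  return ~diff;  // overflow: ~diff has the correct sign and is non-zero
}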
6632 
6633 
6634 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
6635  ASSERT(state_ == CompareIC::HEAP_NUMBERS);
6636 
6637  Label generic_stub;
6638  Label unordered, maybe_undefined1, maybe_undefined2;
6639  Label miss;
6640  __ mov(ecx, edx);
6641  __ and_(ecx, eax);
6642  __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
6643 
6644  __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
6645  __ j(not_equal, &maybe_undefined1, Label::kNear);
6646  __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
6647  __ j(not_equal, &maybe_undefined2, Label::kNear);
6648 
6649  // Inlining the double comparison and falling back to the general compare
6650  // stub if NaN is involved or SSE2 or CMOV is unsupported.
6651  if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) {
6652  CpuFeatures::Scope scope1(SSE2);
6653  CpuFeatures::Scope scope2(CMOV);
6654 
6655  // Load left and right operands.
6656  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
6657  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
6658 
6659  // Compare operands
6660  __ ucomisd(xmm0, xmm1);
6661 
6662  // Don't base result on EFLAGS when a NaN is involved.
6663  __ j(parity_even, &unordered, Label::kNear);
6664 
6665  // Return a result of -1, 0, or 1, based on EFLAGS.
6666  // Use mov, because xor would clobber the flags register.
6667  __ mov(eax, 0); // equal
6668  __ mov(ecx, Immediate(Smi::FromInt(1)));
6669  __ cmov(above, eax, ecx);
6670  __ mov(ecx, Immediate(Smi::FromInt(-1)));
6671  __ cmov(below, eax, ecx);
6672  __ ret(0);
6673  }
6674 
6675  __ bind(&unordered);
6676  CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
6677  __ bind(&generic_stub);
6678  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
6679 
6680  __ bind(&maybe_undefined1);
6681  if (Token::IsOrderedRelationalCompareOp(op_)) {
6682  __ cmp(eax, Immediate(masm->isolate()->factory()->undefined_value()));
6683  __ j(not_equal, &miss);
6684  __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
6685  __ j(not_equal, &maybe_undefined2, Label::kNear);
6686  __ jmp(&unordered);
6687  }
6688 
6689  __ bind(&maybe_undefined2);
6690  if (Token::IsOrderedRelationalCompareOp(op_)) {
6691  __ cmp(edx, Immediate(masm->isolate()->factory()->undefined_value()));
6692  __ j(equal, &unordered);
6693  }
6694 
6695  __ bind(&miss);
6696  GenerateMiss(masm);
6697 }
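
// ucomisd sets ZF/CF from the comparison and PF when either operand is NaN,
// which is why the parity check routes NaNs to the generic stub; the two
// cmovs then materialize -1/0/1 without disturbing EFLAGS. A C++ sketch of
// the same result computation (illustrative only):

#include <cmath>

// Mirrors the stub: *unordered is the PF == 1 (NaN) case; otherwise the
// return value is -1 (below), 0 (equal) or 1 (above).
static int CompareHeapNumbersSketch(double left, double right,
                                    bool* unordered) {
  *unordered = std::isnan(left) || std::isnan(right);
  if (*unordered) return 0;       // handled by the generic CompareStub
  int result = 0;                 // mov(eax, 0): equal
  if (left > right) result = 1;   // cmov(above, eax, ecx)
  if (left < right) result = -1;  // cmov(below, eax, ecx)
  return result;
}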
6698 
6699 
6700 void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
6701  ASSERT(state_ == CompareIC::SYMBOLS);
6702  ASSERT(GetCondition() == equal);
6703 
6704  // Registers containing left and right operands respectively.
6705  Register left = edx;
6706  Register right = eax;
6707  Register tmp1 = ecx;
6708  Register tmp2 = ebx;
6709 
6710  // Check that both operands are heap objects.
6711  Label miss;
6712  __ mov(tmp1, left);
6713  STATIC_ASSERT(kSmiTag == 0);
6714  __ and_(tmp1, right);
6715  __ JumpIfSmi(tmp1, &miss, Label::kNear);
6716 
6717  // Check that both operands are symbols.
6718  __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
6719  __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
6720  __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
6721  __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
6722  STATIC_ASSERT(kSymbolTag != 0);
6723  __ and_(tmp1, tmp2);
6724  __ test(tmp1, Immediate(kIsSymbolMask));
6725  __ j(zero, &miss, Label::kNear);
6726 
6727  // Symbols are compared by identity.
6728  Label done;
6729  __ cmp(left, right);
6730  // Make sure eax is non-zero. At this point input operands are
6731  // guaranteed to be non-zero.
6732  ASSERT(right.is(eax));
6733  __ j(not_equal, &done, Label::kNear);
6734  STATIC_ASSERT(EQUAL == 0);
6735  STATIC_ASSERT(kSmiTag == 0);
6736  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
6737  __ bind(&done);
6738  __ ret(0);
6739 
6740  __ bind(&miss);
6741  GenerateMiss(masm);
6742 }
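
// Symbols are interned, so equality reduces to pointer identity; and because
// both operands are tagged heap pointers, the not-equal path can simply leave
// right (already in eax, never zero) as the result. A sketch of that contract
// (illustrative only):

#include <cstdint>

// Returns 0 (EQUAL as a smi) on identity, a non-zero value otherwise.
static intptr_t SymbolCompareSketch(intptr_t left, intptr_t right) {
  return left == right ? 0 : right;  // right is a tagged pointer, never 0
}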
6743 
6744 
6745 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
6746  ASSERT(state_ == CompareIC::STRINGS);
6747  Label miss;
6748 
6749  bool equality = Token::IsEqualityOp(op_);
6750 
6751  // Registers containing left and right operands respectively.
6752  Register left = edx;
6753  Register right = eax;
6754  Register tmp1 = ecx;
6755  Register tmp2 = ebx;
6756  Register tmp3 = edi;
6757 
6758  // Check that both operands are heap objects.
6759  __ mov(tmp1, left);
6760  STATIC_ASSERT(kSmiTag == 0);
6761  __ and_(tmp1, right);
6762  __ JumpIfSmi(tmp1, &miss);
6763 
6764  // Check that both operands are strings. This leaves the instance
6765  // types loaded in tmp1 and tmp2.
6766  __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
6767  __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
6768  __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
6769  __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
6770  __ mov(tmp3, tmp1);
6771  STATIC_ASSERT(kNotStringTag != 0);
6772  __ or_(tmp3, tmp2);
6773  __ test(tmp3, Immediate(kIsNotStringMask));
6774  __ j(not_zero, &miss);
6775 
6776  // Fast check for identical strings.
6777  Label not_same;
6778  __ cmp(left, right);
6779  __ j(not_equal, &not_same, Label::kNear);
6780  STATIC_ASSERT(EQUAL == 0);
6781  STATIC_ASSERT(kSmiTag == 0);
6782  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
6783  __ ret(0);
6784 
6785  // Handle not identical strings.
6786  __ bind(&not_same);
6787 
6788  // Check that both strings are symbols. If they are, we're done
6789  // because we already know they are not identical. But in the case of
6790  // non-equality compare, we still need to determine the order.
6791  if (equality) {
6792  Label do_compare;
6793  STATIC_ASSERT(kSymbolTag != 0);
6794  __ and_(tmp1, tmp2);
6795  __ test(tmp1, Immediate(kIsSymbolMask));
6796  __ j(zero, &do_compare, Label::kNear);
6797  // Make sure eax is non-zero. At this point input operands are
6798  // guaranteed to be non-zero.
6799  ASSERT(right.is(eax));
6800  __ ret(0);
6801  __ bind(&do_compare);
6802  }
6803 
6804  // Check that both strings are sequential ASCII.
6805  Label runtime;
6806  __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
6807 
6808  // Compare flat ASCII strings. Returns when done.
6809  if (equality) {
6810  StringCompareStub::GenerateFlatAsciiStringEquals(
6811  masm, left, right, tmp1, tmp2);
6812  } else {
6813  StringCompareStub::GenerateCompareFlatAsciiStrings(
6814  masm, left, right, tmp1, tmp2, tmp3);
6815  }
6816 
6817  // Handle more complex cases in runtime.
6818  __ bind(&runtime);
6819  __ pop(tmp1); // Return address.
6820  __ push(left);
6821  __ push(right);
6822  __ push(tmp1);
6823  if (equality) {
6824  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
6825  } else {
6826  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
6827  }
6828 
6829  __ bind(&miss);
6830  GenerateMiss(masm);
6831 }
6832 
6833 
6834 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
6835  ASSERT(state_ == CompareIC::OBJECTS);
6836  Label miss;
6837  __ mov(ecx, edx);
6838  __ and_(ecx, eax);
6839  __ JumpIfSmi(ecx, &miss, Label::kNear);
6840 
6841  __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
6842  __ j(not_equal, &miss, Label::kNear);
6843  __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
6844  __ j(not_equal, &miss, Label::kNear);
6845 
6846  ASSERT(GetCondition() == equal);
6847  __ sub(eax, edx);
6848  __ ret(0);
6849 
6850  __ bind(&miss);
6851  GenerateMiss(masm);
6852 }
6853 
6854 
6855 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
6856  Label miss;
6857  __ mov(ecx, edx);
6858  __ and_(ecx, eax);
6859  __ JumpIfSmi(ecx, &miss, Label::kNear);
6860 
6863  __ cmp(ecx, known_map_);
6864  __ j(not_equal, &miss, Label::kNear);
6865  __ cmp(ebx, known_map_);
6866  __ j(not_equal, &miss, Label::kNear);
6867 
6868  __ sub(eax, edx);
6869  __ ret(0);
6870 
6871  __ bind(&miss);
6872  GenerateMiss(masm);
6873 }
6874 
6875 
6876 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
6877  {
6878  // Call the runtime system in a fresh internal frame.
6879  ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
6880  masm->isolate());
6881  FrameScope scope(masm, StackFrame::INTERNAL);
6882  __ push(edx); // Preserve edx and eax.
6883  __ push(eax);
6884  __ push(edx); // And also use them as the arguments.
6885  __ push(eax);
6886  __ push(Immediate(Smi::FromInt(op_)));
6887  __ CallExternalReference(miss, 3);
6888  // Compute the entry point of the rewritten stub.
6889  __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
6890  __ pop(eax);
6891  __ pop(edx);
6892  }
6893 
6894  // Do a tail call to the rewritten stub.
6895  __ jmp(edi);
6896 }
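
// The miss handler returns the rewritten stub as a tagged Code object in
// eax; the lea into edi (line 6889) converts it to a raw entry point by
// adding Code::kHeaderSize and stripping the heap-object tag. A sketch of
// that address arithmetic (kHeapObjectTag == 1 under V8's tagging scheme):

#include <cstdint>

static uint32_t EntryFromTaggedCodeSketch(uint32_t tagged_code,
                                          uint32_t header_size) {
  const uint32_t kHeapObjectTag = 1;
  // FieldOperand(eax, Code::kHeaderSize) == eax + kHeaderSize - tag.
  return tagged_code + header_size - kHeapObjectTag;
}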
6897 
6898 
6899 // Helper function used to check that the dictionary doesn't contain
6900 // the property. This function may return false negatives, so miss_label
6901 // must always call a backup property check that is complete.
6902 // This function is safe to call if the receiver has fast properties.
6903 // Name must be a symbol and receiver must be a heap object.
6904 void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
6905  Label* miss,
6906  Label* done,
6907  Register properties,
6908  Handle<String> name,
6909  Register r0) {
6910  ASSERT(name->IsSymbol());
6911 
6912  // If the names of the slots in the range from 1 to kProbes - 1 for the
6913  // hash value are not equal to the name, and the kProbes-th slot is not
6914  // used (its name is the undefined value), then the hash table does not
6915  // contain the property. This holds even if some slots represent deleted
6916  // properties (their names are the hole value).
6917  for (int i = 0; i < kInlinedProbes; i++) {
6918  // Compute the masked index: (hash + i + i * i) & mask.
6919  Register index = r0;
6920  // Capacity is smi 2^n.
6921  __ mov(index, FieldOperand(properties, kCapacityOffset));
6922  __ dec(index);
6923  __ and_(index,
6924  Immediate(Smi::FromInt(name->Hash() +
6925  StringDictionary::GetProbeOffset(i))));
6926 
6927  // Scale the index by multiplying by the entry size.
6928  ASSERT(StringDictionary::kEntrySize == 3);
6929  __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
6930  Register entity_name = r0;
6931  // Having undefined at this place means the name is not contained.
6932  ASSERT_EQ(kSmiTagSize, 1);
6933  __ mov(entity_name, Operand(properties, index, times_half_pointer_size,
6934  kElementsStartOffset - kHeapObjectTag));
6935  __ cmp(entity_name, masm->isolate()->factory()->undefined_value());
6936  __ j(equal, done);
6937 
6938  // Stop if we found the property.
6939  __ cmp(entity_name, Handle<String>(name));
6940  __ j(equal, miss);
6941 
6942  Label the_hole;
6943  // Check for the hole and skip.
6944  __ cmp(entity_name, masm->isolate()->factory()->the_hole_value());
6945  __ j(equal, &the_hole, Label::kNear);
6946 
6947  // Check if the entry name is not a symbol.
6948  __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
6949  __ test_b(FieldOperand(entity_name, Map::kInstanceTypeOffset),
6950  kIsSymbolMask);
6951  __ j(zero, miss);
6952  __ bind(&the_hole);
6953  }
6954 
6955  StringDictionaryLookupStub stub(properties,
6956  r0,
6957  r0,
6958  NEGATIVE_LOOKUP);
6959  __ push(Immediate(Handle<Object>(name)));
6960  __ push(Immediate(name->Hash()));
6961  __ CallStub(&stub);
6962  __ test(r0, r0);
6963  __ j(not_zero, miss);
6964  __ jmp(done);
6965 }
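
// Only kInlinedProbes probes are attempted inline before deferring to the
// out-of-line stub. A hedged C++ sketch of one probe step, following the
// comment's formula ((hash + i + i*i) & mask) and the kEntrySize == 3
// scaling asserted above (names are illustrative):

#include <cstdint>

// Computes the scaled element offset for probe i of an open-addressed
// table whose capacity is a power of two, as StringDictionary's is.
static uint32_t NegativeLookupProbeSketch(uint32_t hash, uint32_t capacity,
                                          uint32_t i) {
  uint32_t mask = capacity - 1;                // dec(index): capacity is 2^n
  uint32_t index = (hash + i + i * i) & mask;  // masked probe index
  return index * 3;                            // lea(index, [index*3])
}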
6966 
6967 
6968 // Probe the string dictionary in the |elements| register. Jump to the
6969  // |done| label if a property with the given name is found, leaving the
6970 // index into the dictionary in |r0|. Jump to the |miss| label
6971 // otherwise.
6972 void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
6973  Label* miss,
6974  Label* done,
6975  Register elements,
6976  Register name,
6977  Register r0,
6978  Register r1) {
6979  ASSERT(!elements.is(r0));
6980  ASSERT(!elements.is(r1));
6981  ASSERT(!name.is(r0));
6982  ASSERT(!name.is(r1));
6983 
6984  __ AssertString(name);
6985 
6986  __ mov(r1, FieldOperand(elements, kCapacityOffset));
6987  __ shr(r1, kSmiTagSize); // convert smi to int
6988  __ dec(r1);
6989 
6990  // Generate an unrolled loop that performs a few probes before
6991  // giving up. Measurements done on Gmail indicate that 2 probes
6992  // cover ~93% of loads from dictionaries.
6993  for (int i = 0; i < kInlinedProbes; i++) {
6994  // Compute the masked index: (hash + i + i * i) & mask.
6995  __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
6996  __ shr(r0, String::kHashShift);
6997  if (i > 0) {
6998  __ add(r0, Immediate(StringDictionary::GetProbeOffset(i)));
6999  }
7000  __ and_(r0, r1);
7001 
7002  // Scale the index by multiplying by the entry size.
7003  ASSERT(StringDictionary::kEntrySize == 3);
7004  __ lea(r0, Operand(r0, r0, times_2, 0)); // r0 = r0 * 3
7005 
7006  // Check if the key is identical to the name.
7007  __ cmp(name, Operand(elements,
7008  r0,
7009  times_4,
7010  kElementsStartOffset - kHeapObjectTag));
7011  __ j(equal, done);
7012  }
7013 
7014  StringDictionaryLookupStub stub(elements,
7015  r1,
7016  r0,
7017  POSITIVE_LOOKUP);
7018  __ push(name);
7019  __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
7020  __ shr(r0, String::kHashShift);
7021  __ push(r0);
7022  __ CallStub(&stub);
7023 
7024  __ test(r1, r1);
7025  __ j(zero, miss);
7026  __ jmp(done);
7027 }
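
// Here the probe hash comes from the string's cached hash field, shifted
// down by String::kHashShift, with GetProbeOffset(i) added on retries
// before masking by capacity - 1. A sketch of that index computation
// (probe_offset stands in for StringDictionary::GetProbeOffset):

#include <cstdint>

static uint32_t PositiveLookupIndexSketch(uint32_t hash_field,
                                          uint32_t hash_shift,
                                          uint32_t probe_offset,
                                          uint32_t capacity) {
  uint32_t hash = hash_field >> hash_shift;       // shr(r0, kHashShift)
  return (hash + probe_offset) & (capacity - 1);  // add(...); and_(r0, r1)
}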
7028 
7029 
7030 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
7031  // This stub overrides SometimesSetsUpAFrame() to return false. That means
7032  // we cannot call anything that could cause a GC from this stub.
7033  // Stack frame on entry:
7034  // esp[0 * kPointerSize]: return address.
7035  // esp[1 * kPointerSize]: key's hash.
7036  // esp[2 * kPointerSize]: key.
7037  // Registers:
7038  // dictionary_: StringDictionary to probe.
7039  // result_: used as scratch.
7040  // index_: will hold the index of the entry if the lookup succeeds;
7041  // it might alias with result_.
7042  // Returns:
7043  // result_ is zero if the lookup failed, non-zero otherwise.
7044 
7045  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
7046 
7047  Register scratch = result_;
7048 
7049  __ mov(scratch, FieldOperand(dictionary_, kCapacityOffset));
7050  __ dec(scratch);
7051  __ SmiUntag(scratch);
7052  __ push(scratch);
7053 
7054  // If the names of the slots in the range from 1 to kProbes - 1 for the
7055  // hash value are not equal to the name, and the kProbes-th slot is not
7056  // used (its name is the undefined value), then the hash table does not
7057  // contain the property. This holds even if some slots represent deleted
7058  // properties (their names are the hole value).
7059  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
7060  // Compute the masked index: (hash + i + i * i) & mask.
7061  __ mov(scratch, Operand(esp, 2 * kPointerSize));
7062  if (i > 0) {
7063  __ add(scratch, Immediate(StringDictionary::GetProbeOffset(i)));
7064  }
7065  __ and_(scratch, Operand(esp, 0));
7066 
7067  // Scale the index by multiplying by the entry size.
7068  ASSERT(StringDictionary::kEntrySize == 3);
7069  __ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
7070 
7071  // Having undefined at this place means the name is not contained.
7072  ASSERT_EQ(kSmiTagSize, 1);
7073  __ mov(scratch, Operand(dictionary_,
7074  index_,
7075  times_pointer_size,
7076  kElementsStartOffset - kHeapObjectTag));
7077  __ cmp(scratch, masm->isolate()->factory()->undefined_value());
7078  __ j(equal, &not_in_dictionary);
7079 
7080  // Stop if we found the property.
7081  __ cmp(scratch, Operand(esp, 3 * kPointerSize));
7082  __ j(equal, &in_dictionary);
7083 
7084  if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
7085  // If we hit a non-symbol key during negative lookup
7086  // we have to bail out, as this key might be equal to the
7087  // key we are looking for.
7088 
7089  // Check if the entry name is not a symbol.
7090  __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
7091  __ test_b(FieldOperand(scratch, Map::kInstanceTypeOffset),
7092  kIsSymbolMask);
7093  __ j(zero, &maybe_in_dictionary);
7094  }
7095  }
7096 
7097  __ bind(&maybe_in_dictionary);
7098  // If we are doing negative lookup then probing failure should be
7099  // treated as a lookup success. For positive lookup probing failure
7100  // should be treated as lookup failure.
7101  if (mode_ == POSITIVE_LOOKUP) {
7102  __ mov(result_, Immediate(0));
7103  __ Drop(1);
7104  __ ret(2 * kPointerSize);
7105  }
7106 
7107  __ bind(&in_dictionary);
7108  __ mov(result_, Immediate(1));
7109  __ Drop(1);
7110  __ ret(2 * kPointerSize);
7111 
7112  __ bind(&not_in_dictionary);
7113  __ mov(result_, Immediate(0));
7114  __ Drop(1);
7115  __ ret(2 * kPointerSize);
7116 }
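
// Out of line, all kTotalProbes probes run; for a negative lookup, hitting
// a non-symbol key cannot prove absence (it might equal the name), which is
// the maybe_in_dictionary outcome. A sketch of how the three outcomes map
// onto the stub's result_ register protocol:

enum ProbeOutcomeSketch {
  kNotInDictionary,    // probe hit undefined: definite miss
  kInDictionary,       // probe hit the key: definite hit
  kMaybeInDictionary   // probes exhausted / non-symbol key seen
};

// result_ is non-zero when the caller must treat the lookup as "found"
// (for negative lookup, "possibly found" must also be treated as found).
static int ResultRegisterSketch(ProbeOutcomeSketch outcome,
                                bool negative_lookup) {
  switch (outcome) {
    case kInDictionary:      return 1;
    case kNotInDictionary:   return 0;
    case kMaybeInDictionary: return negative_lookup ? 1 : 0;
  }
  return 0;
}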
7117 
7118 
7119 struct AheadOfTimeWriteBarrierStubList {
7120  Register object, value, address;
7121  RememberedSetAction action;
7122 };
7123 
7124 
7125 #define REG(Name) { kRegister_ ## Name ## _Code }
7126 
7127 static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
7128  // Used in RegExpExecStub.
7129  { REG(ebx), REG(eax), REG(edi), EMIT_REMEMBERED_SET },
7130  // Used in CompileArrayPushCall.
7131  { REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
7132  { REG(ebx), REG(edi), REG(edx), OMIT_REMEMBERED_SET },
7133  // Used in CompileStoreGlobal and CallFunctionStub.
7134  { REG(ebx), REG(ecx), REG(edx), OMIT_REMEMBERED_SET },
7135  // Used in StoreStubCompiler::CompileStoreField and
7136  // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
7137  { REG(edx), REG(ecx), REG(ebx), EMIT_REMEMBERED_SET },
7138  // GenerateStoreField calls the stub with two different permutations of
7139  // registers. This is the second.
7140  { REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
7141  // StoreIC::GenerateNormal via GenerateDictionaryStore
7142  { REG(ebx), REG(edi), REG(edx), EMIT_REMEMBERED_SET },
7143  // KeyedStoreIC::GenerateGeneric.
7144  { REG(ebx), REG(edx), REG(ecx), EMIT_REMEMBERED_SET},
7145  // KeyedStoreStubCompiler::GenerateStoreFastElement.
7146  { REG(edi), REG(ebx), REG(ecx), EMIT_REMEMBERED_SET},
7147  { REG(edx), REG(edi), REG(ebx), EMIT_REMEMBERED_SET},
7148  // ElementsTransitionGenerator::GenerateMapChangeElementTransition
7149  // and ElementsTransitionGenerator::GenerateSmiToDouble
7150  // and ElementsTransitionGenerator::GenerateDoubleToObject
7151  { REG(edx), REG(ebx), REG(edi), EMIT_REMEMBERED_SET},
7152  { REG(edx), REG(ebx), REG(edi), OMIT_REMEMBERED_SET},
7153  // ElementsTransitionGenerator::GenerateDoubleToObject
7154  { REG(eax), REG(edx), REG(esi), EMIT_REMEMBERED_SET},
7155  { REG(edx), REG(eax), REG(edi), EMIT_REMEMBERED_SET},
7156  // StoreArrayLiteralElementStub::Generate
7157  { REG(ebx), REG(eax), REG(ecx), EMIT_REMEMBERED_SET},
7158  // FastNewClosureStub
7159  { REG(ecx), REG(edx), REG(ebx), EMIT_REMEMBERED_SET},
7160  // Null termination.
7161  { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
7162 };
7163 
7164 #undef REG
7165 
7166 bool RecordWriteStub::IsPregenerated() {
7167  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7168  !entry->object.is(no_reg);
7169  entry++) {
7170  if (object_.is(entry->object) &&
7171  value_.is(entry->value) &&
7172  address_.is(entry->address) &&
7173  remembered_set_action_ == entry->action &&
7174  save_fp_regs_mode_ == kDontSaveFPRegs) {
7175  return true;
7176  }
7177  }
7178  return false;
7179 }
7180 
7181 
7182 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
7183  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
7184  stub1.GetCode()->set_is_pregenerated(true);
7185 
7186  CpuFeatures::TryForceFeatureScope scope(SSE2);
7187  if (CpuFeatures::IsSupported(SSE2)) {
7188  StoreBufferOverflowStub stub2(kSaveFPRegs);
7189  stub2.GetCode()->set_is_pregenerated(true);
7190  }
7191 }
7192 
7193 
7194 void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
7195  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7196  !entry->object.is(no_reg);
7197  entry++) {
7198  RecordWriteStub stub(entry->object,
7199  entry->value,
7200  entry->address,
7201  entry->action,
7202  kDontSaveFPRegs);
7203  stub.GetCode()->set_is_pregenerated(true);
7204  }
7205 }
7206 
7207 
7208 bool CodeStub::CanUseFPRegisters() {
7209  return CpuFeatures::IsSupported(SSE2);
7210 }
7211 
7212 
7213 // Takes the input in 3 registers: address_, value_ and object_. A pointer to
7214 // the value has just been written into the object, now this stub makes sure
7215 // we keep the GC informed. The word in the object where the value has been
7216 // written is in the address register.
7217 void RecordWriteStub::Generate(MacroAssembler* masm) {
7218  Label skip_to_incremental_noncompacting;
7219  Label skip_to_incremental_compacting;
7220 
7221  // The first two instructions are generated with labels so as to get the
7222  // offset fixed up correctly by the bind(Label*) call. We patch them back
7223  // and forth between compare instructions (nops in this position) and the
7224  // real branches when we start and stop incremental heap marking.
7225  __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
7226  __ jmp(&skip_to_incremental_compacting, Label::kFar);
7227 
7228  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
7229  __ RememberedSetHelper(object_,
7230  address_,
7231  value_,
7232  save_fp_regs_mode_,
7233  MacroAssembler::kReturnAtEnd);
7234  } else {
7235  __ ret(0);
7236  }
7237 
7238  __ bind(&skip_to_incremental_noncompacting);
7239  GenerateIncremental(masm, INCREMENTAL);
7240 
7241  __ bind(&skip_to_incremental_compacting);
7242  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
7243 
7244  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
7245  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
7246  masm->set_byte_at(0, kTwoByteNopInstruction);
7247  masm->set_byte_at(2, kFiveByteNopInstruction);
7248 }
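
// set_byte_at(0, ...) and set_byte_at(2, ...) rewrite the first opcode byte
// of each jump so it becomes a same-length compare whose immediate swallows
// the displacement bytes (the "nop in this position" mentioned above); the
// incremental marker later flips them back. A sketch of the resulting mode
// decoding (opcode values assumed from the ia32 stub header: 0xeb jmp rel8,
// 0x3c cmpb al,imm8, 0xe9 jmp rel32, 0x3d cmp eax,imm32):

#include <cstdint>

enum RecordWriteModeSketch {
  kStoreBufferOnly, kIncremental, kIncrementalCompaction
};

static RecordWriteModeSketch GetModeSketch(const uint8_t* code) {
  if (code[0] == 0xeb) return kIncremental;  // first jump is live
  if (code[0] == 0x3c && code[2] == 0xe9)
    return kIncrementalCompaction;           // second jump is live
  return kStoreBufferOnly;                   // both patched to "nops"
}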
7249 
7250 
7251 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
7252  regs_.Save(masm);
7253 
7254  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
7255  Label dont_need_remembered_set;
7256 
7257  __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
7258  __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
7259  regs_.scratch0(),
7260  &dont_need_remembered_set);
7261 
7262  __ CheckPageFlag(regs_.object(),
7263  regs_.scratch0(),
7264  1 << MemoryChunk::SCAN_ON_SCAVENGE,
7265  not_zero,
7266  &dont_need_remembered_set);
7267 
7268  // First notify the incremental marker if necessary, then update the
7269  // remembered set.
7270  CheckNeedsToInformIncrementalMarker(
7271  masm,
7272  kUpdateRememberedSetOnNoNeedToInformIncrementalMarker,
7273  mode);
7274  InformIncrementalMarker(masm, mode);
7275  regs_.Restore(masm);
7276  __ RememberedSetHelper(object_,
7277  address_,
7278  value_,
7279  save_fp_regs_mode_,
7280  MacroAssembler::kReturnAtEnd);
7281 
7282  __ bind(&dont_need_remembered_set);
7283  }
7284 
7285  CheckNeedsToInformIncrementalMarker(
7286  masm,
7287  kReturnOnNoNeedToInformIncrementalMarker,
7288  mode);
7289  InformIncrementalMarker(masm, mode);
7290  regs_.Restore(masm);
7291  __ ret(0);
7292 }
7293 
7294 
7295 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
7296  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
7297  int argument_count = 3;
7298  __ PrepareCallCFunction(argument_count, regs_.scratch0());
7299  __ mov(Operand(esp, 0 * kPointerSize), regs_.object());
7300  if (mode == INCREMENTAL_COMPACTION) {
7301  __ mov(Operand(esp, 1 * kPointerSize), regs_.address()); // Slot.
7302  } else {
7303  ASSERT(mode == INCREMENTAL);
7304  __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
7305  __ mov(Operand(esp, 1 * kPointerSize), regs_.scratch0()); // Value.
7306  }
7307  __ mov(Operand(esp, 2 * kPointerSize),
7308  Immediate(ExternalReference::isolate_address()));
7309 
7310  AllowExternalCallThatCantCauseGC scope(masm);
7311  if (mode == INCREMENTAL_COMPACTION) {
7312  __ CallCFunction(
7313  ExternalReference::incremental_evacuation_record_write_function(
7314  masm->isolate()),
7315  argument_count);
7316  } else {
7317  ASSERT(mode == INCREMENTAL);
7318  __ CallCFunction(
7319  ExternalReference::incremental_marking_record_write_function(
7320  masm->isolate()),
7321  argument_count);
7322  }
7323  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
7324 }
7325 
7326 
7327 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
7328  MacroAssembler* masm,
7329  OnNoNeedToInformIncrementalMarker on_no_need,
7330  Mode mode) {
7331  Label object_is_black, need_incremental, need_incremental_pop_object;
7332 
7333  __ mov(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
7334  __ and_(regs_.scratch0(), regs_.object());
7335  __ mov(regs_.scratch1(),
7336  Operand(regs_.scratch0(),
7337  MemoryChunk::kWriteBarrierCounterOffset));
7338  __ sub(regs_.scratch1(), Immediate(1));
7339  __ mov(Operand(regs_.scratch0(),
7340  MemoryChunk::kWriteBarrierCounterOffset),
7341  regs_.scratch1());
7342  __ j(negative, &need_incremental);
7343 
7344  // Let's look at the color of the object: If it is not black we don't have
7345  // to inform the incremental marker.
7346  __ JumpIfBlack(regs_.object(),
7347  regs_.scratch0(),
7348  regs_.scratch1(),
7349  &object_is_black,
7350  Label::kNear);
7351 
7352  regs_.Restore(masm);
7353  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
7354  __ RememberedSetHelper(object_,
7355  address_,
7356  value_,
7357  save_fp_regs_mode_,
7358  MacroAssembler::kReturnAtEnd);
7359  } else {
7360  __ ret(0);
7361  }
7362 
7363  __ bind(&object_is_black);
7364 
7365  // Get the value from the slot.
7366  __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
7367 
7368  if (mode == INCREMENTAL_COMPACTION) {
7369  Label ensure_not_white;
7370 
7371  __ CheckPageFlag(regs_.scratch0(), // Contains value.
7372  regs_.scratch1(), // Scratch.
7373  MemoryChunk::kEvacuationCandidateMask,
7374  zero,
7375  &ensure_not_white,
7376  Label::kNear);
7377 
7378  __ CheckPageFlag(regs_.object(),
7379  regs_.scratch1(), // Scratch.
7380  MemoryChunk::kSkipEvacuationSlotsRecordingMask,
7381  not_zero,
7382  &ensure_not_white,
7383  Label::kNear);
7384 
7385  __ jmp(&need_incremental);
7386 
7387  __ bind(&ensure_not_white);
7388  }
7389 
7390  // We need an extra register for this, so we push the object register
7391  // temporarily.
7392  __ push(regs_.object());
7393  __ EnsureNotWhite(regs_.scratch0(), // The value.
7394  regs_.scratch1(), // Scratch.
7395  regs_.object(), // Scratch.
7396  &need_incremental_pop_object,
7397  Label::kNear);
7398  __ pop(regs_.object());
7399 
7400  regs_.Restore(masm);
7401  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
7402  __ RememberedSetHelper(object_,
7403  address_,
7404  value_,
7405  save_fp_regs_mode_,
7406  MacroAssembler::kReturnAtEnd);
7407  } else {
7408  __ ret(0);
7409  }
7410 
7411  __ bind(&need_incremental_pop_object);
7412  __ pop(regs_.object());
7413 
7414  __ bind(&need_incremental);
7415 
7416  // Fall through when we need to inform the incremental marker.
7417 }
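
// The entry test above rations marker work: the object's page is found by
// masking its address (pages are power-of-two aligned), and a per-page
// write-barrier counter is decremented; only when it goes negative does the
// stub take the need_incremental path. A sketch of that budget check (the
// struct and field stand in for MemoryChunk's counter):

#include <cstdint>

struct PageSketch { int32_t write_barrier_counter; };

static PageSketch* PageOfSketch(uintptr_t object_address,
                                uintptr_t page_alignment_mask) {
  // and_(scratch0, object) with scratch0 == ~kPageAlignmentMask.
  return reinterpret_cast<PageSketch*>(object_address & ~page_alignment_mask);
}

static bool NeedIncrementalSketch(PageSketch* page) {
  return --page->write_barrier_counter < 0;  // sub(..., 1); j(negative, ...)
}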
7418 
7419 
7420 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
7421  // ----------- S t a t e -------------
7422  // -- eax : element value to store
7423  // -- ebx : array literal
7424  // -- edi : map of array literal
7425  // -- ecx : element index as smi
7426  // -- edx : array literal index in function
7427  // -- esp[0] : return address
7428  // -----------------------------------
7429 
7430  Label element_done;
7431  Label double_elements;
7432  Label smi_element;
7433  Label slow_elements;
7434  Label slow_elements_from_double;
7435  Label fast_elements;
7436 
7437  __ CheckFastElements(edi, &double_elements);
7438 
7439  // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
7440  __ JumpIfSmi(eax, &smi_element);
7441  __ CheckFastSmiElements(edi, &fast_elements, Label::kNear);
7442 
7443  // Storing into the array literal requires an elements transition. Call
7444  // into the runtime.
7445 
7446  __ bind(&slow_elements);
7447  __ pop(edi); // Pop return address and remember to put back later for tail
7448  // call.
7449  __ push(ebx);
7450  __ push(ecx);
7451  __ push(eax);
7452  __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
7453  __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
7454  __ push(edx);
7455  __ push(edi); // Push the return address back so that the tail call
7456  // returns to the right place.
7457  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
7458 
7459  __ bind(&slow_elements_from_double);
7460  __ pop(edx);
7461  __ jmp(&slow_elements);
7462 
7463  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
7464  __ bind(&fast_elements);
7465  __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
7466  __ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size,
7467  FixedArrayBase::kHeaderSize));
7468  __ mov(Operand(ecx, 0), eax);
7469  // Update the write barrier for the array store.
7470  __ RecordWrite(ebx, ecx, eax,
7471  kDontSaveFPRegs,
7472  EMIT_REMEMBERED_SET,
7473  OMIT_SMI_CHECK);
7474  __ ret(0);
7475 
7476  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
7477  // and value is Smi.
7478  __ bind(&smi_element);
7479  __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
7480  __ mov(FieldOperand(ebx, ecx, times_half_pointer_size,
7481  FixedArrayBase::kHeaderSize), eax);
7482  __ ret(0);
7483 
7484  // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
7485  __ bind(&double_elements);
7486 
7487  __ push(edx);
7488  __ mov(edx, FieldOperand(ebx, JSObject::kElementsOffset));
7489  __ StoreNumberToDoubleElements(eax,
7490  edx,
7491  ecx,
7492  edi,
7493  xmm0,
7494  &slow_elements_from_double,
7495  false);
7496  __ pop(edx);
7497  __ ret(0);
7498 }
7499 
7500 
7501 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
7502  if (entry_hook_ != NULL) {
7503  ProfileEntryHookStub stub;
7504  masm->CallStub(&stub);
7505  }
7506 }
7507 
7508 
7509 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
7510  // Ecx is the only volatile register we must save.
7511  __ push(ecx);
7512 
7513  // Calculate and push the original stack pointer.
7514  __ lea(eax, Operand(esp, kPointerSize));
7515  __ push(eax);
7516 
7517  // Calculate and push the function address.
7518  __ mov(eax, Operand(eax, 0));
7519  __ sub(eax, Immediate(Assembler::kCallInstructionLength));
7520  __ push(eax);
7521 
7522  // Call the entry hook.
7523  int32_t hook_location = reinterpret_cast<int32_t>(&entry_hook_);
7524  __ call(Operand(hook_location, RelocInfo::NONE));
7525  __ add(esp, Immediate(2 * kPointerSize));
7526 
7527  // Restore ecx.
7528  __ pop(ecx);
7529  __ ret(0);
7530 }
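
// The stub rebuilds its caller's view: esp + kPointerSize is the stack
// pointer before the hook call, and the return address found there, minus
// Assembler::kCallInstructionLength (5 bytes on ia32: e8 + rel32), is the
// entry point of the function being profiled. A sketch of the arithmetic:

#include <cstdint>

static const int kCallInstructionLengthSketch = 5;  // e8 xx xx xx xx

// Given the return address pushed by the call into the stub, recover the
// address of that call instruction, i.e. the instrumented entry point.
static uint32_t FunctionEntrySketch(uint32_t return_address) {
  return return_address - kCallInstructionLengthSketch;
}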
7531 
7532 #undef __
7533 
7534 } } // namespace v8::internal
7535 
7536 #endif // V8_TARGET_ARCH_IA32