v8 3.11.10 (node 0.8.26)
V8 is Google's open source JavaScript engine.
code-stubs-ia32.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if defined(V8_TARGET_ARCH_IA32)
31 
32 #include "bootstrapper.h"
33 #include "code-stubs.h"
34 #include "isolate.h"
35 #include "jsregexp.h"
36 #include "regexp-macro-assembler.h"
37 #include "stub-cache.h"
38 #include "codegen.h"
39 
40 namespace v8 {
41 namespace internal {
42 
43 #define __ ACCESS_MASM(masm)
44 
45 void ToNumberStub::Generate(MacroAssembler* masm) {
46  // The ToNumber stub takes one argument in eax.
47  Label check_heap_number, call_builtin;
48  __ JumpIfNotSmi(eax, &check_heap_number, Label::kNear);
49  __ ret(0);
50 
51  __ bind(&check_heap_number);
52  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
53  Factory* factory = masm->isolate()->factory();
54  __ cmp(ebx, Immediate(factory->heap_number_map()));
55  __ j(not_equal, &call_builtin, Label::kNear);
56  __ ret(0);
57 
58  __ bind(&call_builtin);
59  __ pop(ecx); // Pop return address.
60  __ push(eax);
61  __ push(ecx); // Push return address.
62  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
63 }
64 
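// Aside (editor's note, not part of the original file): ToNumberStub can
// return a smi argument untouched because on ia32 a smi *is* its integer
// value shifted left by one bit (kSmiTag == 0, kSmiTagSize == 1), so a smi
// is already a valid number. A minimal C++ sketch of the tagging scheme
// this stub relies on, assuming 32-bit ints:
//
//   int32_t SmiTag(int32_t value) { return value << 1; }  // 5 -> 0b1010
//   int32_t SmiUntag(int32_t smi) { return smi >> 1; }    // 0b1010 -> 5
//   bool IsSmi(int32_t tagged)    { return (tagged & 1) == 0; }
//
// JumpIfNotSmi above therefore only needs to test the low bit of eax.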
65 
66 void FastNewClosureStub::Generate(MacroAssembler* masm) {
67  // Create a new closure from the given function info in new
68  // space. Set the context to the current context in esi.
69  Label gc;
70  __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);
71 
72  // Get the function info from the stack.
73  __ mov(edx, Operand(esp, 1 * kPointerSize));
74 
75  int map_index = (language_mode_ == CLASSIC_MODE)
76  ? Context::FUNCTION_MAP_INDEX
77  : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
78 
79  // Compute the function map in the current global context and set that
80  // as the map of the allocated object.
81  __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
82  __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
83  __ mov(ecx, Operand(ecx, Context::SlotOffset(map_index)));
84  __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);
85 
86  // Initialize the rest of the function. We don't have to update the
87  // write barrier because the allocated object is in new space.
88  Factory* factory = masm->isolate()->factory();
89  __ mov(ebx, Immediate(factory->empty_fixed_array()));
90  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx);
91  __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
92  __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset),
93  Immediate(factory->the_hole_value()));
94  __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
95  __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
96  __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
97  __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
98  Immediate(factory->undefined_value()));
99 
100  // Initialize the code pointer in the function to be the one
101  // found in the shared function info object.
102  __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
103  __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
104  __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
105 
106  // Return and remove the on-stack parameter.
107  __ ret(1 * kPointerSize);
108 
109  // Create a new closure through the slower runtime call.
110  __ bind(&gc);
111  __ pop(ecx); // Temporarily remove return address.
112  __ pop(edx);
113  __ push(esi);
114  __ push(edx);
115  __ push(Immediate(factory->false_value()));
116  __ push(ecx); // Restore return address.
117  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
118 }
119 
120 
121 void FastNewContextStub::Generate(MacroAssembler* masm) {
122  // Try to allocate the context in new space.
123  Label gc;
124  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
125  __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
126  eax, ebx, ecx, &gc, TAG_OBJECT);
127 
128  // Get the function from the stack.
129  __ mov(ecx, Operand(esp, 1 * kPointerSize));
130 
131  // Set up the object header.
132  Factory* factory = masm->isolate()->factory();
133  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
134  factory->function_context_map());
135  __ mov(FieldOperand(eax, FixedArray::kLengthOffset),
136  Immediate(Smi::FromInt(length)));
137 
138  // Set up the fixed slots.
139  __ Set(ebx, Immediate(0)); // Set to NULL.
140  __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
141  __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), esi);
142  __ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);
143 
144  // Copy the global object from the previous context.
145  __ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
146  __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx);
147 
148  // Initialize the rest of the slots to undefined.
149  __ mov(ebx, factory->undefined_value());
150  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
151  __ mov(Operand(eax, Context::SlotOffset(i)), ebx);
152  }
153 
154  // Return and remove the on-stack parameter.
155  __ mov(esi, eax);
156  __ ret(1 * kPointerSize);
157 
158  // Need to collect. Call into runtime system.
159  __ bind(&gc);
160  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
161 }
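// Aside (editor's note, not part of the original file): the fixed slots
// written by FastNewContextStub above (and by FastNewBlockContextStub
// below) follow the layout that, to the best of my knowledge, Context
// used in this V8 version (see contexts.h):
//
//   slot 0  Context::CLOSURE_INDEX    -- JSFunction owning the context
//   slot 1  Context::PREVIOUS_INDEX   -- link to the enclosing context
//   slot 2  Context::EXTENSION_INDEX  -- extension object / scope info
//   slot 3  Context::GLOBAL_INDEX     -- the global object
//
// Context::MIN_CONTEXT_SLOTS counts these fixed slots; the slots_ user
// slots follow them, which is why the loop above starts initializing at
// MIN_CONTEXT_SLOTS.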
162 
163 
164 void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
165  // Stack layout on entry:
166  //
167  // [esp + (1 * kPointerSize)]: function
168  // [esp + (2 * kPointerSize)]: serialized scope info
169 
170  // Try to allocate the context in new space.
171  Label gc;
172  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
173  __ AllocateInNewSpace(FixedArray::SizeFor(length),
174  eax, ebx, ecx, &gc, TAG_OBJECT);
175 
176  // Get the function or sentinel from the stack.
177  __ mov(ecx, Operand(esp, 1 * kPointerSize));
178 
179  // Get the serialized scope info from the stack.
180  __ mov(ebx, Operand(esp, 2 * kPointerSize));
181 
182  // Set up the object header.
183  Factory* factory = masm->isolate()->factory();
184  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
185  factory->block_context_map());
186  __ mov(FieldOperand(eax, FixedArray::kLengthOffset),
187  Immediate(Smi::FromInt(length)));
188 
189  // If this block context is nested in the global context we get a smi
190  // sentinel instead of a function. The block context should get the
191  // canonical empty function of the global context as its closure which
192  // we still have to look up.
193  Label after_sentinel;
194  __ JumpIfNotSmi(ecx, &after_sentinel, Label::kNear);
195  if (FLAG_debug_code) {
196  const char* message = "Expected 0 as a Smi sentinel";
197  __ cmp(ecx, 0);
198  __ Assert(equal, message);
199  }
200  __ mov(ecx, GlobalObjectOperand());
201  __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
202  __ mov(ecx, ContextOperand(ecx, Context::CLOSURE_INDEX));
203  __ bind(&after_sentinel);
204 
205  // Set up the fixed slots.
206  __ mov(ContextOperand(eax, Context::CLOSURE_INDEX), ecx);
207  __ mov(ContextOperand(eax, Context::PREVIOUS_INDEX), esi);
208  __ mov(ContextOperand(eax, Context::EXTENSION_INDEX), ebx);
209 
210  // Copy the global object from the previous context.
211  __ mov(ebx, ContextOperand(esi, Context::GLOBAL_INDEX));
212  __ mov(ContextOperand(eax, Context::GLOBAL_INDEX), ebx);
213 
214  // Initialize the rest of the slots to the hole value.
215  if (slots_ == 1) {
216  __ mov(ContextOperand(eax, Context::MIN_CONTEXT_SLOTS),
217  factory->the_hole_value());
218  } else {
219  __ mov(ebx, factory->the_hole_value());
220  for (int i = 0; i < slots_; i++) {
221  __ mov(ContextOperand(eax, i + Context::MIN_CONTEXT_SLOTS), ebx);
222  }
223  }
224 
225  // Return and remove the on-stack parameters.
226  __ mov(esi, eax);
227  __ ret(2 * kPointerSize);
228 
229  // Need to collect. Call into runtime system.
230  __ bind(&gc);
231  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
232 }
233 
234 
235 static void GenerateFastCloneShallowArrayCommon(
236  MacroAssembler* masm,
237  int length,
238  FastCloneShallowArrayStub::Mode mode,
239  Label* fail) {
240  // Registers on entry:
241  //
242  // ecx: boilerplate literal array.
243  ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
244 
245  // All sizes here are multiples of kPointerSize.
246  int elements_size = 0;
247  if (length > 0) {
248  elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
249  ? FixedDoubleArray::SizeFor(length)
250  : FixedArray::SizeFor(length);
251  }
252  int size = JSArray::kSize + elements_size;
253 
254  // Allocate both the JS array and the elements array in one big
255  // allocation. This avoids multiple limit checks.
256  __ AllocateInNewSpace(size, eax, ebx, edx, fail, TAG_OBJECT);
257 
258  // Copy the JS array part.
259  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
260  if ((i != JSArray::kElementsOffset) || (length == 0)) {
261  __ mov(ebx, FieldOperand(ecx, i));
262  __ mov(FieldOperand(eax, i), ebx);
263  }
264  }
265 
266  if (length > 0) {
267  // Get hold of the elements array of the boilerplate and setup the
268  // elements pointer in the resulting object.
269  __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
270  __ lea(edx, Operand(eax, JSArray::kSize));
271  __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);
272 
273  // Copy the elements array.
274  if (mode == FastCloneShallowArrayStub::CLONE_ELEMENTS) {
275  for (int i = 0; i < elements_size; i += kPointerSize) {
276  __ mov(ebx, FieldOperand(ecx, i));
277  __ mov(FieldOperand(edx, i), ebx);
278  }
279  } else {
280  ASSERT(mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS);
281  int i;
282  for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
283  __ mov(ebx, FieldOperand(ecx, i));
284  __ mov(FieldOperand(edx, i), ebx);
285  }
286  while (i < elements_size) {
287  __ fld_d(FieldOperand(ecx, i));
288  __ fstp_d(FieldOperand(edx, i));
289  i += kDoubleSize;
290  }
291  ASSERT(i == elements_size);
292  }
293  }
294 }
295 
296 
297 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
298  // Stack layout on entry:
299  //
300  // [esp + kPointerSize]: constant elements.
301  // [esp + (2 * kPointerSize)]: literal index.
302  // [esp + (3 * kPointerSize)]: literals array.
303 
304  // Load boilerplate object into ecx and check if we need to create a
305  // boilerplate.
306  __ mov(ecx, Operand(esp, 3 * kPointerSize));
307  __ mov(eax, Operand(esp, 2 * kPointerSize));
308  STATIC_ASSERT(kPointerSize == 4);
309  STATIC_ASSERT(kSmiTagSize == 1);
310  STATIC_ASSERT(kSmiTag == 0);
311  __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
312  FixedArray::kHeaderSize));
313  Factory* factory = masm->isolate()->factory();
314  __ cmp(ecx, factory->undefined_value());
315  Label slow_case;
316  __ j(equal, &slow_case);
317 
318  FastCloneShallowArrayStub::Mode mode = mode_;
319  // ecx is boilerplate object.
320  if (mode == CLONE_ANY_ELEMENTS) {
321  Label double_elements, check_fast_elements;
322  __ mov(ebx, FieldOperand(ecx, JSArray::kElementsOffset));
323  __ CheckMap(ebx, factory->fixed_cow_array_map(),
324  &check_fast_elements, DONT_DO_SMI_CHECK);
325  GenerateFastCloneShallowArrayCommon(masm, 0,
326  COPY_ON_WRITE_ELEMENTS, &slow_case);
327  __ ret(3 * kPointerSize);
328 
329  __ bind(&check_fast_elements);
330  __ CheckMap(ebx, factory->fixed_array_map(),
331  &double_elements, DONT_DO_SMI_CHECK);
332  GenerateFastCloneShallowArrayCommon(masm, length_,
333  CLONE_ELEMENTS, &slow_case);
334  __ ret(3 * kPointerSize);
335 
336  __ bind(&double_elements);
337  mode = CLONE_DOUBLE_ELEMENTS;
338  // Fall through to generate the code to handle double elements.
339  }
340 
341  if (FLAG_debug_code) {
342  const char* message;
343  Handle<Map> expected_map;
344  if (mode == CLONE_ELEMENTS) {
345  message = "Expected (writable) fixed array";
346  expected_map = factory->fixed_array_map();
347  } else if (mode == CLONE_DOUBLE_ELEMENTS) {
348  message = "Expected (writable) fixed double array";
349  expected_map = factory->fixed_double_array_map();
350  } else {
351  ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
352  message = "Expected copy-on-write fixed array";
353  expected_map = factory->fixed_cow_array_map();
354  }
355  __ push(ecx);
356  __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
357  __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), expected_map);
358  __ Assert(equal, message);
359  __ pop(ecx);
360  }
361 
362  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
363  // Return and remove the on-stack parameters.
364  __ ret(3 * kPointerSize);
365 
366  __ bind(&slow_case);
367  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
368 }
369 
370 
371 void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
372  // Stack layout on entry:
373  //
374  // [esp + kPointerSize]: object literal flags.
375  // [esp + (2 * kPointerSize)]: constant properties.
376  // [esp + (3 * kPointerSize)]: literal index.
377  // [esp + (4 * kPointerSize)]: literals array.
378 
379  // Load boilerplate object into ecx and check if we need to create a
380  // boilerplate.
381  Label slow_case;
382  __ mov(ecx, Operand(esp, 4 * kPointerSize));
383  __ mov(eax, Operand(esp, 3 * kPointerSize));
384  STATIC_ASSERT(kPointerSize == 4);
385  STATIC_ASSERT(kSmiTagSize == 1);
386  STATIC_ASSERT(kSmiTag == 0);
387  __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
388  FixedArray::kHeaderSize));
389  Factory* factory = masm->isolate()->factory();
390  __ cmp(ecx, factory->undefined_value());
391  __ j(equal, &slow_case);
392 
393  // Check that the boilerplate contains only fast properties and we can
394  // statically determine the instance size.
395  int size = JSObject::kHeaderSize + length_ * kPointerSize;
396  __ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
397  __ movzx_b(eax, FieldOperand(ebx, Map::kInstanceSizeOffset));
398  __ cmp(eax, Immediate(size >> kPointerSizeLog2));
399  __ j(not_equal, &slow_case);
400 
401  // Allocate the JS object and copy header together with all in-object
402  // properties from the boilerplate.
403  __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);
404  for (int i = 0; i < size; i += kPointerSize) {
405  __ mov(ebx, FieldOperand(ecx, i));
406  __ mov(FieldOperand(eax, i), ebx);
407  }
408 
409  // Return and remove the on-stack parameters.
410  __ ret(4 * kPointerSize);
411 
412  __ bind(&slow_case);
413  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
414 }
415 
416 
417 // The stub expects its argument on the stack and returns its result in tos_:
418 // zero for false, and a non-zero value for true.
419 void ToBooleanStub::Generate(MacroAssembler* masm) {
420  // This stub overrides SometimesSetsUpAFrame() to return false. That means
421  // we cannot call anything that could cause a GC from this stub.
422  Label patch;
423  Factory* factory = masm->isolate()->factory();
424  const Register argument = eax;
425  const Register map = edx;
426 
427  if (!types_.IsEmpty()) {
428  __ mov(argument, Operand(esp, 1 * kPointerSize));
429  }
430 
431  // undefined -> false
432  CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
433 
434  // Boolean -> its value
435  CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
436  CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
437 
438  // 'null' -> false.
439  CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
440 
441  if (types_.Contains(SMI)) {
442  // Smis: 0 -> false, all other -> true
443  Label not_smi;
444  __ JumpIfNotSmi(argument, &not_smi, Label::kNear);
445  // argument contains the correct return value already.
446  if (!tos_.is(argument)) {
447  __ mov(tos_, argument);
448  }
449  __ ret(1 * kPointerSize);
450  __ bind(&not_smi);
451  } else if (types_.NeedsMap()) {
452  // If we need a map later and have a Smi -> patch.
453  __ JumpIfSmi(argument, &patch, Label::kNear);
454  }
455 
456  if (types_.NeedsMap()) {
457  __ mov(map, FieldOperand(argument, HeapObject::kMapOffset));
458 
459  if (types_.CanBeUndetectable()) {
460  __ test_b(FieldOperand(map, Map::kBitFieldOffset),
461  1 << Map::kIsUndetectable);
462  // Undetectable -> false.
463  Label not_undetectable;
464  __ j(zero, &not_undetectable, Label::kNear);
465  __ Set(tos_, Immediate(0));
466  __ ret(1 * kPointerSize);
467  __ bind(&not_undetectable);
468  }
469  }
470 
471  if (types_.Contains(SPEC_OBJECT)) {
472  // spec object -> true.
473  Label not_js_object;
474  __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
475  __ j(below, &not_js_object, Label::kNear);
476  // argument contains the correct return value already.
477  if (!tos_.is(argument)) {
478  __ Set(tos_, Immediate(1));
479  }
480  __ ret(1 * kPointerSize);
481  __ bind(&not_js_object);
482  }
483 
484  if (types_.Contains(STRING)) {
485  // String value -> false iff empty.
486  Label not_string;
487  __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
488  __ j(above_equal, &not_string, Label::kNear);
489  __ mov(tos_, FieldOperand(argument, String::kLengthOffset));
490  __ ret(1 * kPointerSize); // the string length is OK as the return value
491  __ bind(&not_string);
492  }
493 
494  if (types_.Contains(HEAP_NUMBER)) {
495  // heap number -> false iff +0, -0, or NaN.
496  Label not_heap_number, false_result;
497  __ cmp(map, factory->heap_number_map());
498  __ j(not_equal, &not_heap_number, Label::kNear);
499  __ fldz();
500  __ fld_d(FieldOperand(argument, HeapNumber::kValueOffset));
501  __ FCmp();
502  __ j(zero, &false_result, Label::kNear);
503  // argument contains the correct return value already.
504  if (!tos_.is(argument)) {
505  __ Set(tos_, Immediate(1));
506  }
507  __ ret(1 * kPointerSize);
508  __ bind(&false_result);
509  __ Set(tos_, Immediate(0));
510  __ ret(1 * kPointerSize);
511  __ bind(&not_heap_number);
512  }
513 
514  __ bind(&patch);
515  GenerateTypeTransition(masm);
516 }
517 
518 
519 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
520  // We don't allow a GC during a store buffer overflow so there is no need to
521  // store the registers in any particular way, but we do have to store and
522  // restore them.
523  __ pushad();
524  if (save_doubles_ == kSaveFPRegs) {
525  CpuFeatures::Scope scope(SSE2);
526  __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
527  for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
528  XMMRegister reg = XMMRegister::from_code(i);
529  __ movdbl(Operand(esp, i * kDoubleSize), reg);
530  }
531  }
532  const int argument_count = 1;
533 
534  AllowExternalCallThatCantCauseGC scope(masm);
535  __ PrepareCallCFunction(argument_count, ecx);
536  __ mov(Operand(esp, 0 * kPointerSize),
537  Immediate(ExternalReference::isolate_address()));
538  __ CallCFunction(
539  ExternalReference::store_buffer_overflow_function(masm->isolate()),
540  argument_count);
541  if (save_doubles_ == kSaveFPRegs) {
542  CpuFeatures::Scope scope(SSE2);
543  for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
544  XMMRegister reg = XMMRegister::from_code(i);
545  __ movdbl(reg, Operand(esp, i * kDoubleSize));
546  }
547  __ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
548  }
549  __ popad();
550  __ ret(0);
551 }
552 
553 
554 void ToBooleanStub::CheckOddball(MacroAssembler* masm,
555  Type type,
556  Heap::RootListIndex value,
557  bool result) {
558  const Register argument = eax;
559  if (types_.Contains(type)) {
560  // If we see an expected oddball, return its ToBoolean value tos_.
561  Label different_value;
562  __ CompareRoot(argument, value);
563  __ j(not_equal, &different_value, Label::kNear);
564  if (!result) {
565  // If we have to return zero, there is no way around clearing tos_.
566  __ Set(tos_, Immediate(0));
567  } else if (!tos_.is(argument)) {
568  // If we have to return non-zero, we can re-use the argument if it is the
569  // same register as the result, because we never see Smi-zero here.
570  __ Set(tos_, Immediate(1));
571  }
572  __ ret(1 * kPointerSize);
573  __ bind(&different_value);
574  }
575 }
576 
577 
578 void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
579  __ pop(ecx); // Get return address, operand is now on top of stack.
580  __ push(Immediate(Smi::FromInt(tos_.code())));
581  __ push(Immediate(Smi::FromInt(types_.ToByte())));
582  __ push(ecx); // Push return address.
583  // Patch the caller to an appropriate specialized stub and return the
584  // operation result to the caller of the stub.
585  __ TailCallExternalReference(
586  ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
587  3,
588  1);
589 }
590 
591 
592 class FloatingPointHelper : public AllStatic {
593  public:
594  enum ArgLocation {
595  ARGS_ON_STACK,
596  ARGS_IN_REGISTERS
597  };
598 
599  // Code pattern for loading a floating point value. Input value must
600  // be either a smi or a heap number object (fp value). Requirements:
601  // operand in register number. Returns operand as floating point number
602  // on FPU stack.
603  static void LoadFloatOperand(MacroAssembler* masm, Register number);
604 
605  // Code pattern for loading floating point values. Input values must
606  // be either smi or heap number objects (fp values). Requirements:
607  // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
608  // Returns operands as floating point numbers on FPU stack.
609  static void LoadFloatOperands(MacroAssembler* masm,
610  Register scratch,
611  ArgLocation arg_location = ARGS_ON_STACK);
612 
613  // Similar to LoadFloatOperand but assumes that both operands are smis.
614  // Expects operands in edx, eax.
615  static void LoadFloatSmis(MacroAssembler* masm, Register scratch);
616 
617  // Test if operands are smi or number objects (fp). Requirements:
618  // operand_1 in eax, operand_2 in edx; falls through on float
619  // operands, jumps to the non_float label otherwise.
620  static void CheckFloatOperands(MacroAssembler* masm,
621  Label* non_float,
622  Register scratch);
623 
624  // Checks that the two floating point numbers on top of the FPU stack
625  // have int32 values.
626  static void CheckFloatOperandsAreInt32(MacroAssembler* masm,
627  Label* non_int32);
628 
629  // Takes the operands in edx and eax and loads them as integers in eax
630  // and ecx.
631  static void LoadUnknownsAsIntegers(MacroAssembler* masm,
632  bool use_sse3,
633  Label* operand_conversion_failure);
634 
635  // Must only be called after LoadUnknownsAsIntegers. Assumes that the
636  // operands are pushed on the stack, and that their conversions to int32
637  // are in eax and ecx. Checks that the original numbers were in the int32
638  // range.
639  static void CheckLoadedIntegersWereInt32(MacroAssembler* masm,
640  bool use_sse3,
641  Label* not_int32);
642 
643  // Assumes that operands are smis or heap numbers and loads them
644  // into xmm0 and xmm1. Operands are in edx and eax.
645  // Leaves operands unchanged.
646  static void LoadSSE2Operands(MacroAssembler* masm);
647 
648  // Test if operands are numbers (smi or HeapNumber objects), and load
649  // them into xmm0 and xmm1 if they are. Jump to label not_numbers if
650  // either operand is not a number. Operands are in edx and eax.
651  // Leaves operands unchanged.
652  static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);
653 
654  // Similar to LoadSSE2Operands but assumes that both operands are smis.
655  // Expects operands in edx, eax.
656  static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
657 
658  // Checks that the two floating point numbers loaded into xmm0 and xmm1
659  // have int32 values.
660  static void CheckSSE2OperandsAreInt32(MacroAssembler* masm,
661  Label* non_int32,
662  Register scratch);
663 };
664 
665 
666 // Get the integer part of a heap number. Surprisingly, all this bit twiddling
667 // is faster than using the built-in instructions on floating point registers.
668 // Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the
669 // trashed registers.
670 static void IntegerConvert(MacroAssembler* masm,
671  Register source,
672  bool use_sse3,
673  Label* conversion_failure) {
674  ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
675  Label done, right_exponent, normal_exponent;
676  Register scratch = ebx;
677  Register scratch2 = edi;
678  // Get exponent word.
679  __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
680  // Get exponent alone in scratch2.
681  __ mov(scratch2, scratch);
682  __ and_(scratch2, HeapNumber::kExponentMask);
683  if (use_sse3) {
684  CpuFeatures::Scope scope(SSE3);
685  // Check whether the exponent is too big for a 64 bit signed integer.
686  static const uint32_t kTooBigExponent =
687  (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
688  __ cmp(scratch2, Immediate(kTooBigExponent));
689  __ j(greater_equal, conversion_failure);
690  // Load x87 register with heap number.
691  __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
692  // Reserve space for 64 bit answer.
693  __ sub(esp, Immediate(sizeof(uint64_t))); // Nolint.
694  // Do conversion, which cannot fail because we checked the exponent.
695  __ fisttp_d(Operand(esp, 0));
696  __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
697  __ add(esp, Immediate(sizeof(uint64_t))); // Nolint.
698  } else {
699  // Load ecx with zero. We use this either for the final shift or
700  // for the answer.
701  __ xor_(ecx, ecx);
702  // Check whether the exponent matches a 32 bit signed int that cannot be
703  // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
704  // exponent is 30 (biased). This is the exponent that we are fastest at and
705  // also the highest exponent we can handle here.
706  const uint32_t non_smi_exponent =
707  (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
708  __ cmp(scratch2, Immediate(non_smi_exponent));
709  // If we have a match of the int32-but-not-Smi exponent then skip some
710  // logic.
711  __ j(equal, &right_exponent, Label::kNear);
712  // If the exponent is higher than that then go to slow case. This catches
713  // numbers that don't fit in a signed int32, infinities and NaNs.
714  __ j(less, &normal_exponent, Label::kNear);
715 
716  {
717  // Handle a big exponent. The only reason we have this code is that the
718  // >>> operator has a tendency to generate numbers with an exponent of 31.
719  const uint32_t big_non_smi_exponent =
720  (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
721  __ cmp(scratch2, Immediate(big_non_smi_exponent));
722  __ j(not_equal, conversion_failure);
723  // We have the big exponent, typically from >>>. This means the number is
724  // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
725  __ mov(scratch2, scratch);
726  __ and_(scratch2, HeapNumber::kMantissaMask);
727  // Put back the implicit 1.
728  __ or_(scratch2, 1 << HeapNumber::kExponentShift);
729  // Shift up the mantissa bits to take up the space the exponent used to
730  // take. We just orred in the implicit bit so that took care of one and
731  // we want to use the full unsigned range so we subtract 1 bit from the
732  // shift distance.
733  const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
734  __ shl(scratch2, big_shift_distance);
735  // Get the second half of the double.
736  __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
737  // Shift down 21 bits to get the most significant 11 bits or the low
738  // mantissa word.
739  __ shr(ecx, 32 - big_shift_distance);
740  __ or_(ecx, scratch2);
741  // We have the answer in ecx, but we may need to negate it.
742  __ test(scratch, scratch);
743  __ j(positive, &done, Label::kNear);
744  __ neg(ecx);
745  __ jmp(&done, Label::kNear);
746  }
747 
748  __ bind(&normal_exponent);
749  // Exponent word in scratch, exponent part of exponent word in scratch2.
750  // Zero in ecx.
751  // We know the exponent is smaller than 30 (biased). If it is less than
752  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
753  // it rounds to zero.
754  const uint32_t zero_exponent =
755  (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
756  __ sub(scratch2, Immediate(zero_exponent));
757  // ecx already has a Smi zero.
758  __ j(less, &done, Label::kNear);
759 
760  // We have a shifted exponent between 0 and 30 in scratch2.
761  __ shr(scratch2, HeapNumber::kExponentShift);
762  __ mov(ecx, Immediate(30));
763  __ sub(ecx, scratch2);
764 
765  __ bind(&right_exponent);
766  // Here ecx is the shift, scratch is the exponent word.
767  // Get the top bits of the mantissa.
768  __ and_(scratch, HeapNumber::kMantissaMask);
769  // Put back the implicit 1.
770  __ or_(scratch, 1 << HeapNumber::kExponentShift);
771  // Shift up the mantissa bits to take up the space the exponent used to
772  // take. We have kExponentShift + 1 significant bits in the low end of the
773  // word. Shift them to the top bits.
774  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
775  __ shl(scratch, shift_distance);
776  // Get the second half of the double. For some exponents we don't
777  // actually need this because the bits get shifted out again, but
778  // it's probably slower to test than just to do it.
779  __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
780  // Shift down 22 bits to get the most significant 10 bits or the low
781  // mantissa word.
782  __ shr(scratch2, 32 - shift_distance);
783  __ or_(scratch2, scratch);
784  // Move down according to the exponent.
785  __ shr_cl(scratch2);
786  // Now the unsigned answer is in scratch2. We need to move it to ecx and
787  // we may need to fix the sign.
788  Label negative;
789  __ xor_(ecx, ecx);
790  __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
791  __ j(greater, &negative, Label::kNear);
792  __ mov(ecx, scratch2);
793  __ jmp(&done, Label::kNear);
794  __ bind(&negative);
795  __ sub(ecx, scratch2);
796  __ bind(&done);
797  }
798 }
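// Aside (editor's note, not part of the original file): a minimal C++
// sketch of the same extraction the non-SSE3 path above performs, assuming
// the IEEE-754 double layout HeapNumber uses (1 sign bit, 11 exponent
// bits, 52 mantissa bits) and an exponent already checked to lie in
// [0, 30]:
//
//   #include <stdint.h>
//   int32_t IntegerConvertSketch(uint64_t bits) {
//     uint32_t hi = static_cast<uint32_t>(bits >> 32);   // exponent word
//     int32_t exponent = ((hi >> 20) & 0x7ff) - 1023;    // unbias
//     uint32_t mantissa = (hi & 0xfffff) | (1u << 20);   // implicit 1
//     uint32_t combined = (mantissa << 10) |             // top bits ...
//         (static_cast<uint32_t>(bits) >> 22);           // ... low bits
//     uint32_t result = combined >> (30 - exponent);     // integer part
//     return (hi & 0x80000000u) ? -static_cast<int32_t>(result)
//                               : static_cast<int32_t>(result);
//   }
//
// For example, 6.0 has unbiased exponent 2 and combined == 0x60000000, so
// the shift by 30 - 2 = 28 recovers 6.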
799 
800 
801 void UnaryOpStub::PrintName(StringStream* stream) {
802  const char* op_name = Token::Name(op_);
803  const char* overwrite_name = NULL; // Make g++ happy.
804  switch (mode_) {
805  case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
806  case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
807  }
808  stream->Add("UnaryOpStub_%s_%s_%s",
809  op_name,
810  overwrite_name,
811  UnaryOpIC::GetName(operand_type_));
812 }
813 
814 
815 // TODO(svenpanne): Use virtual functions instead of switch.
816 void UnaryOpStub::Generate(MacroAssembler* masm) {
817  switch (operand_type_) {
818  case UnaryOpIC::UNINITIALIZED:
819  GenerateTypeTransition(masm);
820  break;
821  case UnaryOpIC::SMI:
822  GenerateSmiStub(masm);
823  break;
824  case UnaryOpIC::HEAP_NUMBER:
825  GenerateHeapNumberStub(masm);
826  break;
827  case UnaryOpIC::GENERIC:
828  GenerateGenericStub(masm);
829  break;
830  }
831 }
832 
833 
834 void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
835  __ pop(ecx); // Save return address.
836 
837  __ push(eax); // the operand
838  __ push(Immediate(Smi::FromInt(op_)));
839  __ push(Immediate(Smi::FromInt(mode_)));
840  __ push(Immediate(Smi::FromInt(operand_type_)));
841 
842  __ push(ecx); // Push return address.
843 
844  // Patch the caller to an appropriate specialized stub and return the
845  // operation result to the caller of the stub.
846  __ TailCallExternalReference(
847  ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
848 }
849 
850 
851 // TODO(svenpanne): Use virtual functions instead of switch.
852 void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
853  switch (op_) {
854  case Token::SUB:
855  GenerateSmiStubSub(masm);
856  break;
857  case Token::BIT_NOT:
858  GenerateSmiStubBitNot(masm);
859  break;
860  default:
861  UNREACHABLE();
862  }
863 }
864 
865 
866 void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
867  Label non_smi, undo, slow;
868  GenerateSmiCodeSub(masm, &non_smi, &undo, &slow,
869  Label::kNear, Label::kNear, Label::kNear);
870  __ bind(&undo);
871  GenerateSmiCodeUndo(masm);
872  __ bind(&non_smi);
873  __ bind(&slow);
874  GenerateTypeTransition(masm);
875 }
876 
877 
878 void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
879  Label non_smi;
880  GenerateSmiCodeBitNot(masm, &non_smi);
881  __ bind(&non_smi);
882  GenerateTypeTransition(masm);
883 }
884 
885 
886 void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
887  Label* non_smi,
888  Label* undo,
889  Label* slow,
890  Label::Distance non_smi_near,
891  Label::Distance undo_near,
892  Label::Distance slow_near) {
893  // Check whether the value is a smi.
894  __ JumpIfNotSmi(eax, non_smi, non_smi_near);
895 
896  // We can't handle -0 with smis, so use a type transition for that case.
897  __ test(eax, eax);
898  __ j(zero, slow, slow_near);
899 
900  // Try optimistic subtraction '0 - value', saving operand in eax for undo.
901  __ mov(edx, eax);
902  __ Set(eax, Immediate(0));
903  __ sub(eax, edx);
904  __ j(overflow, undo, undo_near);
905  __ ret(0);
906 }
907 
908 
909 void UnaryOpStub::GenerateSmiCodeBitNot(
910  MacroAssembler* masm,
911  Label* non_smi,
912  Label::Distance non_smi_near) {
913  // Check whether the value is a smi.
914  __ JumpIfNotSmi(eax, non_smi, non_smi_near);
915 
916  // Flip bits and revert inverted smi-tag.
917  __ not_(eax);
918  __ and_(eax, ~kSmiTagMask);
919  __ ret(0);
920 }
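// Aside (editor's note, not part of the original file): why `not` plus
// clearing the tag bit implements smi BIT_NOT. With a zero one-bit tag a
// smi is (value << 1), and ~(value << 1) == (~value << 1) | 1, so masking
// off the low bit with ~kSmiTagMask yields the smi encoding of ~value:
//
//   smi(5)       = 0b...0001010
//   ~smi(5)      = 0b...1110101
//   ~smi(5) & ~1 = 0b...1110100 == smi(-6) == smi(~5)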
921 
922 
923 void UnaryOpStub::GenerateSmiCodeUndo(MacroAssembler* masm) {
924  __ mov(eax, edx);
925 }
926 
927 
928 // TODO(svenpanne): Use virtual functions instead of switch.
929 void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
930  switch (op_) {
931  case Token::SUB:
932  GenerateHeapNumberStubSub(masm);
933  break;
934  case Token::BIT_NOT:
935  GenerateHeapNumberStubBitNot(masm);
936  break;
937  default:
938  UNREACHABLE();
939  }
940 }
941 
942 
943 void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
944  Label non_smi, undo, slow, call_builtin;
945  GenerateSmiCodeSub(masm, &non_smi, &undo, &call_builtin, Label::kNear);
946  __ bind(&non_smi);
947  GenerateHeapNumberCodeSub(masm, &slow);
948  __ bind(&undo);
949  GenerateSmiCodeUndo(masm);
950  __ bind(&slow);
951  GenerateTypeTransition(masm);
952  __ bind(&call_builtin);
953  GenerateGenericCodeFallback(masm);
954 }
955 
956 
957 void UnaryOpStub::GenerateHeapNumberStubBitNot(
958  MacroAssembler* masm) {
959  Label non_smi, slow;
960  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
961  __ bind(&non_smi);
962  GenerateHeapNumberCodeBitNot(masm, &slow);
963  __ bind(&slow);
964  GenerateTypeTransition(masm);
965 }
966 
967 
968 void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
969  Label* slow) {
970  __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
971  __ cmp(edx, masm->isolate()->factory()->heap_number_map());
972  __ j(not_equal, slow);
973 
974  if (mode_ == UNARY_OVERWRITE) {
975  __ xor_(FieldOperand(eax, HeapNumber::kExponentOffset),
976  Immediate(HeapNumber::kSignMask)); // Flip sign.
977  } else {
978  __ mov(edx, eax);
979  // edx: operand
980 
981  Label slow_allocate_heapnumber, heapnumber_allocated;
982  __ AllocateHeapNumber(eax, ebx, ecx, &slow_allocate_heapnumber);
983  __ jmp(&heapnumber_allocated, Label::kNear);
984 
985  __ bind(&slow_allocate_heapnumber);
986  {
987  FrameScope scope(masm, StackFrame::INTERNAL);
988  __ push(edx);
989  __ CallRuntime(Runtime::kNumberAlloc, 0);
990  __ pop(edx);
991  }
992 
993  __ bind(&heapnumber_allocated);
994  // eax: allocated 'empty' number
995  __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
996  __ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
997  __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
998  __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
999  __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
1000  }
1001  __ ret(0);
1002 }
1003 
1004 
1005 void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
1006  Label* slow) {
1007  __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
1008  __ cmp(edx, masm->isolate()->factory()->heap_number_map());
1009  __ j(not_equal, slow);
1010 
1011  // Convert the heap number in eax to an untagged integer in ecx.
1012  IntegerConvert(masm, eax, CpuFeatures::IsSupported(SSE3), slow);
1013 
1014  // Do the bitwise operation and check if the result fits in a smi.
1015  Label try_float;
1016  __ not_(ecx);
1017  __ cmp(ecx, 0xc0000000);
1018  __ j(sign, &try_float, Label::kNear);
1019 
1020  // Tag the result as a smi and we're done.
1021  STATIC_ASSERT(kSmiTagSize == 1);
1022  __ lea(eax, Operand(ecx, times_2, kSmiTag));
1023  __ ret(0);
1024 
1025  // Try to store the result in a heap number.
1026  __ bind(&try_float);
1027  if (mode_ == UNARY_NO_OVERWRITE) {
1028  Label slow_allocate_heapnumber, heapnumber_allocated;
1029  __ mov(ebx, eax);
1030  __ AllocateHeapNumber(eax, edx, edi, &slow_allocate_heapnumber);
1031  __ jmp(&heapnumber_allocated);
1032 
1033  __ bind(&slow_allocate_heapnumber);
1034  {
1035  FrameScope scope(masm, StackFrame::INTERNAL);
1036  // Push the original HeapNumber on the stack. The integer value can't
1037  // be stored since it's untagged and not in the smi range (so we can't
1038  // smi-tag it). We'll recalculate the value after the GC instead.
1039  __ push(ebx);
1040  __ CallRuntime(Runtime::kNumberAlloc, 0);
1041  // New HeapNumber is in eax.
1042  __ pop(edx);
1043  }
1044  // IntegerConvert uses ebx and edi as scratch registers.
1045  // This conversion won't go slow-case.
1046  IntegerConvert(masm, edx, CpuFeatures::IsSupported(SSE3), slow);
1047  __ not_(ecx);
1048 
1049  __ bind(&heapnumber_allocated);
1050  }
1051  if (CpuFeatures::IsSupported(SSE2)) {
1052  CpuFeatures::Scope use_sse2(SSE2);
1053  __ cvtsi2sd(xmm0, ecx);
1054  __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1055  } else {
1056  __ push(ecx);
1057  __ fild_s(Operand(esp, 0));
1058  __ pop(ecx);
1059  __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1060  }
1061  __ ret(0);
1062 }
1063 
1064 
1065 // TODO(svenpanne): Use virtual functions instead of switch.
1066 void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
1067  switch (op_) {
1068  case Token::SUB:
1069  GenerateGenericStubSub(masm);
1070  break;
1071  case Token::BIT_NOT:
1072  GenerateGenericStubBitNot(masm);
1073  break;
1074  default:
1075  UNREACHABLE();
1076  }
1077 }
1078 
1079 
1080 void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
1081  Label non_smi, undo, slow;
1082  GenerateSmiCodeSub(masm, &non_smi, &undo, &slow, Label::kNear);
1083  __ bind(&non_smi);
1084  GenerateHeapNumberCodeSub(masm, &slow);
1085  __ bind(&undo);
1086  GenerateSmiCodeUndo(masm);
1087  __ bind(&slow);
1088  GenerateGenericCodeFallback(masm);
1089 }
1090 
1091 
1092 void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
1093  Label non_smi, slow;
1094  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
1095  __ bind(&non_smi);
1096  GenerateHeapNumberCodeBitNot(masm, &slow);
1097  __ bind(&slow);
1098  GenerateGenericCodeFallback(masm);
1099 }
1100 
1101 
1102 void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
1103  // Handle the slow case by jumping to the corresponding JavaScript builtin.
1104  __ pop(ecx); // pop return address.
1105  __ push(eax);
1106  __ push(ecx); // push return address
1107  switch (op_) {
1108  case Token::SUB:
1109  __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
1110  break;
1111  case Token::BIT_NOT:
1112  __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
1113  break;
1114  default:
1115  UNREACHABLE();
1116  }
1117 }
1118 
1119 
1120 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
1121  __ pop(ecx); // Save return address.
1122  __ push(edx);
1123  __ push(eax);
1124  // Left and right arguments are now on top.
1125  // Push this stub's key. Although the operation and the type info are
1126  // encoded into the key, the encoding is opaque, so push them too.
1127  __ push(Immediate(Smi::FromInt(MinorKey())));
1128  __ push(Immediate(Smi::FromInt(op_)));
1129  __ push(Immediate(Smi::FromInt(operands_type_)));
1130 
1131  __ push(ecx); // Push return address.
1132 
1133  // Patch the caller to an appropriate specialized stub and return the
1134  // operation result to the caller of the stub.
1135  __ TailCallExternalReference(
1136  ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
1137  masm->isolate()),
1138  5,
1139  1);
1140 }
1141 
1142 
1143 // Prepare for a type transition runtime call when the args are already on
1144 // the stack, under the return address.
1145 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
1146  __ pop(ecx); // Save return address.
1147  // Left and right arguments are already on top of the stack.
1148  // Push this stub's key. Although the operation and the type info are
1149  // encoded into the key, the encoding is opaque, so push them too.
1150  __ push(Immediate(Smi::FromInt(MinorKey())));
1151  __ push(Immediate(Smi::FromInt(op_)));
1152  __ push(Immediate(Smi::FromInt(operands_type_)));
1153 
1154  __ push(ecx); // Push return address.
1155 
1156  // Patch the caller to an appropriate specialized stub and return the
1157  // operation result to the caller of the stub.
1158  __ TailCallExternalReference(
1159  ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
1160  masm->isolate()),
1161  5,
1162  1);
1163 }
1164 
1165 
1166 void BinaryOpStub::Generate(MacroAssembler* masm) {
1167  // Explicitly allow generation of nested stubs. It is safe here because
1168  // generation code does not use any raw pointers.
1169  AllowStubCallsScope allow_stub_calls(masm, true);
1170 
1171  switch (operands_type_) {
1172  case BinaryOpIC::UNINITIALIZED:
1173  GenerateTypeTransition(masm);
1174  break;
1175  case BinaryOpIC::SMI:
1176  GenerateSmiStub(masm);
1177  break;
1178  case BinaryOpIC::INT32:
1179  GenerateInt32Stub(masm);
1180  break;
1181  case BinaryOpIC::HEAP_NUMBER:
1182  GenerateHeapNumberStub(masm);
1183  break;
1184  case BinaryOpIC::ODDBALL:
1185  GenerateOddballStub(masm);
1186  break;
1187  case BinaryOpIC::BOTH_STRING:
1188  GenerateBothStringStub(masm);
1189  break;
1190  case BinaryOpIC::STRING:
1191  GenerateStringStub(masm);
1192  break;
1193  case BinaryOpIC::GENERIC:
1194  GenerateGeneric(masm);
1195  break;
1196  default:
1197  UNREACHABLE();
1198  }
1199 }
1200 
1201 
1202 void BinaryOpStub::PrintName(StringStream* stream) {
1203  const char* op_name = Token::Name(op_);
1204  const char* overwrite_name;
1205  switch (mode_) {
1206  case NO_OVERWRITE: overwrite_name = "Alloc"; break;
1207  case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
1208  case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
1209  default: overwrite_name = "UnknownOverwrite"; break;
1210  }
1211  stream->Add("BinaryOpStub_%s_%s_%s",
1212  op_name,
1213  overwrite_name,
1214  BinaryOpIC::GetName(operands_type_));
1215 }
1216 
1217 
1218 void BinaryOpStub::GenerateSmiCode(
1219  MacroAssembler* masm,
1220  Label* slow,
1221  SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
1222  // 1. Move arguments into edx, eax except for DIV and MOD, which need the
1223  // dividend in eax and edx free for the division. Use eax, ebx for those.
1224  Comment load_comment(masm, "-- Load arguments");
1225  Register left = edx;
1226  Register right = eax;
1227  if (op_ == Token::DIV || op_ == Token::MOD) {
1228  left = eax;
1229  right = ebx;
1230  __ mov(ebx, eax);
1231  __ mov(eax, edx);
1232  }
1233 
1234 
1235  // 2. Prepare the smi check of both operands by oring them together.
1236  Comment smi_check_comment(masm, "-- Smi check arguments");
1237  Label not_smis;
1238  Register combined = ecx;
1239  ASSERT(!left.is(combined) && !right.is(combined));
1240  switch (op_) {
1241  case Token::BIT_OR:
1242  // Perform the operation into eax and smi check the result. Preserve
1243  // eax in case the result is not a smi.
1244  ASSERT(!left.is(ecx) && !right.is(ecx));
1245  __ mov(ecx, right);
1246  __ or_(right, left); // Bitwise or is commutative.
1247  combined = right;
1248  break;
1249 
1250  case Token::BIT_XOR:
1251  case Token::BIT_AND:
1252  case Token::ADD:
1253  case Token::SUB:
1254  case Token::MUL:
1255  case Token::DIV:
1256  case Token::MOD:
1257  __ mov(combined, right);
1258  __ or_(combined, left);
1259  break;
1260 
1261  case Token::SHL:
1262  case Token::SAR:
1263  case Token::SHR:
1264  // Move the right operand into ecx for the shift operation, use eax
1265  // for the smi check register.
1266  ASSERT(!left.is(ecx) && !right.is(ecx));
1267  __ mov(ecx, right);
1268  __ or_(right, left);
1269  combined = right;
1270  break;
1271 
1272  default:
1273  break;
1274  }
1275 
1276  // 3. Perform the smi check of the operands.
1277  STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
1278  __ JumpIfNotSmi(combined, &not_smis);
1279 
1280  // 4. Operands are both smis, perform the operation leaving the result in
1281  // eax and check the result if necessary.
1282  Comment perform_smi(masm, "-- Perform smi operation");
1283  Label use_fp_on_smis;
1284  switch (op_) {
1285  case Token::BIT_OR:
1286  // Nothing to do.
1287  break;
1288 
1289  case Token::BIT_XOR:
1290  ASSERT(right.is(eax));
1291  __ xor_(right, left); // Bitwise xor is commutative.
1292  break;
1293 
1294  case Token::BIT_AND:
1295  ASSERT(right.is(eax));
1296  __ and_(right, left); // Bitwise and is commutative.
1297  break;
1298 
1299  case Token::SHL:
1300  // Remove tags from operands (but keep sign).
1301  __ SmiUntag(left);
1302  __ SmiUntag(ecx);
1303  // Perform the operation.
1304  __ shl_cl(left);
1305  // Check that the *signed* result fits in a smi.
1306  __ cmp(left, 0xc0000000);
1307  __ j(sign, &use_fp_on_smis);
1308  // Tag the result and store it in register eax.
1309  __ SmiTag(left);
1310  __ mov(eax, left);
1311  break;
1312 
1313  case Token::SAR:
1314  // Remove tags from operands (but keep sign).
1315  __ SmiUntag(left);
1316  __ SmiUntag(ecx);
1317  // Perform the operation.
1318  __ sar_cl(left);
1319  // Tag the result and store it in register eax.
1320  __ SmiTag(left);
1321  __ mov(eax, left);
1322  break;
1323 
1324  case Token::SHR:
1325  // Remove tags from operands (but keep sign).
1326  __ SmiUntag(left);
1327  __ SmiUntag(ecx);
1328  // Perform the operation.
1329  __ shr_cl(left);
1330  // Check that the *unsigned* result fits in a smi.
1331  // Neither of the two high-order bits can be set:
1332  // - 0x80000000: high bit would be lost when smi tagging.
1333  // - 0x40000000: this number would convert to negative when
1334  // Smi tagging. These two cases can only happen with shifts
1335  // by 0 or 1 when handed a valid smi.
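// (Editor's note, not in the original: concretely, smi tagging shifts
// left by one, so an untagged 0x80000000 would lose its sign bit when
// tagged, and an untagged 0x40000000 would become 0x80000000, i.e. a
// negative smi. SHR can only produce such values for shifts of 0 or 1.)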
1336  __ test(left, Immediate(0xc0000000));
1337  __ j(not_zero, &use_fp_on_smis);
1338  // Tag the result and store it in register eax.
1339  __ SmiTag(left);
1340  __ mov(eax, left);
1341  break;
1342 
1343  case Token::ADD:
1344  ASSERT(right.is(eax));
1345  __ add(right, left); // Addition is commutative.
1346  __ j(overflow, &use_fp_on_smis);
1347  break;
1348 
1349  case Token::SUB:
1350  __ sub(left, right);
1351  __ j(overflow, &use_fp_on_smis);
1352  __ mov(eax, left);
1353  break;
1354 
1355  case Token::MUL:
1356  // If the smi tag is 0 we can just leave the tag on one operand.
1357  STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
1358  // We can't revert the multiplication if the result is not a smi
1359  // so save the right operand.
1360  __ mov(ebx, right);
1361  // Remove tag from one of the operands (but keep sign).
1362  __ SmiUntag(right);
1363  // Do multiplication.
1364  __ imul(right, left); // Multiplication is commutative.
1365  __ j(overflow, &use_fp_on_smis);
1366  // Check for negative zero result. Use combined = left | right.
1367  __ NegativeZeroTest(right, combined, &use_fp_on_smis);
1368  break;
1369 
1370  case Token::DIV:
1371  // We can't revert the division if the result is not a smi so
1372  // save the left operand.
1373  __ mov(edi, left);
1374  // Check for 0 divisor.
1375  __ test(right, right);
1376  __ j(zero, &use_fp_on_smis);
1377  // Sign extend left into edx:eax.
1378  ASSERT(left.is(eax));
1379  __ cdq();
1380  // Divide edx:eax by right.
1381  __ idiv(right);
1382  // Check for the corner case of dividing the most negative smi by
1383  // -1. We cannot use the overflow flag, since it is not set by idiv
1384  // instruction.
1385  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
1386  __ cmp(eax, 0x40000000);
1387  __ j(equal, &use_fp_on_smis);
1388  // Check for negative zero result. Use combined = left | right.
1389  __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
1390  // Check that the remainder is zero.
1391  __ test(edx, edx);
1392  __ j(not_zero, &use_fp_on_smis);
1393  // Tag the result and store it in register eax.
1394  __ SmiTag(eax);
1395  break;
1396 
1397  case Token::MOD:
1398  // Check for 0 divisor.
1399  __ test(right, right);
1400  __ j(zero, &not_smis);
1401 
1402  // Sign extend left into edx:eax.
1403  ASSERT(left.is(eax));
1404  __ cdq();
1405  // Divide edx:eax by right.
1406  __ idiv(right);
1407  // Check for negative zero result. Use combined = left | right.
1408  __ NegativeZeroTest(edx, combined, slow);
1409  // Move remainder to register eax.
1410  __ mov(eax, edx);
1411  break;
1412 
1413  default:
1414  UNREACHABLE();
1415  }
1416 
1417  // 5. Emit return of result in eax. Some operations have registers pushed.
1418  switch (op_) {
1419  case Token::ADD:
1420  case Token::SUB:
1421  case Token::MUL:
1422  case Token::DIV:
1423  __ ret(0);
1424  break;
1425  case Token::MOD:
1426  case Token::BIT_OR:
1427  case Token::BIT_AND:
1428  case Token::BIT_XOR:
1429  case Token::SAR:
1430  case Token::SHL:
1431  case Token::SHR:
1432  __ ret(2 * kPointerSize);
1433  break;
1434  default:
1435  UNREACHABLE();
1436  }
1437 
1438  // 6. For some operations emit inline code to perform floating point
1439  // operations on known smis (e.g., if the result of the operation
1440  // overflowed the smi range).
1441  if (allow_heapnumber_results == NO_HEAPNUMBER_RESULTS) {
1442  __ bind(&use_fp_on_smis);
1443  switch (op_) {
1444  // Undo the effects of some operations, and some register moves.
1445  case Token::SHL:
1446  // The arguments are saved on the stack, and only used from there.
1447  break;
1448  case Token::ADD:
1449  // Revert right = right + left.
1450  __ sub(right, left);
1451  break;
1452  case Token::SUB:
1453  // Revert left = left - right.
1454  __ add(left, right);
1455  break;
1456  case Token::MUL:
1457  // Right was clobbered but a copy is in ebx.
1458  __ mov(right, ebx);
1459  break;
1460  case Token::DIV:
1461  // Left was clobbered but a copy is in edi. Right is in ebx for
1462  // division. They should be in eax, ebx for jump to not_smi.
1463  __ mov(eax, edi);
1464  break;
1465  default:
1466  // No other operators jump to use_fp_on_smis.
1467  break;
1468  }
1469  __ jmp(&not_smis);
1470  } else {
1471  ASSERT(allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS);
1472  switch (op_) {
1473  case Token::SHL:
1474  case Token::SHR: {
1475  Comment perform_float(masm, "-- Perform float operation on smis");
1476  __ bind(&use_fp_on_smis);
1477  // Result we want is in left == edx, so we can put the allocated heap
1478  // number in eax.
1479  __ AllocateHeapNumber(eax, ecx, ebx, slow);
1480  // Store the result in the HeapNumber and return.
1481  // It's OK to overwrite the arguments on the stack because we
1482  // are about to return.
1483  if (op_ == Token::SHR) {
1484  __ mov(Operand(esp, 1 * kPointerSize), left);
1485  __ mov(Operand(esp, 2 * kPointerSize), Immediate(0));
1486  __ fild_d(Operand(esp, 1 * kPointerSize));
1487  __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1488  } else {
1489  ASSERT_EQ(Token::SHL, op_);
1490  if (CpuFeatures::IsSupported(SSE2)) {
1491  CpuFeatures::Scope use_sse2(SSE2);
1492  __ cvtsi2sd(xmm0, left);
1493  __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1494  } else {
1495  __ mov(Operand(esp, 1 * kPointerSize), left);
1496  __ fild_s(Operand(esp, 1 * kPointerSize));
1497  __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1498  }
1499  }
1500  __ ret(2 * kPointerSize);
1501  break;
1502  }
1503 
1504  case Token::ADD:
1505  case Token::SUB:
1506  case Token::MUL:
1507  case Token::DIV: {
1508  Comment perform_float(masm, "-- Perform float operation on smis");
1509  __ bind(&use_fp_on_smis);
1510  // Restore arguments to edx, eax.
1511  switch (op_) {
1512  case Token::ADD:
1513  // Revert right = right + left.
1514  __ sub(right, left);
1515  break;
1516  case Token::SUB:
1517  // Revert left = left - right.
1518  __ add(left, right);
1519  break;
1520  case Token::MUL:
1521  // Right was clobbered but a copy is in ebx.
1522  __ mov(right, ebx);
1523  break;
1524  case Token::DIV:
1525  // Left was clobbered but a copy is in edi. Right is in ebx for
1526  // division.
1527  __ mov(edx, edi);
1528  __ mov(eax, right);
1529  break;
1530  default: UNREACHABLE();
1531  break;
1532  }
1533  __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
1534  if (CpuFeatures::IsSupported(SSE2)) {
1535  CpuFeatures::Scope use_sse2(SSE2);
1536  FloatingPointHelper::LoadSSE2Smis(masm, ebx);
1537  switch (op_) {
1538  case Token::ADD: __ addsd(xmm0, xmm1); break;
1539  case Token::SUB: __ subsd(xmm0, xmm1); break;
1540  case Token::MUL: __ mulsd(xmm0, xmm1); break;
1541  case Token::DIV: __ divsd(xmm0, xmm1); break;
1542  default: UNREACHABLE();
1543  }
1544  __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
1545  } else { // SSE2 not available, use FPU.
1546  FloatingPointHelper::LoadFloatSmis(masm, ebx);
1547  switch (op_) {
1548  case Token::ADD: __ faddp(1); break;
1549  case Token::SUB: __ fsubp(1); break;
1550  case Token::MUL: __ fmulp(1); break;
1551  case Token::DIV: __ fdivp(1); break;
1552  default: UNREACHABLE();
1553  }
1554  __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
1555  }
1556  __ mov(eax, ecx);
1557  __ ret(0);
1558  break;
1559  }
1560 
1561  default:
1562  break;
1563  }
1564  }
1565 
1566  // 7. Non-smi operands, fall out to the non-smi code with the operands in
1567  // edx and eax.
1568  Comment done_comment(masm, "-- Enter non-smi code");
1569  __ bind(&not_smis);
1570  switch (op_) {
1571  case Token::BIT_OR:
1572  case Token::SHL:
1573  case Token::SAR:
1574  case Token::SHR:
1575  // Right operand is saved in ecx and eax was destroyed by the smi
1576  // check.
1577  __ mov(eax, ecx);
1578  break;
1579 
1580  case Token::DIV:
1581  case Token::MOD:
1582  // Operands are in eax, ebx at this point.
1583  __ mov(edx, eax);
1584  __ mov(eax, ebx);
1585  break;
1586 
1587  default:
1588  break;
1589  }
1590 }
1591 
1592 
1593 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
1594  Label call_runtime;
1595 
1596  switch (op_) {
1597  case Token::ADD:
1598  case Token::SUB:
1599  case Token::MUL:
1600  case Token::DIV:
1601  break;
1602  case Token::MOD:
1603  case Token::BIT_OR:
1604  case Token::BIT_AND:
1605  case Token::BIT_XOR:
1606  case Token::SAR:
1607  case Token::SHL:
1608  case Token::SHR:
1609  GenerateRegisterArgsPush(masm);
1610  break;
1611  default:
1612  UNREACHABLE();
1613  }
1614 
1615  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
1616  result_type_ == BinaryOpIC::SMI) {
1617  GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
1618  } else {
1619  GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
1620  }
1621  __ bind(&call_runtime);
1622  switch (op_) {
1623  case Token::ADD:
1624  case Token::SUB:
1625  case Token::MUL:
1626  case Token::DIV:
1627  GenerateTypeTransition(masm);
1628  break;
1629  case Token::MOD:
1630  case Token::BIT_OR:
1631  case Token::BIT_AND:
1632  case Token::BIT_XOR:
1633  case Token::SAR:
1634  case Token::SHL:
1635  case Token::SHR:
1636  GenerateTypeTransitionWithSavedArgs(masm);
1637  break;
1638  default:
1639  UNREACHABLE();
1640  }
1641 }
1642 
1643 
1644 void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
1645  ASSERT(operands_type_ == BinaryOpIC::STRING);
1646  ASSERT(op_ == Token::ADD);
1647  // Try to add arguments as strings, otherwise, transition to the generic
1648  // BinaryOpIC type.
1649  GenerateAddStrings(masm);
1650  GenerateTypeTransition(masm);
1651 }
1652 
1653 
1654 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
1655  Label call_runtime;
1656  ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
1657  ASSERT(op_ == Token::ADD);
1658  // If both arguments are strings, call the string add stub.
1659  // Otherwise, do a transition.
1660 
1661  // Registers containing left and right operands respectively.
1662  Register left = edx;
1663  Register right = eax;
1664 
1665  // Test if left operand is a string.
1666  __ JumpIfSmi(left, &call_runtime, Label::kNear);
1667  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
1668  __ j(above_equal, &call_runtime, Label::kNear);
1669 
1670  // Test if right operand is a string.
1671  __ JumpIfSmi(right, &call_runtime, Label::kNear);
1672  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
1673  __ j(above_equal, &call_runtime, Label::kNear);
1674 
1675  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
1676  GenerateRegisterArgsPush(masm);
1677  __ TailCallStub(&string_add_stub);
1678 
1679  __ bind(&call_runtime);
1680  GenerateTypeTransition(masm);
1681 }
1682 
1683 
1684 // Input:
1685 // edx: left operand (tagged)
1686 // eax: right operand (tagged)
1687 // Output:
1688 // eax: result (tagged)
1689 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
1690  Label call_runtime;
1691  ASSERT(operands_type_ == BinaryOpIC::INT32);
1692 
1693  // Floating point case.
1694  switch (op_) {
1695  case Token::ADD:
1696  case Token::SUB:
1697  case Token::MUL:
1698  case Token::DIV:
1699  case Token::MOD: {
1700  Label not_floats;
1701  Label not_int32;
1702  if (CpuFeatures::IsSupported(SSE2)) {
1703  CpuFeatures::Scope use_sse2(SSE2);
1704  FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
1705  FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
1706  if (op_ == Token::MOD) {
1707  GenerateRegisterArgsPush(masm);
1708  __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
1709  } else {
1710  switch (op_) {
1711  case Token::ADD: __ addsd(xmm0, xmm1); break;
1712  case Token::SUB: __ subsd(xmm0, xmm1); break;
1713  case Token::MUL: __ mulsd(xmm0, xmm1); break;
1714  case Token::DIV: __ divsd(xmm0, xmm1); break;
1715  default: UNREACHABLE();
1716  }
1717  // Check result type if it is currently Int32.
1718  if (result_type_ <= BinaryOpIC::INT32) {
1719  __ cvttsd2si(ecx, Operand(xmm0));
1720  __ cvtsi2sd(xmm2, ecx);
1721  __ ucomisd(xmm0, xmm2);
1722  __ j(not_zero, &not_int32);
1723  __ j(carry, &not_int32);
1724  }
1725  GenerateHeapResultAllocation(masm, &call_runtime);
1726  __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1727  __ ret(0);
1728  }
1729  } else { // SSE2 not available, use FPU.
1730  FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
1731  FloatingPointHelper::LoadFloatOperands(
1732  masm,
1733  ecx,
1734  FloatingPointHelper::ARGS_IN_REGISTERS);
1735  FloatingPointHelper::CheckFloatOperandsAreInt32(masm, &not_int32);
1736  if (op_ == Token::MOD) {
1737  // The operands are now on the FPU stack, but we don't need them.
1738  __ fstp(0);
1739  __ fstp(0);
1740  GenerateRegisterArgsPush(masm);
1741  __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
1742  } else {
1743  switch (op_) {
1744  case Token::ADD: __ faddp(1); break;
1745  case Token::SUB: __ fsubp(1); break;
1746  case Token::MUL: __ fmulp(1); break;
1747  case Token::DIV: __ fdivp(1); break;
1748  default: UNREACHABLE();
1749  }
1750  Label after_alloc_failure;
1751  GenerateHeapResultAllocation(masm, &after_alloc_failure);
1752  __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1753  __ ret(0);
1754  __ bind(&after_alloc_failure);
1755  __ fstp(0); // Pop FPU stack before calling runtime.
1756  __ jmp(&call_runtime);
1757  }
1758  }
1759 
1760  __ bind(&not_floats);
1761  __ bind(&not_int32);
1762  GenerateTypeTransition(masm);
1763  break;
1764  }
1765 
1766  case Token::BIT_OR:
1767  case Token::BIT_AND:
1768  case Token::BIT_XOR:
1769  case Token::SAR:
1770  case Token::SHL:
1771  case Token::SHR: {
1772  GenerateRegisterArgsPush(masm);
1773  Label not_floats;
1774  Label not_int32;
1775  Label non_smi_result;
1776  FloatingPointHelper::LoadUnknownsAsIntegers(masm,
1777  use_sse3_,
1778  &not_floats);
1779  FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_,
1780  &not_int32);
1781  switch (op_) {
1782  case Token::BIT_OR: __ or_(eax, ecx); break;
1783  case Token::BIT_AND: __ and_(eax, ecx); break;
1784  case Token::BIT_XOR: __ xor_(eax, ecx); break;
1785  case Token::SAR: __ sar_cl(eax); break;
1786  case Token::SHL: __ shl_cl(eax); break;
1787  case Token::SHR: __ shr_cl(eax); break;
1788  default: UNREACHABLE();
1789  }
1790  if (op_ == Token::SHR) {
1791  // Check if result is non-negative and fits in a smi.
1792  __ test(eax, Immediate(0xc0000000));
1793  __ j(not_zero, &call_runtime);
1794  } else {
1795  // Check if result fits in a smi.
1796  __ cmp(eax, 0xc0000000);
1797  __ j(negative, &non_smi_result, Label::kNear);
1798  }
1799  // Tag smi result and return.
1800  __ SmiTag(eax);
1801  __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
1802 
1803  // All ops except SHR return a signed int32 that we load in
1804  // a HeapNumber.
1805  if (op_ != Token::SHR) {
1806  __ bind(&non_smi_result);
1807  // Allocate a heap number if needed.
1808  __ mov(ebx, eax); // ebx: result
1809  Label skip_allocation;
1810  switch (mode_) {
1811  case OVERWRITE_LEFT:
1812  case OVERWRITE_RIGHT:
1813  // If the operand was an object, we skip the
1814  // allocation of a heap number.
1815  __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
1816  1 * kPointerSize : 2 * kPointerSize));
1817  __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
1818  // Fall through!
1819  case NO_OVERWRITE:
1820  __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
1821  __ bind(&skip_allocation);
1822  break;
1823  default: UNREACHABLE();
1824  }
1825  // Store the result in the HeapNumber and return.
1826  if (CpuFeatures::IsSupported(SSE2)) {
1827  CpuFeatures::Scope use_sse2(SSE2);
1828  __ cvtsi2sd(xmm0, ebx);
1829  __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1830  } else {
1831  __ mov(Operand(esp, 1 * kPointerSize), ebx);
1832  __ fild_s(Operand(esp, 1 * kPointerSize));
1833  __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1834  }
1835  __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
1836  }
1837 
1838  __ bind(&not_floats);
1839  __ bind(&not_int32);
1840  GenerateTypeTransitionWithSavedArgs(masm);
1841  break;
1842  }
1843  default: UNREACHABLE(); break;
1844  }
1845 
1846  // If an allocation fails, or SHR hits a hard case, use the runtime system to
1847  // get the correct result.
1848  __ bind(&call_runtime);
1849 
1850  switch (op_) {
1851  case Token::ADD:
1852  GenerateRegisterArgsPush(masm);
1853  __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
1854  break;
1855  case Token::SUB:
1856  GenerateRegisterArgsPush(masm);
1857  __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
1858  break;
1859  case Token::MUL:
1860  GenerateRegisterArgsPush(masm);
1861  __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
1862  break;
1863  case Token::DIV:
1864  GenerateRegisterArgsPush(masm);
1865  __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
1866  break;
1867  case Token::MOD:
1868  break;
1869  case Token::BIT_OR:
1870  __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
1871  break;
1872  case Token::BIT_AND:
1873  __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
1874  break;
1875  case Token::BIT_XOR:
1876  __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
1877  break;
1878  case Token::SAR:
1879  __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
1880  break;
1881  case Token::SHL:
1882  __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
1883  break;
1884  case Token::SHR:
1885  __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
1886  break;
1887  default:
1888  UNREACHABLE();
1889  }
1890 }
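The cvttsd2si/cvtsi2sd/ucomisd sequence at lines 1719-1723 is a round-trip test for "still representable as int32". A sketch of the same check in C++ (on ia32 the cast compiles to cvttsd2si, which produces the indefinite integer 0x80000000 for NaN and out-of-range values, so those inputs fail the comparison):

    #include <stdint.h>
    bool StillInt32(double result) {
      int32_t truncated = static_cast<int32_t>(result);    // cvttsd2si
      double round_trip = static_cast<double>(truncated);  // cvtsi2sd
      return round_trip == result;  // ucomisd; NaN is unordered (carry set)
    }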
1891 
1892 
1893 void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
1894  if (op_ == Token::ADD) {
1895  // Handle string addition here, because it is the only operation
1896  // that does not do a ToNumber conversion on the operands.
1897  GenerateAddStrings(masm);
1898  }
1899 
1900  Factory* factory = masm->isolate()->factory();
1901 
1902  // Convert oddball arguments to numbers.
1903  Label check, done;
1904  __ cmp(edx, factory->undefined_value());
1905  __ j(not_equal, &check, Label::kNear);
1906  if (Token::IsBitOp(op_)) {
1907  __ xor_(edx, edx);
1908  } else {
1909  __ mov(edx, Immediate(factory->nan_value()));
1910  }
1911  __ jmp(&done, Label::kNear);
1912  __ bind(&check);
1913  __ cmp(eax, factory->undefined_value());
1914  __ j(not_equal, &done, Label::kNear);
1915  if (Token::IsBitOp(op_)) {
1916  __ xor_(eax, eax);
1917  } else {
1918  __ mov(eax, Immediate(factory->nan_value()));
1919  }
1920  __ bind(&done);
1921 
1922  GenerateHeapNumberStub(masm);
1923 }
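The conversion above mirrors the spec: for bitwise and shift operators an undefined operand goes through ToInt32 and becomes 0 (hence the xor), while for arithmetic operators ToNumber turns it into NaN (hence loading nan_value). A sketch of the rule, with is_bit_op standing in for Token::IsBitOp(op_):

    #include <limits>
    double OddballOperand(bool is_bit_op) {
      // Smi zero for bit ops, the canonical NaN heap number otherwise.
      return is_bit_op ? 0.0 : std::numeric_limits<double>::quiet_NaN();
    }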
1924 
1925 
1926 void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
1927  Label call_runtime;
1928 
1929  // Floating point case.
1930  switch (op_) {
1931  case Token::ADD:
1932  case Token::SUB:
1933  case Token::MUL:
1934  case Token::DIV: {
1935  Label not_floats;
1936  if (CpuFeatures::IsSupported(SSE2)) {
1937  CpuFeatures::Scope use_sse2(SSE2);
1938  FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
1939 
1940  switch (op_) {
1941  case Token::ADD: __ addsd(xmm0, xmm1); break;
1942  case Token::SUB: __ subsd(xmm0, xmm1); break;
1943  case Token::MUL: __ mulsd(xmm0, xmm1); break;
1944  case Token::DIV: __ divsd(xmm0, xmm1); break;
1945  default: UNREACHABLE();
1946  }
1947  GenerateHeapResultAllocation(masm, &call_runtime);
1948  __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1949  __ ret(0);
1950  } else { // SSE2 not available, use FPU.
1951  FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
1952  FloatingPointHelper::LoadFloatOperands(
1953  masm,
1954  ecx,
1955  FloatingPointHelper::ARGS_IN_REGISTERS);
1956  switch (op_) {
1957  case Token::ADD: __ faddp(1); break;
1958  case Token::SUB: __ fsubp(1); break;
1959  case Token::MUL: __ fmulp(1); break;
1960  case Token::DIV: __ fdivp(1); break;
1961  default: UNREACHABLE();
1962  }
1963  Label after_alloc_failure;
1964  GenerateHeapResultAllocation(masm, &after_alloc_failure);
1965  __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1966  __ ret(0);
1967  __ bind(&after_alloc_failure);
1968  __ fstp(0); // Pop FPU stack before calling runtime.
1969  __ jmp(&call_runtime);
1970  }
1971 
1972  __ bind(&not_floats);
1973  GenerateTypeTransition(masm);
1974  break;
1975  }
1976 
1977  case Token::MOD: {
1978  // For MOD we go directly to runtime in the non-smi case.
1979  break;
1980  }
1981  case Token::BIT_OR:
1982  case Token::BIT_AND:
1983  case Token::BIT_XOR:
1984  case Token::SAR:
1985  case Token::SHL:
1986  case Token::SHR: {
1987  GenerateRegisterArgsPush(masm);
1988  Label not_floats;
1989  Label non_smi_result;
1990  FloatingPointHelper::LoadUnknownsAsIntegers(masm,
1991  use_sse3_,
1992  &not_floats);
1993  switch (op_) {
1994  case Token::BIT_OR: __ or_(eax, ecx); break;
1995  case Token::BIT_AND: __ and_(eax, ecx); break;
1996  case Token::BIT_XOR: __ xor_(eax, ecx); break;
1997  case Token::SAR: __ sar_cl(eax); break;
1998  case Token::SHL: __ shl_cl(eax); break;
1999  case Token::SHR: __ shr_cl(eax); break;
2000  default: UNREACHABLE();
2001  }
2002  if (op_ == Token::SHR) {
2003  // Check if result is non-negative and fits in a smi.
2004  __ test(eax, Immediate(0xc0000000));
2005  __ j(not_zero, &call_runtime);
2006  } else {
2007  // Check if result fits in a smi.
2008  __ cmp(eax, 0xc0000000);
2009  __ j(negative, &non_smi_result, Label::kNear);
2010  }
2011  // Tag smi result and return.
2012  __ SmiTag(eax);
2013  __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
2014 
2015  // All ops except SHR return a signed int32 that we load in
2016  // a HeapNumber.
2017  if (op_ != Token::SHR) {
2018  __ bind(&non_smi_result);
2019  // Allocate a heap number if needed.
2020  __ mov(ebx, eax); // ebx: result
2021  Label skip_allocation;
2022  switch (mode_) {
2023  case OVERWRITE_LEFT:
2024  case OVERWRITE_RIGHT:
2025  // If the operand was an object, we skip the
2026  // allocation of a heap number.
2027  __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
2028  1 * kPointerSize : 2 * kPointerSize));
2029  __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
2030  // Fall through!
2031  case NO_OVERWRITE:
2032  __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
2033  __ bind(&skip_allocation);
2034  break;
2035  default: UNREACHABLE();
2036  }
2037  // Store the result in the HeapNumber and return.
2038  if (CpuFeatures::IsSupported(SSE2)) {
2039  CpuFeatures::Scope use_sse2(SSE2);
2040  __ cvtsi2sd(xmm0, ebx);
2041  __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
2042  } else {
2043  __ mov(Operand(esp, 1 * kPointerSize), ebx);
2044  __ fild_s(Operand(esp, 1 * kPointerSize));
2045  __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2046  }
2047  __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
2048  }
2049 
2050  __ bind(&not_floats);
2051  GenerateTypeTransitionWithSavedArgs(masm);
2052  break;
2053  }
2054  default: UNREACHABLE(); break;
2055  }
2056 
2057  // If an allocation fails, or SHR or MOD hit a hard case,
2058  // use the runtime system to get the correct result.
2059  __ bind(&call_runtime);
2060 
2061  switch (op_) {
2062  case Token::ADD:
2063  GenerateRegisterArgsPush(masm);
2064  __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
2065  break;
2066  case Token::SUB:
2067  GenerateRegisterArgsPush(masm);
2068  __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
2069  break;
2070  case Token::MUL:
2071  GenerateRegisterArgsPush(masm);
2072  __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
2073  break;
2074  case Token::DIV:
2075  GenerateRegisterArgsPush(masm);
2076  __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
2077  break;
2078  case Token::MOD:
2079  GenerateRegisterArgsPush(masm);
2080  __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
2081  break;
2082  case Token::BIT_OR:
2083  __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
2084  break;
2085  case Token::BIT_AND:
2086  __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
2087  break;
2088  case Token::BIT_XOR:
2089  __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
2090  break;
2091  case Token::SAR:
2092  __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
2093  break;
2094  case Token::SHL:
2095  __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
2096  break;
2097  case Token::SHR:
2098  __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
2099  break;
2100  default:
2101  UNREACHABLE();
2102  }
2103 }
2104 
2105 
2106 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
2107  Label call_runtime;
2108 
2109  Counters* counters = masm->isolate()->counters();
2110  __ IncrementCounter(counters->generic_binary_stub_calls(), 1);
2111 
2112  switch (op_) {
2113  case Token::ADD:
2114  case Token::SUB:
2115  case Token::MUL:
2116  case Token::DIV:
2117  break;
2118  case Token::MOD:
2119  case Token::BIT_OR:
2120  case Token::BIT_AND:
2121  case Token::BIT_XOR:
2122  case Token::SAR:
2123  case Token::SHL:
2124  case Token::SHR:
2125  GenerateRegisterArgsPush(masm);
2126  break;
2127  default:
2128  UNREACHABLE();
2129  }
2130 
2131  GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
2132 
2133  // Floating point case.
2134  switch (op_) {
2135  case Token::ADD:
2136  case Token::SUB:
2137  case Token::MUL:
2138  case Token::DIV: {
2139  Label not_floats;
2140  if (CpuFeatures::IsSupported(SSE2)) {
2141  CpuFeatures::Scope use_sse2(SSE2);
2142  FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
2143 
2144  switch (op_) {
2145  case Token::ADD: __ addsd(xmm0, xmm1); break;
2146  case Token::SUB: __ subsd(xmm0, xmm1); break;
2147  case Token::MUL: __ mulsd(xmm0, xmm1); break;
2148  case Token::DIV: __ divsd(xmm0, xmm1); break;
2149  default: UNREACHABLE();
2150  }
2151  GenerateHeapResultAllocation(masm, &call_runtime);
2152  __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
2153  __ ret(0);
2154  } else { // SSE2 not available, use FPU.
2155  FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
2156  FloatingPointHelper::LoadFloatOperands(
2157  masm,
2158  ecx,
2159  FloatingPointHelper::ARGS_IN_REGISTERS);
2160  switch (op_) {
2161  case Token::ADD: __ faddp(1); break;
2162  case Token::SUB: __ fsubp(1); break;
2163  case Token::MUL: __ fmulp(1); break;
2164  case Token::DIV: __ fdivp(1); break;
2165  default: UNREACHABLE();
2166  }
2167  Label after_alloc_failure;
2168  GenerateHeapResultAllocation(masm, &after_alloc_failure);
2169  __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2170  __ ret(0);
2171  __ bind(&after_alloc_failure);
2172  __ fstp(0); // Pop FPU stack before calling runtime.
2173  __ jmp(&call_runtime);
2174  }
2175  __ bind(&not_floats);
2176  break;
2177  }
2178  case Token::MOD: {
2179  // For MOD we go directly to runtime in the non-smi case.
2180  break;
2181  }
2182  case Token::BIT_OR:
2183  case Token::BIT_AND:
2184  case Token::BIT_XOR:
2185  case Token::SAR:
2186  case Token::SHL:
2187  case Token::SHR: {
2188  Label non_smi_result;
2189  FloatingPointHelper::LoadUnknownsAsIntegers(masm,
2190  use_sse3_,
2191  &call_runtime);
2192  switch (op_) {
2193  case Token::BIT_OR: __ or_(eax, ecx); break;
2194  case Token::BIT_AND: __ and_(eax, ecx); break;
2195  case Token::BIT_XOR: __ xor_(eax, ecx); break;
2196  case Token::SAR: __ sar_cl(eax); break;
2197  case Token::SHL: __ shl_cl(eax); break;
2198  case Token::SHR: __ shr_cl(eax); break;
2199  default: UNREACHABLE();
2200  }
2201  if (op_ == Token::SHR) {
2202  // Check if result is non-negative and fits in a smi.
2203  __ test(eax, Immediate(0xc0000000));
2204  __ j(not_zero, &call_runtime);
2205  } else {
2206  // Check if result fits in a smi.
2207  __ cmp(eax, 0xc0000000);
2208  __ j(negative, &non_smi_result, Label::kNear);
2209  }
2210  // Tag smi result and return.
2211  __ SmiTag(eax);
2212  __ ret(2 * kPointerSize); // Drop the arguments from the stack.
2213 
2214  // All ops except SHR return a signed int32 that we load in
2215  // a HeapNumber.
2216  if (op_ != Token::SHR) {
2217  __ bind(&non_smi_result);
2218  // Allocate a heap number if needed.
2219  __ mov(ebx, eax); // ebx: result
2220  Label skip_allocation;
2221  switch (mode_) {
2222  case OVERWRITE_LEFT:
2223  case OVERWRITE_RIGHT:
2224  // If the operand was an object, we skip the
2225  // allocation of a heap number.
2226  __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
2227  1 * kPointerSize : 2 * kPointerSize));
2228  __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
2229  // Fall through!
2230  case NO_OVERWRITE:
2231  __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
2232  __ bind(&skip_allocation);
2233  break;
2234  default: UNREACHABLE();
2235  }
2236  // Store the result in the HeapNumber and return.
2237  if (CpuFeatures::IsSupported(SSE2)) {
2238  CpuFeatures::Scope use_sse2(SSE2);
2239  __ cvtsi2sd(xmm0, ebx);
2240  __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
2241  } else {
2242  __ mov(Operand(esp, 1 * kPointerSize), ebx);
2243  __ fild_s(Operand(esp, 1 * kPointerSize));
2244  __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2245  }
2246  __ ret(2 * kPointerSize);
2247  }
2248  break;
2249  }
2250  default: UNREACHABLE(); break;
2251  }
2252 
2253  // If all else fails, use the runtime system to get the correct
2254  // result.
2255  __ bind(&call_runtime);
2256  switch (op_) {
2257  case Token::ADD: {
2258  GenerateAddStrings(masm);
2259  GenerateRegisterArgsPush(masm);
2260  __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
2261  break;
2262  }
2263  case Token::SUB:
2264  GenerateRegisterArgsPush(masm);
2265  __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
2266  break;
2267  case Token::MUL:
2268  GenerateRegisterArgsPush(masm);
2269  __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
2270  break;
2271  case Token::DIV:
2272  GenerateRegisterArgsPush(masm);
2273  __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
2274  break;
2275  case Token::MOD:
2276  __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
2277  break;
2278  case Token::BIT_OR:
2279  __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
2280  break;
2281  case Token::BIT_AND:
2282  __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
2283  break;
2284  case Token::BIT_XOR:
2285  __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
2286  break;
2287  case Token::SAR:
2288  __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
2289  break;
2290  case Token::SHL:
2291  __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
2292  break;
2293  case Token::SHR:
2294  __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
2295  break;
2296  default:
2297  UNREACHABLE();
2298  }
2299 }
2300 
2301 
2302 void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
2303  ASSERT(op_ == Token::ADD);
2304  Label left_not_string, call_runtime;
2305 
2306  // Registers containing left and right operands respectively.
2307  Register left = edx;
2308  Register right = eax;
2309 
2310  // Test if left operand is a string.
2311  __ JumpIfSmi(left, &left_not_string, Label::kNear);
2312  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
2313  __ j(above_equal, &left_not_string, Label::kNear);
2314 
2315  StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
2316  GenerateRegisterArgsPush(masm);
2317  __ TailCallStub(&string_add_left_stub);
2318 
2319  // Left operand is not a string, test right.
2320  __ bind(&left_not_string);
2321  __ JumpIfSmi(right, &call_runtime, Label::kNear);
2322  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
2323  __ j(above_equal, &call_runtime, Label::kNear);
2324 
2325  StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
2326  GenerateRegisterArgsPush(masm);
2327  __ TailCallStub(&string_add_right_stub);
2328 
2329  // Neither argument is a string.
2330  __ bind(&call_runtime);
2331 }
2332 
2333 
2334 void BinaryOpStub::GenerateHeapResultAllocation(
2335  MacroAssembler* masm,
2336  Label* alloc_failure) {
2337  Label skip_allocation;
2338  OverwriteMode mode = mode_;
2339  switch (mode) {
2340  case OVERWRITE_LEFT: {
2341  // If the argument in edx is already an object, we skip the
2342  // allocation of a heap number.
2343  __ JumpIfNotSmi(edx, &skip_allocation, Label::kNear);
2344  // Allocate a heap number for the result. Keep eax and edx intact
2345  // for the possible runtime call.
2346  __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
2347  // Now edx can be overwritten losing one of the arguments as we are
2348  // now done and will not need it any more.
2349  __ mov(edx, ebx);
2350  __ bind(&skip_allocation);
2351  // Use object in edx as a result holder
2352  __ mov(eax, edx);
2353  break;
2354  }
2355  case OVERWRITE_RIGHT:
2356  // If the argument in eax is already an object, we skip the
2357  // allocation of a heap number.
2358  __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
2359  // Fall through!
2360  case NO_OVERWRITE:
2361  // Allocate a heap number for the result. Keep eax and edx intact
2362  // for the possible runtime call.
2363  __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
2364  // Now eax can be overwritten losing one of the arguments as we are
2365  // now done and will not need it any more.
2366  __ mov(eax, ebx);
2367  __ bind(&skip_allocation);
2368  break;
2369  default: UNREACHABLE();
2370  }
2371 }
2372 
2373 
2374 void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
2375  __ pop(ecx);
2376  __ push(edx);
2377  __ push(eax);
2378  __ push(ecx);
2379 }
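The four instructions tuck the two register operands underneath the return address, turning them into ordinary stack arguments for the builtin or stub that is jumped to next. Stack effect, sketched:

    before:  esp -> [ return address | ... ]
    after:   esp -> [ return address | right (eax) | left (edx) | ... ]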
2380 
2381 
2382 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
2383  // TAGGED case:
2384  // Input:
2385  // esp[4]: tagged number input argument (should be number).
2386  // esp[0]: return address.
2387  // Output:
2388  // eax: tagged double result.
2389  // UNTAGGED case:
2390  // Input:
2391  // esp[0]: return address.
2392  // xmm1: untagged double input argument
2393  // Output:
2394  // xmm1: untagged double result.
2395 
2396  Label runtime_call;
2397  Label runtime_call_clear_stack;
2398  Label skip_cache;
2399  const bool tagged = (argument_type_ == TAGGED);
2400  if (tagged) {
2401  // Test that eax is a number.
2402  Label input_not_smi;
2403  Label loaded;
2404  __ mov(eax, Operand(esp, kPointerSize));
2405  __ JumpIfNotSmi(eax, &input_not_smi, Label::kNear);
2406  // Input is a smi. Untag and load it onto the FPU stack.
2407  // Then load the low and high words of the double into ebx, edx.
2408  STATIC_ASSERT(kSmiTagSize == 1);
2409  __ sar(eax, 1);
2410  __ sub(esp, Immediate(2 * kPointerSize));
2411  __ mov(Operand(esp, 0), eax);
2412  __ fild_s(Operand(esp, 0));
2413  __ fst_d(Operand(esp, 0));
2414  __ pop(edx);
2415  __ pop(ebx);
2416  __ jmp(&loaded, Label::kNear);
2417  __ bind(&input_not_smi);
2418  // Check if input is a HeapNumber.
2419  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
2420  Factory* factory = masm->isolate()->factory();
2421  __ cmp(ebx, Immediate(factory->heap_number_map()));
2422  __ j(not_equal, &runtime_call);
2423  // Input is a HeapNumber. Push it on the FPU stack and load its
2424  // low and high words into ebx, edx.
2425  __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
2426  __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
2427  __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));
2428 
2429  __ bind(&loaded);
2430  } else { // UNTAGGED.
2431  if (CpuFeatures::IsSupported(SSE4_1)) {
2432  CpuFeatures::Scope sse4_scope(SSE4_1);
2433  __ pextrd(edx, xmm1, 0x1); // copy xmm1[63..32] to edx.
2434  } else {
2435  __ pshufd(xmm0, xmm1, 0x1);
2436  __ movd(edx, xmm0);
2437  }
2438  __ movd(ebx, xmm1);
2439  }
2440 
2441  // ST[0] or xmm1 == double value
2442  // ebx = low 32 bits of double value
2443  // edx = high 32 bits of double value
2444  // Compute hash (the shifts are arithmetic):
2445  // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
2446  __ mov(ecx, ebx);
2447  __ xor_(ecx, edx);
2448  __ mov(eax, ecx);
2449  __ sar(eax, 16);
2450  __ xor_(ecx, eax);
2451  __ mov(eax, ecx);
2452  __ sar(eax, 8);
2453  __ xor_(ecx, eax);
2454  ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
2455  __ and_(ecx,
2456  Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
2457 
2458  // ST[0] or xmm1 == double value.
2459  // ebx = low 32 bits of double value.
2460  // edx = high 32 bits of double value.
2461  // ecx = TranscendentalCache::hash(double value).
2462  ExternalReference cache_array =
2463  ExternalReference::transcendental_cache_array_address(masm->isolate());
2464  __ mov(eax, Immediate(cache_array));
2465  int cache_array_index =
2466  type_ * sizeof(masm->isolate()->transcendental_cache()->caches_[0]);
2467  __ mov(eax, Operand(eax, cache_array_index));
2468  // Eax points to the cache for the type type_.
2469  // If NULL, the cache hasn't been initialized yet, so go through runtime.
2470  __ test(eax, eax);
2471  __ j(zero, &runtime_call_clear_stack);
2472 #ifdef DEBUG
2473  // Check that the layout of cache elements match expectations.
2474  { TranscendentalCache::SubCache::Element test_elem[2];
2475  char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
2476  char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
2477  char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
2478  char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
2479  char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
2480  CHECK_EQ(12, elem2_start - elem_start); // Two uint32_t's and a pointer.
2481  CHECK_EQ(0, elem_in0 - elem_start);
2482  CHECK_EQ(kIntSize, elem_in1 - elem_start);
2483  CHECK_EQ(2 * kIntSize, elem_out - elem_start);
2484  }
2485 #endif
2486  // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12].
2487  __ lea(ecx, Operand(ecx, ecx, times_2, 0));
2488  __ lea(ecx, Operand(eax, ecx, times_4, 0));
2489  // Check if cache matches: Double value is stored in uint32_t[2] array.
2490  Label cache_miss;
2491  __ cmp(ebx, Operand(ecx, 0));
2492  __ j(not_equal, &cache_miss, Label::kNear);
2493  __ cmp(edx, Operand(ecx, kIntSize));
2494  __ j(not_equal, &cache_miss, Label::kNear);
2495  // Cache hit!
2496  Counters* counters = masm->isolate()->counters();
2497  __ IncrementCounter(counters->transcendental_cache_hit(), 1);
2498  __ mov(eax, Operand(ecx, 2 * kIntSize));
2499  if (tagged) {
2500  __ fstp(0);
2501  __ ret(kPointerSize);
2502  } else { // UNTAGGED.
2503  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2504  __ Ret();
2505  }
2506 
2507  __ bind(&cache_miss);
2508  __ IncrementCounter(counters->transcendental_cache_miss(), 1);
2509  // Update cache with new value.
2510  // We are short on registers, so use no_reg as scratch.
2511  // This gives slightly larger code.
2512  if (tagged) {
2513  __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
2514  } else { // UNTAGGED.
2515  __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
2516  __ sub(esp, Immediate(kDoubleSize));
2517  __ movdbl(Operand(esp, 0), xmm1);
2518  __ fld_d(Operand(esp, 0));
2519  __ add(esp, Immediate(kDoubleSize));
2520  }
2521  GenerateOperation(masm, type_);
2522  __ mov(Operand(ecx, 0), ebx);
2523  __ mov(Operand(ecx, kIntSize), edx);
2524  __ mov(Operand(ecx, 2 * kIntSize), eax);
2525  __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
2526  if (tagged) {
2527  __ ret(kPointerSize);
2528  } else { // UNTAGGED.
2529  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2530  __ Ret();
2531 
2532  // Skip cache and return answer directly, only in untagged case.
2533  __ bind(&skip_cache);
2534  __ sub(esp, Immediate(kDoubleSize));
2535  __ movdbl(Operand(esp, 0), xmm1);
2536  __ fld_d(Operand(esp, 0));
2537  GenerateOperation(masm, type_);
2538  __ fstp_d(Operand(esp, 0));
2539  __ movdbl(xmm1, Operand(esp, 0));
2540  __ add(esp, Immediate(kDoubleSize));
2541  // We return the value in xmm1 without adding it to the cache, but
2542  // we cause a scavenging GC so that future allocations will succeed.
2543  {
2544  FrameScope scope(masm, StackFrame::INTERNAL);
2545  // Allocate an unused object bigger than a HeapNumber.
2546  __ push(Immediate(Smi::FromInt(2 * kDoubleSize)));
2547  __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
2548  }
2549  __ Ret();
2550  }
2551 
2552  // Call runtime, doing whatever allocation and cleanup is necessary.
2553  if (tagged) {
2554  __ bind(&runtime_call_clear_stack);
2555  __ fstp(0);
2556  __ bind(&runtime_call);
2557  ExternalReference runtime =
2558  ExternalReference(RuntimeFunction(), masm->isolate());
2559  __ TailCallExternalReference(runtime, 1, 1);
2560  } else { // UNTAGGED.
2561  __ bind(&runtime_call_clear_stack);
2562  __ bind(&runtime_call);
2563  __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
2564  __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
2565  {
2566  FrameScope scope(masm, StackFrame::INTERNAL);
2567  __ push(eax);
2568  __ CallRuntime(RuntimeFunction(), 1);
2569  }
2570  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2571  __ Ret();
2572  }
2573 }
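The index computation at lines 2446-2456 folds the two 32-bit halves of the double together before masking. C++ equivalent (a sketch; cache_size stands in for TranscendentalCache::SubCache::kCacheSize, which is a power of two):

    #include <stdint.h>
    uint32_t CacheIndex(uint32_t low, uint32_t high, uint32_t cache_size) {
      uint32_t h = low ^ high;
      h ^= static_cast<uint32_t>(static_cast<int32_t>(h) >> 16);  // sar 16
      h ^= static_cast<uint32_t>(static_cast<int32_t>(h) >> 8);   // sar 8
      return h & (cache_size - 1);
    }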
2574 
2575 
2576 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
2577  switch (type_) {
2578  case TranscendentalCache::SIN: return Runtime::kMath_sin;
2579  case TranscendentalCache::COS: return Runtime::kMath_cos;
2580  case TranscendentalCache::TAN: return Runtime::kMath_tan;
2581  case TranscendentalCache::LOG: return Runtime::kMath_log;
2582  default:
2583  UNIMPLEMENTED();
2584  return Runtime::kAbort;
2585  }
2586 }
2587 
2588 
2589 void TranscendentalCacheStub::GenerateOperation(
2590  MacroAssembler* masm, TranscendentalCache::Type type) {
2591  // Only free register is edi.
2592  // Input value is on FP stack, and also in ebx/edx.
2593  // Input value is possibly in xmm1.
2594  // Address of result (a newly allocated HeapNumber) may be in eax.
2595  if (type == TranscendentalCache::SIN ||
2596  type == TranscendentalCache::COS ||
2597  type == TranscendentalCache::TAN) {
2598  // Both fsin and fcos require arguments in the range +/-2^63 and
2599  // return NaN for infinities and NaN. They can share all code except
2600  // the actual fsin/fcos operation.
2601  Label in_range, done;
2602  // If argument is outside the range -2^63..2^63, fsin/cos doesn't
2603  // work. We must reduce it to the appropriate range.
2604  __ mov(edi, edx);
2605  __ and_(edi, Immediate(0x7ff00000)); // Exponent only.
2606  int supported_exponent_limit =
2607  (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
2608  __ cmp(edi, Immediate(supported_exponent_limit));
2609  __ j(below, &in_range, Label::kNear);
2610  // Check for infinity and NaN. Both return NaN for sin.
2611  __ cmp(edi, Immediate(0x7ff00000));
2612  Label non_nan_result;
2613  __ j(not_equal, &non_nan_result, Label::kNear);
2614  // Input is +/-Infinity or NaN. Result is NaN.
2615  __ fstp(0);
2616  // NaN is represented by 0x7ff8000000000000.
2617  __ push(Immediate(0x7ff80000));
2618  __ push(Immediate(0));
2619  __ fld_d(Operand(esp, 0));
2620  __ add(esp, Immediate(2 * kPointerSize));
2621  __ jmp(&done, Label::kNear);
2622 
2623  __ bind(&non_nan_result);
2624 
2625  // Use fpmod to restrict argument to the range +/-2*PI.
2626  __ mov(edi, eax); // Save eax before using fnstsw_ax.
2627  __ fldpi();
2628  __ fadd(0);
2629  __ fld(1);
2630  // FPU Stack: input, 2*pi, input.
2631  {
2632  Label no_exceptions;
2633  __ fwait();
2634  __ fnstsw_ax();
2635  // Clear if Illegal Operand or Zero Division exceptions are set.
2636  __ test(eax, Immediate(5));
2637  __ j(zero, &no_exceptions, Label::kNear);
2638  __ fnclex();
2639  __ bind(&no_exceptions);
2640  }
2641 
2642  // Compute st(0) % st(1)
2643  {
2644  Label partial_remainder_loop;
2645  __ bind(&partial_remainder_loop);
2646  __ fprem1();
2647  __ fwait();
2648  __ fnstsw_ax();
2649  __ test(eax, Immediate(0x400 /* C2 */));
2650  // If C2 is set, computation only has partial result. Loop to
2651  // continue computation.
2652  __ j(not_zero, &partial_remainder_loop);
2653  }
2654  // FPU Stack: input, 2*pi, input % 2*pi
2655  __ fstp(2);
2656  __ fstp(0);
2657  __ mov(eax, edi); // Restore eax (allocated HeapNumber pointer).
2658 
2659  // FPU Stack: input % 2*pi
2660  __ bind(&in_range);
2661  switch (type) {
2662  case TranscendentalCache::SIN:
2663  __ fsin();
2664  break;
2665  case TranscendentalCache::COS:
2666  __ fcos();
2667  break;
2668  case TranscendentalCache::TAN:
2669  // FPTAN calculates tangent onto st(0) and pushes 1.0 onto the
2670  // FP register stack.
2671  __ fptan();
2672  __ fstp(0); // Pop FP register stack.
2673  break;
2674  default:
2675  UNREACHABLE();
2676  }
2677  __ bind(&done);
2678  } else {
2679  ASSERT(type == TranscendentalCache::LOG);
2680  __ fldln2();
2681  __ fxch();
2682  __ fyl2x();
2683  }
2684 }
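The LOG branch leans on FYL2X, which computes st(1) * log2(st(0)): after fldln2/fxch the FPU stack holds [x, ln 2], so the result left on top is ln(2) * log2(x) = ln(x). The same identity in C++ (a sketch):

    #include <cmath>
    double FpuLog(double x) { return std::log(2.0) * std::log2(x); }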
2685 
2686 
2687 // Input: edx, eax are the left and right objects of a bit op.
2688 // Output: eax, ecx are left and right integers for a bit op.
2689 void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm,
2690  bool use_sse3,
2691  Label* conversion_failure) {
2692  // Check float operands.
2693  Label arg1_is_object, check_undefined_arg1;
2694  Label arg2_is_object, check_undefined_arg2;
2695  Label load_arg2, done;
2696 
2697  // Test if arg1 is a Smi.
2698  __ JumpIfNotSmi(edx, &arg1_is_object, Label::kNear);
2699 
2700  __ SmiUntag(edx);
2701  __ jmp(&load_arg2);
2702 
2703  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
2704  __ bind(&check_undefined_arg1);
2705  Factory* factory = masm->isolate()->factory();
2706  __ cmp(edx, factory->undefined_value());
2707  __ j(not_equal, conversion_failure);
2708  __ mov(edx, Immediate(0));
2709  __ jmp(&load_arg2);
2710 
2711  __ bind(&arg1_is_object);
2712  __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
2713  __ cmp(ebx, factory->heap_number_map());
2714  __ j(not_equal, &check_undefined_arg1);
2715 
2716  // Get the untagged integer version of the edx heap number in ecx.
2717  IntegerConvert(masm, edx, use_sse3, conversion_failure);
2718  __ mov(edx, ecx);
2719 
2720  // Here edx has the untagged integer, eax has a Smi or a heap number.
2721  __ bind(&load_arg2);
2722 
2723  // Test if arg2 is a Smi.
2724  __ JumpIfNotSmi(eax, &arg2_is_object, Label::kNear);
2725 
2726  __ SmiUntag(eax);
2727  __ mov(ecx, eax);
2728  __ jmp(&done);
2729 
2730  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
2731  __ bind(&check_undefined_arg2);
2732  __ cmp(eax, factory->undefined_value());
2733  __ j(not_equal, conversion_failure);
2734  __ mov(ecx, Immediate(0));
2735  __ jmp(&done);
2736 
2737  __ bind(&arg2_is_object);
2738  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
2739  __ cmp(ebx, factory->heap_number_map());
2740  __ j(not_equal, &check_undefined_arg2);
2741 
2742  // Get the untagged integer version of the eax heap number in ecx.
2743  IntegerConvert(masm, eax, use_sse3, conversion_failure);
2744  __ bind(&done);
2745  __ mov(eax, edx);
2746 }
2747 
2748 
2749 void FloatingPointHelper::CheckLoadedIntegersWereInt32(MacroAssembler* masm,
2750  bool use_sse3,
2751  Label* not_int32) {
2752  return;
2753 }
2754 
2755 
2756 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
2757  Register number) {
2758  Label load_smi, done;
2759 
2760  __ JumpIfSmi(number, &load_smi, Label::kNear);
2761  __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
2762  __ jmp(&done, Label::kNear);
2763 
2764  __ bind(&load_smi);
2765  __ SmiUntag(number);
2766  __ push(number);
2767  __ fild_s(Operand(esp, 0));
2768  __ pop(number);
2769 
2770  __ bind(&done);
2771 }
2772 
2773 
2774 void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
2775  Label load_smi_edx, load_eax, load_smi_eax, done;
2776  // Load operand in edx into xmm0.
2777  __ JumpIfSmi(edx, &load_smi_edx, Label::kNear);
2778  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
2779 
2780  __ bind(&load_eax);
2781  // Load operand in eax into xmm1.
2782  __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
2783  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2784  __ jmp(&done, Label::kNear);
2785 
2786  __ bind(&load_smi_edx);
2787  __ SmiUntag(edx); // Untag smi before converting to float.
2788  __ cvtsi2sd(xmm0, edx);
2789  __ SmiTag(edx); // Retag smi for heap number overwriting test.
2790  __ jmp(&load_eax);
2791 
2792  __ bind(&load_smi_eax);
2793  __ SmiUntag(eax); // Untag smi before converting to float.
2794  __ cvtsi2sd(xmm1, eax);
2795  __ SmiTag(eax); // Retag smi for heap number overwriting test.
2796 
2797  __ bind(&done);
2798 }
2799 
2800 
2801 void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
2802  Label* not_numbers) {
2803  Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
2804  // Load operand in edx into xmm0, or branch to not_numbers.
2805  __ JumpIfSmi(edx, &load_smi_edx, Label::kNear);
2806  Factory* factory = masm->isolate()->factory();
2807  __ cmp(FieldOperand(edx, HeapObject::kMapOffset), factory->heap_number_map());
2808  __ j(not_equal, not_numbers); // Argument in edx is not a number.
2809  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
2810  __ bind(&load_eax);
2811  // Load operand in eax into xmm1, or branch to not_numbers.
2812  __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
2813  __ cmp(FieldOperand(eax, HeapObject::kMapOffset), factory->heap_number_map());
2814  __ j(equal, &load_float_eax, Label::kNear);
2815  __ jmp(not_numbers); // Argument in eax is not a number.
2816  __ bind(&load_smi_edx);
2817  __ SmiUntag(edx); // Untag smi before converting to float.
2818  __ cvtsi2sd(xmm0, edx);
2819  __ SmiTag(edx); // Retag smi for heap number overwriting test.
2820  __ jmp(&load_eax);
2821  __ bind(&load_smi_eax);
2822  __ SmiUntag(eax); // Untag smi before converting to float.
2823  __ cvtsi2sd(xmm1, eax);
2824  __ SmiTag(eax); // Retag smi for heap number overwriting test.
2825  __ jmp(&done, Label::kNear);
2826  __ bind(&load_float_eax);
2827  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2828  __ bind(&done);
2829 }
2830 
2831 
2832 void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
2833  Register scratch) {
2834  const Register left = edx;
2835  const Register right = eax;
2836  __ mov(scratch, left);
2837  ASSERT(!scratch.is(right)); // We're about to clobber scratch.
2838  __ SmiUntag(scratch);
2839  __ cvtsi2sd(xmm0, scratch);
2840 
2841  __ mov(scratch, right);
2842  __ SmiUntag(scratch);
2843  __ cvtsi2sd(xmm1, scratch);
2844 }
2845 
2846 
2847 void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm,
2848  Label* non_int32,
2849  Register scratch) {
2850  __ cvttsd2si(scratch, Operand(xmm0));
2851  __ cvtsi2sd(xmm2, scratch);
2852  __ ucomisd(xmm0, xmm2);
2853  __ j(not_zero, non_int32);
2854  __ j(carry, non_int32);
2855  __ cvttsd2si(scratch, Operand(xmm1));
2856  __ cvtsi2sd(xmm2, scratch);
2857  __ ucomisd(xmm1, xmm2);
2858  __ j(not_zero, non_int32);
2859  __ j(carry, non_int32);
2860 }
2861 
2862 
2863 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
2864  Register scratch,
2865  ArgLocation arg_location) {
2866  Label load_smi_1, load_smi_2, done_load_1, done;
2867  if (arg_location == ARGS_IN_REGISTERS) {
2868  __ mov(scratch, edx);
2869  } else {
2870  __ mov(scratch, Operand(esp, 2 * kPointerSize));
2871  }
2872  __ JumpIfSmi(scratch, &load_smi_1, Label::kNear);
2873  __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
2874  __ bind(&done_load_1);
2875 
2876  if (arg_location == ARGS_IN_REGISTERS) {
2877  __ mov(scratch, eax);
2878  } else {
2879  __ mov(scratch, Operand(esp, 1 * kPointerSize));
2880  }
2881  __ JumpIfSmi(scratch, &load_smi_2, Label::kNear);
2882  __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
2883  __ jmp(&done, Label::kNear);
2884 
2885  __ bind(&load_smi_1);
2886  __ SmiUntag(scratch);
2887  __ push(scratch);
2888  __ fild_s(Operand(esp, 0));
2889  __ pop(scratch);
2890  __ jmp(&done_load_1);
2891 
2892  __ bind(&load_smi_2);
2893  __ SmiUntag(scratch);
2894  __ push(scratch);
2895  __ fild_s(Operand(esp, 0));
2896  __ pop(scratch);
2897 
2898  __ bind(&done);
2899 }
2900 
2901 
2902 void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
2903  Register scratch) {
2904  const Register left = edx;
2905  const Register right = eax;
2906  __ mov(scratch, left);
2907  ASSERT(!scratch.is(right)); // We're about to clobber scratch.
2908  __ SmiUntag(scratch);
2909  __ push(scratch);
2910  __ fild_s(Operand(esp, 0));
2911 
2912  __ mov(scratch, right);
2913  __ SmiUntag(scratch);
2914  __ mov(Operand(esp, 0), scratch);
2915  __ fild_s(Operand(esp, 0));
2916  __ pop(scratch);
2917 }
2918 
2919 
2920 void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
2921  Label* non_float,
2922  Register scratch) {
2923  Label test_other, done;
2924  // Test that both operands are numbers (smi or heap number);
2925  // otherwise jump to non_float.
2926  __ JumpIfSmi(edx, &test_other, Label::kNear);
2927  __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
2928  Factory* factory = masm->isolate()->factory();
2929  __ cmp(scratch, factory->heap_number_map());
2930  __ j(not_equal, non_float); // argument in edx is not a number -> NaN
2931 
2932  __ bind(&test_other);
2933  __ JumpIfSmi(eax, &done, Label::kNear);
2934  __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
2935  __ cmp(scratch, factory->heap_number_map());
2936  __ j(not_equal, non_float); // argument in eax is not a number -> NaN
2937 
2938  // Fall-through: Both operands are numbers.
2939  __ bind(&done);
2940 }
2941 
2942 
2943 void FloatingPointHelper::CheckFloatOperandsAreInt32(MacroAssembler* masm,
2944  Label* non_int32) {
2945  return;
2946 }
2947 
2948 
2949 void MathPowStub::Generate(MacroAssembler* masm) {
2950  CpuFeatures::Scope use_sse2(SSE2);
2951  Factory* factory = masm->isolate()->factory();
2952  const Register exponent = eax;
2953  const Register base = edx;
2954  const Register scratch = ecx;
2955  const XMMRegister double_result = xmm3;
2956  const XMMRegister double_base = xmm2;
2957  const XMMRegister double_exponent = xmm1;
2958  const XMMRegister double_scratch = xmm4;
2959 
2960  Label call_runtime, done, exponent_not_smi, int_exponent;
2961 
2962  // Save 1 in double_result - we need this several times later on.
2963  __ mov(scratch, Immediate(1));
2964  __ cvtsi2sd(double_result, scratch);
2965 
2966  if (exponent_type_ == ON_STACK) {
2967  Label base_is_smi, unpack_exponent;
2968  // The exponent and base are supplied as arguments on the stack.
2969  // This can only happen if the stub is called from non-optimized code.
2970  // Load input parameters from stack.
2971  __ mov(base, Operand(esp, 2 * kPointerSize));
2972  __ mov(exponent, Operand(esp, 1 * kPointerSize));
2973 
2974  __ JumpIfSmi(base, &base_is_smi, Label::kNear);
2975  __ cmp(FieldOperand(base, HeapObject::kMapOffset),
2976  factory->heap_number_map());
2977  __ j(not_equal, &call_runtime);
2978 
2979  __ movdbl(double_base, FieldOperand(base, HeapNumber::kValueOffset));
2980  __ jmp(&unpack_exponent, Label::kNear);
2981 
2982  __ bind(&base_is_smi);
2983  __ SmiUntag(base);
2984  __ cvtsi2sd(double_base, base);
2985 
2986  __ bind(&unpack_exponent);
2987  __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
2988  __ SmiUntag(exponent);
2989  __ jmp(&int_exponent);
2990 
2991  __ bind(&exponent_not_smi);
2992  __ cmp(FieldOperand(exponent, HeapObject::kMapOffset),
2993  factory->heap_number_map());
2994  __ j(not_equal, &call_runtime);
2995  __ movdbl(double_exponent,
2996  FieldOperand(exponent, HeapNumber::kValueOffset));
2997  } else if (exponent_type_ == TAGGED) {
2998  __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
2999  __ SmiUntag(exponent);
3000  __ jmp(&int_exponent);
3001 
3002  __ bind(&exponent_not_smi);
3003  __ movdbl(double_exponent,
3004  FieldOperand(exponent, HeapNumber::kValueOffset));
3005  }
3006 
3007  if (exponent_type_ != INTEGER) {
3008  Label fast_power;
3009  // Detect integer exponents stored as double.
3010  __ cvttsd2si(exponent, Operand(double_exponent));
3011  // Skip to runtime if possibly NaN (indicated by the indefinite integer).
3012  __ cmp(exponent, Immediate(0x80000000u));
3013  __ j(equal, &call_runtime);
3014  __ cvtsi2sd(double_scratch, exponent);
3015  // Already ruled out NaNs for exponent.
3016  __ ucomisd(double_exponent, double_scratch);
3017  __ j(equal, &int_exponent);
3018 
3019  if (exponent_type_ == ON_STACK) {
3020  // Detect square root case. Crankshaft detects constant +/-0.5 at
3021  // compile time and uses DoMathPowHalf instead. We then skip this check
3022  // for non-constant cases of +/-0.5 as these hardly occur.
3023  Label continue_sqrt, continue_rsqrt, not_plus_half;
3024  // Test for 0.5.
3025  // Load double_scratch with 0.5.
3026  __ mov(scratch, Immediate(0x3F000000u));
3027  __ movd(double_scratch, scratch);
3028  __ cvtss2sd(double_scratch, double_scratch);
3029  // Already ruled out NaNs for exponent.
3030  __ ucomisd(double_scratch, double_exponent);
3031  __ j(not_equal, &not_plus_half, Label::kNear);
3032 
3033  // Calculates square root of base. Check for the special case of
3034  // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
3035  // According to IEEE-754, single-precision -Infinity has the highest
3036  // 9 bits set and the lowest 23 bits cleared.
3037  __ mov(scratch, 0xFF800000u);
3038  __ movd(double_scratch, scratch);
3039  __ cvtss2sd(double_scratch, double_scratch);
3040  __ ucomisd(double_base, double_scratch);
3041  // Comparing -Infinity with NaN results in "unordered", which sets the
3042  // zero flag as if both were equal. However, it also sets the carry flag.
3043  __ j(not_equal, &continue_sqrt, Label::kNear);
3044  __ j(carry, &continue_sqrt, Label::kNear);
3045 
3046  // Set result to Infinity in the special case.
3047  __ xorps(double_result, double_result);
3048  __ subsd(double_result, double_scratch);
3049  __ jmp(&done);
3050 
3051  __ bind(&continue_sqrt);
3052  // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
3053  __ xorps(double_scratch, double_scratch);
3054  __ addsd(double_scratch, double_base); // Convert -0 to +0.
3055  __ sqrtsd(double_result, double_scratch);
3056  __ jmp(&done);
3057 
3058  // Test for -0.5.
3059  __ bind(&not_plus_half);
3060  // Load double_exponent with -0.5 by subtracting 1.
3061  __ subsd(double_scratch, double_result);
3062  // Already ruled out NaNs for exponent.
3063  __ ucomisd(double_scratch, double_exponent);
3064  __ j(not_equal, &fast_power, Label::kNear);
3065 
3066  // Calculates reciprocal of square root of base. Check for the special
3067  // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
3068  // According to IEEE-754, single-precision -Infinity has the highest
3069  // 9 bits set and the lowest 23 bits cleared.
3070  __ mov(scratch, 0xFF800000u);
3071  __ movd(double_scratch, scratch);
3072  __ cvtss2sd(double_scratch, double_scratch);
3073  __ ucomisd(double_base, double_scratch);
3074  // Comparing -Infinity with NaN results in "unordered", which sets the
3075  // zero flag as if both were equal. However, it also sets the carry flag.
3076  __ j(not_equal, &continue_rsqrt, Label::kNear);
3077  __ j(carry, &continue_rsqrt, Label::kNear);
3078 
3079  // Set result to 0 in the special case.
3080  __ xorps(double_result, double_result);
3081  __ jmp(&done);
3082 
3083  __ bind(&continue_rsqrt);
3084  // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
3085  __ xorps(double_exponent, double_exponent);
3086  __ addsd(double_exponent, double_base); // Convert -0 to +0.
3087  __ sqrtsd(double_exponent, double_exponent);
3088  __ divsd(double_result, double_exponent);
3089  __ jmp(&done);
3090  }
3091 
3092  // Using FPU instructions to calculate power.
3093  Label fast_power_failed;
3094  __ bind(&fast_power);
3095  __ fnclex(); // Clear flags to catch exceptions later.
3096  // Transfer (B)ase and (E)xponent onto the FPU register stack.
3097  __ sub(esp, Immediate(kDoubleSize));
3098  __ movdbl(Operand(esp, 0), double_exponent);
3099  __ fld_d(Operand(esp, 0)); // E
3100  __ movdbl(Operand(esp, 0), double_base);
3101  __ fld_d(Operand(esp, 0)); // B, E
3102 
3103  // Exponent is in st(1) and base is in st(0)
3104  // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
3105  // FYL2X calculates st(1) * log2(st(0))
3106  __ fyl2x(); // X
3107  __ fld(0); // X, X
3108  __ frndint(); // rnd(X), X
3109  __ fsub(1); // rnd(X), X-rnd(X)
3110  __ fxch(1); // X - rnd(X), rnd(X)
3111  // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
3112  __ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
3113  __ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
3114  __ faddp(1); // 1, 2^(X-rnd(X)), rnd(X)
3115  // FSCALE calculates st(0) * 2^st(1)
3116  __ fscale(); // 2^X, rnd(X)
3117  __ fstp(1);
3118  // Bail out to runtime in case of exceptions in the status word.
3119  __ fnstsw_ax();
3120  __ test_b(eax, 0x5F); // We check for all but precision exception.
3121  __ j(not_zero, &fast_power_failed, Label::kNear);
3122  __ fstp_d(Operand(esp, 0));
3123  __ movdbl(double_result, Operand(esp, 0));
3124  __ add(esp, Immediate(kDoubleSize));
3125  __ jmp(&done);
3126 
3127  __ bind(&fast_power_failed);
3128  __ fninit();
3129  __ add(esp, Immediate(kDoubleSize));
3130  __ jmp(&call_runtime);
3131  }
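The fast path above evaluates b^e as 2^(e * log2 b), splitting the product into integer and fractional parts because F2XM1 only accepts arguments in (-1, 1). A sketch of the same computation, assuming the default round-to-nearest FPU mode that frndint uses here:

    #include <cmath>
    double FastPower(double b, double e) {
      double x = e * std::log2(b);                   // fyl2x
      double r = std::nearbyint(x);                  // frndint
      double frac = std::exp2(x - r);                // f2xm1 then fld1/faddp
      return std::ldexp(frac, static_cast<int>(r));  // fscale: frac * 2^r
    }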
3132 
3133  // Calculate power with integer exponent.
3134  __ bind(&int_exponent);
3135  const XMMRegister double_scratch2 = double_exponent;
3136  __ mov(scratch, exponent); // Back up exponent.
3137  __ movsd(double_scratch, double_base); // Back up base.
3138  __ movsd(double_scratch2, double_result); // Load double_exponent with 1.
3139 
3140  // Get absolute value of exponent.
3141  Label no_neg, while_true, no_multiply;
3142  __ test(scratch, scratch);
3143  __ j(positive, &no_neg, Label::kNear);
3144  __ neg(scratch);
3145  __ bind(&no_neg);
3146 
3147  __ bind(&while_true);
3148  __ shr(scratch, 1);
3149  __ j(not_carry, &no_multiply, Label::kNear);
3150  __ mulsd(double_result, double_scratch);
3151  __ bind(&no_multiply);
3152 
3153  __ mulsd(double_scratch, double_scratch);
3154  __ j(not_zero, &while_true);
3155 
3156  // scratch has the original value of the exponent - if the exponent is
3157  // negative, return 1/result.
3158  __ test(exponent, exponent);
3159  __ j(positive, &done);
3160  __ divsd(double_scratch2, double_result);
3161  __ movsd(double_result, double_scratch2);
3162  // Test whether result is zero. Bail out to check for subnormal result.
3163  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
3164  __ xorps(double_scratch2, double_scratch2);
3165  __ ucomisd(double_scratch2, double_result); // Result cannot be NaN.
3166  // double_exponent aliased as double_scratch2 has already been overwritten
3167  // and may not have contained the exponent value in the first place when the
3168  // exponent is a smi. We reset it with exponent value before bailing out.
3169  __ j(not_equal, &done);
3170  __ cvtsi2sd(double_exponent, exponent);
3171 
3172  // Returning or bailing out.
3173  Counters* counters = masm->isolate()->counters();
3174  if (exponent_type_ == ON_STACK) {
3175  // The arguments are still on the stack.
3176  __ bind(&call_runtime);
3177  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
3178 
3179  // The stub is called from non-optimized code, which expects the result
3180  // as heap number in exponent.
3181  __ bind(&done);
3182  __ AllocateHeapNumber(eax, scratch, base, &call_runtime);
3183  __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
3184  __ IncrementCounter(counters->math_pow(), 1);
3185  __ ret(2 * kPointerSize);
3186  } else {
3187  __ bind(&call_runtime);
3188  {
3189  AllowExternalCallThatCantCauseGC scope(masm);
3190  __ PrepareCallCFunction(4, scratch);
3191  __ movdbl(Operand(esp, 0 * kDoubleSize), double_base);
3192  __ movdbl(Operand(esp, 1 * kDoubleSize), double_exponent);
3193  __ CallCFunction(
3194  ExternalReference::power_double_double_function(masm->isolate()), 4);
3195  }
3196  // Return value is in st(0) on ia32.
3197  // Store it into the (fixed) result register.
3198  __ sub(esp, Immediate(kDoubleSize));
3199  __ fstp_d(Operand(esp, 0));
3200  __ movdbl(double_result, Operand(esp, 0));
3201  __ add(esp, Immediate(kDoubleSize));
3202 
3203  __ bind(&done);
3204  __ IncrementCounter(counters->math_pow(), 1);
3205  __ ret(0);
3206  }
3207 }
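The integer-exponent loop at lines 3147-3154 is classic square-and-multiply: shr moves the low exponent bit into the carry flag, mulsd accumulates when it is set, and the running base is squared every iteration. A sketch:

    #include <stdint.h>
    double IntPower(double base, int exponent) {
      uint32_t e = exponent < 0 ? 0u - static_cast<uint32_t>(exponent)
                                : static_cast<uint32_t>(exponent);  // neg
      double result = 1.0;
      double scratch = base;
      while (e != 0) {
        if (e & 1) result *= scratch;  // shr sets carry -> mulsd
        scratch *= scratch;            // square for the next bit
        e >>= 1;
      }
      // For negative exponents the stub computes 1/result and then
      // re-checks for a zero (possibly subnormal) result before returning.
      return exponent < 0 ? 1.0 / result : result;
    }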
3208 
3209 
3210 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
3211  // The key is in edx and the parameter count is in eax.
3212 
3213  // The displacement is used for skipping the frame pointer on the
3214  // stack. It is the offset of the last parameter (if any) relative
3215  // to the frame pointer.
3216  static const int kDisplacement = 1 * kPointerSize;
3217 
3218  // Check that the key is a smi.
3219  Label slow;
3220  __ JumpIfNotSmi(edx, &slow, Label::kNear);
3221 
3222  // Check if the calling frame is an arguments adaptor frame.
3223  Label adaptor;
3224  __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3225  __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
3226  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3227  __ j(equal, &adaptor, Label::kNear);
3228 
3229  // Check index against formal parameters count limit passed in
3230  // through register eax. Use unsigned comparison to get negative
3231  // check for free.
3232  __ cmp(edx, eax);
3233  __ j(above_equal, &slow, Label::kNear);
3234 
3235  // Read the argument from the stack and return it.
3236  STATIC_ASSERT(kSmiTagSize == 1);
3237  STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
3238  __ lea(ebx, Operand(ebp, eax, times_2, 0));
3239  __ neg(edx);
3240  __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
3241  __ ret(0);
3242 
3243  // Arguments adaptor case: Check index against actual arguments
3244  // limit found in the arguments adaptor frame. Use unsigned
3245  // comparison to get negative check for free.
3246  __ bind(&adaptor);
3247  __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
3248  __ cmp(edx, ecx);
3249  __ j(above_equal, &slow, Label::kNear);
3250 
3251  // Read the argument from the stack and return it.
3252  STATIC_ASSERT(kSmiTagSize == 1);
3253  STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
3254  __ lea(ebx, Operand(ebx, ecx, times_2, 0));
3255  __ neg(edx);
3256  __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
3257  __ ret(0);
3258 
3259  // Slow-case: Handle non-smi or out-of-bounds access to arguments
3260  // by calling the runtime system.
3261  __ bind(&slow);
3262  __ pop(ebx); // Return address.
3263  __ push(edx);
3264  __ push(ebx);
3265  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
3266 }
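Both return paths use the same trick: eax (the parameter count, still a smi) scaled by times_2 gives the byte size of the argument area, and negating the smi key before a second times_2 scale subtracts key * 4 from it. In effect the stub computes

    argument address = ebp + count * 4 + kDisplacement - key * 4

so key 0 reads the deepest stacked parameter, relying on smi tagging (value << 1) to make a times_2 scale act as a 4-byte scale.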
3267 
3268 
3269 void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
3270  // esp[0] : return address
3271  // esp[4] : number of parameters
3272  // esp[8] : receiver displacement
3273  // esp[12] : function
3274 
3275  // Check if the calling frame is an arguments adaptor frame.
3276  Label runtime;
3277  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3278  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
3279  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3280  __ j(not_equal, &runtime, Label::kNear);
3281 
3282  // Patch the arguments.length and the parameters pointer.
3283  __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
3284  __ mov(Operand(esp, 1 * kPointerSize), ecx);
3285  __ lea(edx, Operand(edx, ecx, times_2,
3286  StandardFrameConstants::kCallerSPOffset));
3287  __ mov(Operand(esp, 2 * kPointerSize), edx);
3288 
3289  __ bind(&runtime);
3290  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
3291 }
3292 
3293 
3294 void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
3295  // esp[0] : return address
3296  // esp[4] : number of parameters (tagged)
3297  // esp[8] : receiver displacement
3298  // esp[12] : function
3299 
3300  // ebx = parameter count (tagged)
3301  __ mov(ebx, Operand(esp, 1 * kPointerSize));
3302 
3303  // Check if the calling frame is an arguments adaptor frame.
3304  // TODO(rossberg): Factor out some of the bits that are shared with the other
3305  // Generate* functions.
3306  Label runtime;
3307  Label adaptor_frame, try_allocate;
3308  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3309  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
3310  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3311  __ j(equal, &adaptor_frame, Label::kNear);
3312 
3313  // No adaptor, parameter count = argument count.
3314  __ mov(ecx, ebx);
3315  __ jmp(&try_allocate, Label::kNear);
3316 
3317  // We have an adaptor frame. Patch the parameters pointer.
3318  __ bind(&adaptor_frame);
3319  __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
3320  __ lea(edx, Operand(edx, ecx, times_2,
3321  StandardFrameConstants::kCallerSPOffset));
3322  __ mov(Operand(esp, 2 * kPointerSize), edx);
3323 
3324  // ebx = parameter count (tagged)
3325  // ecx = argument count (tagged)
3326  // esp[4] = parameter count (tagged)
3327  // esp[8] = address of receiver argument
3328  // Compute the mapped parameter count = min(ebx, ecx) in ebx.
3329  __ cmp(ebx, ecx);
3330  __ j(less_equal, &try_allocate, Label::kNear);
3331  __ mov(ebx, ecx);
3332 
3333  __ bind(&try_allocate);
3334 
3335  // Save mapped parameter count.
3336  __ push(ebx);
3337 
3338  // Compute the sizes of backing store, parameter map, and arguments object.
3339  // 1. Parameter map, has 2 extra words containing context and backing store.
3340  const int kParameterMapHeaderSize =
3341  FixedArray::kHeaderSize + 2 * kPointerSize;
3342  Label no_parameter_map;
3343  __ test(ebx, ebx);
3344  __ j(zero, &no_parameter_map, Label::kNear);
3345  __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
3346  __ bind(&no_parameter_map);
3347 
3348  // 2. Backing store.
3349  __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
3350 
3351  // 3. Arguments object.
3352  __ add(ebx, Immediate(Heap::kArgumentsObjectSize));
3353 
3354  // Do the allocation of all three objects in one go.
3355  __ AllocateInNewSpace(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
3356 
3357  // eax = address of new object(s) (tagged)
3358  // ecx = argument count (tagged)
3359  // esp[0] = mapped parameter count (tagged)
3360  // esp[8] = parameter count (tagged)
3361  // esp[12] = address of receiver argument
3362  // Get the arguments boilerplate from the current (global) context into edi.
3363  Label has_mapped_parameters, copy;
3364  __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
3365  __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
3366  __ mov(ebx, Operand(esp, 0 * kPointerSize));
3367  __ test(ebx, ebx);
3368  __ j(not_zero, &has_mapped_parameters, Label::kNear);
3369  __ mov(edi, Operand(edi,
3370  Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX)));
3371  __ jmp(&copy, Label::kNear);
3372 
3373  __ bind(&has_mapped_parameters);
3374  __ mov(edi, Operand(edi,
3375  Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX)));
3376  __ bind(&copy);
3377 
3378  // eax = address of new object (tagged)
3379  // ebx = mapped parameter count (tagged)
3380  // ecx = argument count (tagged)
3381  // edi = address of boilerplate object (tagged)
3382  // esp[0] = mapped parameter count (tagged)
3383  // esp[8] = parameter count (tagged)
3384  // esp[12] = address of receiver argument
3385  // Copy the JS object part.
3386  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
3387  __ mov(edx, FieldOperand(edi, i));
3388  __ mov(FieldOperand(eax, i), edx);
3389  }
3390 
3391  // Set up the callee in-object property.
3392  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
3393  __ mov(edx, Operand(esp, 4 * kPointerSize));
3394  __ mov(FieldOperand(eax, JSObject::kHeaderSize +
3395  Heap::kArgumentsCalleeIndex * kPointerSize),
3396  edx);
3397 
3398  // Use the length (smi tagged) and set that as an in-object property too.
3399  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
3400  __ mov(FieldOperand(eax, JSObject::kHeaderSize +
3401  Heap::kArgumentsLengthIndex * kPointerSize),
3402  ecx);
3403 
3404  // Set up the elements pointer in the allocated arguments object.
3405  // If we allocated a parameter map, edi will point there, otherwise to the
3406  // backing store.
3407  __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
3408  __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
3409 
3410  // eax = address of new object (tagged)
3411  // ebx = mapped parameter count (tagged)
3412  // ecx = argument count (tagged)
3413  // edi = address of parameter map or backing store (tagged)
3414  // esp[0] = mapped parameter count (tagged)
3415  // esp[8] = parameter count (tagged)
3416  // esp[12] = address of receiver argument
3417  // Free a register.
3418  __ push(eax);
3419 
3420  // Initialize parameter map. If there are no mapped arguments, we're done.
3421  Label skip_parameter_map;
3422  __ test(ebx, ebx);
3423  __ j(zero, &skip_parameter_map);
3424 
3425  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
3426  Immediate(FACTORY->non_strict_arguments_elements_map()));
3427  __ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
3428  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
3429  __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
3430  __ lea(eax, Operand(edi, ebx, times_2, kParameterMapHeaderSize));
3431  __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 1 * kPointerSize), eax);
3432 
3433  // Copy the parameter slots and the holes in the arguments.
3434  // We need to fill in mapped_parameter_count slots. They index the context,
3435  // where parameters are stored in reverse order, at
3436  // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
3437  // The mapped parameters thus need to get indices
3438  // MIN_CONTEXT_SLOTS+parameter_count-1 ..
3439  // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
3440  // We loop from right to left.
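  // Worked example (hypothetical counts, Context::MIN_CONTEXT_SLOTS == 4
  // assumed): with parameter_count == 3 and mapped_parameter_count == 2,
  // ebx starts at 4 + 3 - 2 == 5, so map slot 1 receives context index 5
  // and map slot 0 receives context index 6; the third (unmapped)
  // parameter lives only in the backing store.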
3441  Label parameters_loop, parameters_test;
3442  __ push(ecx);
3443  __ mov(eax, Operand(esp, 2 * kPointerSize));
3444  __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
3445  __ add(ebx, Operand(esp, 4 * kPointerSize));
3446  __ sub(ebx, eax);
3447  __ mov(ecx, FACTORY->the_hole_value());
3448  __ mov(edx, edi);
3449  __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
3450  // eax = loop variable (tagged)
3451  // ebx = mapping index (tagged)
3452  // ecx = the hole value
3453  // edx = address of parameter map (tagged)
3454  // edi = address of backing store (tagged)
3455  // esp[0] = argument count (tagged)
3456  // esp[4] = address of new object (tagged)
3457  // esp[8] = mapped parameter count (tagged)
3458  // esp[16] = parameter count (tagged)
3459  // esp[20] = address of receiver argument
3460  __ jmp(&parameters_test, Label::kNear);
3461 
3462  __ bind(&parameters_loop);
3463  __ sub(eax, Immediate(Smi::FromInt(1)));
3464  __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
3465  __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
3466  __ add(ebx, Immediate(Smi::FromInt(1)));
3467  __ bind(&parameters_test);
3468  __ test(eax, eax);
3469  __ j(not_zero, &parameters_loop, Label::kNear);
3470  __ pop(ecx);
3471 
3472  __ bind(&skip_parameter_map);
3473 
3474  // ecx = argument count (tagged)
3475  // edi = address of backing store (tagged)
3476  // esp[0] = address of new object (tagged)
3477  // esp[4] = mapped parameter count (tagged)
3478  // esp[12] = parameter count (tagged)
3479  // esp[16] = address of receiver argument
3480  // Copy arguments header and remaining slots (if there are any).
3481  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
3482  Immediate(FACTORY->fixed_array_map()));
3483  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
3484 
3485  Label arguments_loop, arguments_test;
3486  __ mov(ebx, Operand(esp, 1 * kPointerSize));
3487  __ mov(edx, Operand(esp, 4 * kPointerSize));
3488  __ sub(edx, ebx); // Is there a smarter way to do negative scaling?
3489  __ sub(edx, ebx);
3490  __ jmp(&arguments_test, Label::kNear);
3491 
3492  __ bind(&arguments_loop);
3493  __ sub(edx, Immediate(kPointerSize));
3494  __ mov(eax, Operand(edx, 0));
3495  __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
3496  __ add(ebx, Immediate(Smi::FromInt(1)));
3497 
3498  __ bind(&arguments_test);
3499  __ cmp(ebx, ecx);
3500  __ j(less, &arguments_loop, Label::kNear);
3501 
3502  // Restore.
3503  __ pop(eax); // Address of arguments object.
3504  __ pop(ebx); // Parameter count.
3505 
3506  // Return and remove the on-stack parameters.
3507  __ ret(3 * kPointerSize);
3508 
3509  // Do the runtime call to allocate the arguments object.
3510  __ bind(&runtime);
3511  __ pop(eax); // Remove saved parameter count.
3512  __ mov(Operand(esp, 1 * kPointerSize), ecx); // Patch argument count.
3513  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
3514 }
3515 
3516 
3517 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
3518  // esp[0] : return address
3519  // esp[4] : number of parameters
3520  // esp[8] : receiver displacement
3521  // esp[12] : function
3522 
3523  // Check if the calling frame is an arguments adaptor frame.
3524  Label adaptor_frame, try_allocate, runtime;
3525  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3526  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
3527  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3528  __ j(equal, &adaptor_frame, Label::kNear);
3529 
3530  // Get the length from the frame.
3531  __ mov(ecx, Operand(esp, 1 * kPointerSize));
3532  __ jmp(&try_allocate, Label::kNear);
3533 
3534  // Patch the arguments.length and the parameters pointer.
3535  __ bind(&adaptor_frame);
3536  __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
3537  __ mov(Operand(esp, 1 * kPointerSize), ecx);
3538  __ lea(edx, Operand(edx, ecx, times_2,
3539  StandardFrameConstants::kCallerSPOffset));
3540  __ mov(Operand(esp, 2 * kPointerSize), edx);
3541 
3542  // Try the new space allocation. Start out with computing the size of
3543  // the arguments object and the elements array.
3544  Label add_arguments_object;
3545  __ bind(&try_allocate);
3546  __ test(ecx, ecx);
3547  __ j(zero, &add_arguments_object, Label::kNear);
3548  __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
3549  __ bind(&add_arguments_object);
3550  __ add(ecx, Immediate(Heap::kArgumentsObjectSizeStrict));
3551 
3552  // Do the allocation of both objects in one go.
3553  __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
3554 
3555  // Get the arguments boilerplate from the current (global) context.
3556  __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
3557  __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
3558  const int offset =
3559  Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
3560  __ mov(edi, Operand(edi, offset));
3561 
3562  // Copy the JS object part.
3563  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
3564  __ mov(ebx, FieldOperand(edi, i));
3565  __ mov(FieldOperand(eax, i), ebx);
3566  }
3567 
3568  // Get the length (smi tagged) and set that as an in-object property too.
3569  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
3570  __ mov(ecx, Operand(esp, 1 * kPointerSize));
3571  __ mov(FieldOperand(eax, JSObject::kHeaderSize +
3572  Heap::kArgumentsLengthIndex * kPointerSize),
3573  ecx);
3574 
3575  // If there are no actual arguments, we're done.
3576  Label done;
3577  __ test(ecx, ecx);
3578  __ j(zero, &done, Label::kNear);
3579 
3580  // Get the parameters pointer from the stack.
3581  __ mov(edx, Operand(esp, 2 * kPointerSize));
3582 
3583  // Set up the elements pointer in the allocated arguments object and
3584  // initialize the header in the elements fixed array.
3585  __ lea(edi, Operand(eax, Heap::kArgumentsObjectSizeStrict));
3586  __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
3587  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
3588  Immediate(FACTORY->fixed_array_map()));
3589 
3590  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
3591  // Untag the length for the loop below.
3592  __ SmiUntag(ecx);
3593 
3594  // Copy the fixed array slots.
3595  Label loop;
3596  __ bind(&loop);
3597  __ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver.
3598  __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
3599  __ add(edi, Immediate(kPointerSize));
3600  __ sub(edx, Immediate(kPointerSize));
3601  __ dec(ecx);
3602  __ j(not_zero, &loop);
3603 
3604  // Return and remove the on-stack parameters.
3605  __ bind(&done);
3606  __ ret(3 * kPointerSize);
3607 
3608  // Do the runtime call to allocate the arguments object.
3609  __ bind(&runtime);
3610  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
3611 }
3612 
3613 
3614 void RegExpExecStub::Generate(MacroAssembler* masm) {
3615  // Just jump directly to runtime if native RegExp is not selected at compile
3616  // time or if the regexp entry in generated code is turned off by a runtime
3617  // switch or at compilation.
3618 #ifdef V8_INTERPRETED_REGEXP
3619  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
3620 #else // V8_INTERPRETED_REGEXP
3621 
3622  // Stack frame on entry.
3623  // esp[0]: return address
3624  // esp[4]: last_match_info (expected JSArray)
3625  // esp[8]: previous index
3626  // esp[12]: subject string
3627  // esp[16]: JSRegExp object
3628 
3629  static const int kLastMatchInfoOffset = 1 * kPointerSize;
3630  static const int kPreviousIndexOffset = 2 * kPointerSize;
3631  static const int kSubjectOffset = 3 * kPointerSize;
3632  static const int kJSRegExpOffset = 4 * kPointerSize;
3633 
3634  Label runtime, invoke_regexp;
3635 
3636  // Ensure that a RegExp stack is allocated.
3637  ExternalReference address_of_regexp_stack_memory_address =
3638  ExternalReference::address_of_regexp_stack_memory_address(
3639  masm->isolate());
3640  ExternalReference address_of_regexp_stack_memory_size =
3641  ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
3642  __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
3643  __ test(ebx, ebx);
3644  __ j(zero, &runtime);
3645 
3646  // Check that the first argument is a JSRegExp object.
3647  __ mov(eax, Operand(esp, kJSRegExpOffset));
3648  STATIC_ASSERT(kSmiTag == 0);
3649  __ JumpIfSmi(eax, &runtime);
3650  __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
3651  __ j(not_equal, &runtime);
3652  // Check that the RegExp has been compiled (data contains a fixed array).
3653  __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
3654  if (FLAG_debug_code) {
3655  __ test(ecx, Immediate(kSmiTagMask));
3656  __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected");
3657  __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
3658  __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
3659  }
3660 
3661  // ecx: RegExp data (FixedArray)
3662  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
3663  __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
3664  __ cmp(ebx, Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
3665  __ j(not_equal, &runtime);
3666 
3667  // ecx: RegExp data (FixedArray)
3668  // Check that the number of captures fits in the static offsets vector buffer.
3669  __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
3670  // Calculate number of capture registers (number_of_captures + 1) * 2. This
3671  // uses the assumption that smis are 2 * their untagged value.
3672  STATIC_ASSERT(kSmiTag == 0);
3673  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
3674  __ add(edx, Immediate(2)); // edx was a smi.
3675  // Check that the static offsets vector buffer is large enough.
3676  __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
3677  __ j(above, &runtime);
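  // For instance, a regexp with one capture group arrives here with edx
  // holding Smi::FromInt(1) == 2; adding 2 yields the 4 capture registers
  // needed (start and end for the whole match and for the group).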
3678 
3679  // ecx: RegExp data (FixedArray)
3680  // edx: Number of capture registers
3681  // Check that the second argument is a string.
3682  __ mov(eax, Operand(esp, kSubjectOffset));
3683  __ JumpIfSmi(eax, &runtime);
3684  Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
3685  __ j(NegateCondition(is_string), &runtime);
3686  // Get the length of the string to ebx.
3687  __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
3688 
3689  // ebx: Length of subject string as a smi
3690  // ecx: RegExp data (FixedArray)
3691  // edx: Number of capture registers
3692  // Check that the third argument is a positive smi less than the subject
3693  // string length. A negative value will be greater (unsigned comparison).
3694  __ mov(eax, Operand(esp, kPreviousIndexOffset));
3695  __ JumpIfNotSmi(eax, &runtime);
3696  __ cmp(eax, ebx);
3697  __ j(above_equal, &runtime);
3698 
3699  // ecx: RegExp data (FixedArray)
3700  // edx: Number of capture registers
3701  // Check that the fourth object is a JSArray object.
3702  __ mov(eax, Operand(esp, kLastMatchInfoOffset));
3703  __ JumpIfSmi(eax, &runtime);
3704  __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
3705  __ j(not_equal, &runtime);
3706  // Check that the JSArray is in fast case.
3707  __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
3708  __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
3709  Factory* factory = masm->isolate()->factory();
3710  __ cmp(eax, factory->fixed_array_map());
3711  __ j(not_equal, &runtime);
3712  // Check that the last match info has space for the capture registers and the
3713  // additional information.
3714  __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
3715  __ SmiUntag(eax);
3716  __ add(edx, Immediate(RegExpImpl::kLastMatchOverhead));
3717  __ cmp(edx, eax);
3718  __ j(greater, &runtime);
3719 
3720  // Reset offset for possibly sliced string.
3721  __ Set(edi, Immediate(0));
3722  // ecx: RegExp data (FixedArray)
3723  // Check the representation and encoding of the subject string.
3724  Label seq_ascii_string, seq_two_byte_string, check_code;
3725  __ mov(eax, Operand(esp, kSubjectOffset));
3726  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
3727  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
3728  // First check for flat two byte string.
3729  __ and_(ebx, kIsNotStringMask |
3730  kStringRepresentationMask |
3731  kStringEncodingMask |
3732  kShortExternalStringMask);
3733  STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
3734  __ j(zero, &seq_two_byte_string, Label::kNear);
3735  // Any other flat string must be a flat ASCII string. None of the following
3736  // string type tests will succeed if subject is not a string or a short
3737  // external string.
3738  __ and_(ebx, Immediate(kIsNotStringMask |
3739  kStringRepresentationMask |
3740  kShortExternalStringMask));
3741  __ j(zero, &seq_ascii_string, Label::kNear);
3742 
3743  // ebx: whether subject is a string and if yes, its string representation
3744  // Check for flat cons string or sliced string.
3745  // A flat cons string is a cons string where the second part is the empty
3746  // string. In that case the subject string is just the first part of the cons
3747  // string. Also in this case the first part of the cons string is known to be
3748  // a sequential string or an external string.
3749  // In the case of a sliced string its offset has to be taken into account.
3750  Label cons_string, external_string, check_encoding;
3751  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
3752  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
3753  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
3754  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
3755  __ cmp(ebx, Immediate(kExternalStringTag));
3756  __ j(less, &cons_string);
3757  __ j(equal, &external_string);
3758 
3759  // Catch non-string subject or short external string.
3760  STATIC_ASSERT((kNotStringTag != 0) && (kShortExternalStringTag != 0));
3761  __ test(ebx, Immediate(kIsNotStringMask | kShortExternalStringTag));
3762  __ j(not_zero, &runtime);
3763 
3764  // String is sliced.
3765  __ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset));
3766  __ mov(eax, FieldOperand(eax, SlicedString::kParentOffset));
3767  // edi: offset of sliced string, smi-tagged.
3768  // eax: parent string.
3769  __ jmp(&check_encoding, Label::kNear);
3770  // String is a cons string, check whether it is flat.
3771  __ bind(&cons_string);
3772  __ cmp(FieldOperand(eax, ConsString::kSecondOffset), factory->empty_string());
3773  __ j(not_equal, &runtime);
3774  __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
3775  __ bind(&check_encoding);
3776  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
3777  // eax: first part of cons string or parent of sliced string.
3778  // ebx: map of first part of cons string or map of parent of sliced string.
3779  // Is first part of cons or parent of slice a flat two byte string?
3780  __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
3781  kStringRepresentationMask | kStringEncodingMask);
3782  STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
3783  __ j(zero, &seq_two_byte_string, Label::kNear);
3784  // Any other flat string must be sequential ASCII or external.
3785  __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
3786  kStringRepresentationMask);
3787  __ j(not_zero, &external_string);
3788 
3789  __ bind(&seq_ascii_string);
3790  // eax: subject string (flat ASCII)
3791  // ecx: RegExp data (FixedArray)
3792  __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
3793  __ Set(ecx, Immediate(1)); // Type is ASCII.
3794  __ jmp(&check_code, Label::kNear);
3795 
3796  __ bind(&seq_two_byte_string);
3797  // eax: subject string (flat two byte)
3798  // ecx: RegExp data (FixedArray)
3799  __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
3800  __ Set(ecx, Immediate(0)); // Type is two byte.
3801 
3802  __ bind(&check_code);
3803  // Check that the irregexp code has been generated for the actual string
3804  // encoding. If it has, the field contains a code object, otherwise it contains
3805  // a smi (code flushing support).
3806  __ JumpIfSmi(edx, &runtime);
3807 
3808  // eax: subject string
3809  // edx: code
3810  // ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
3811  // Load used arguments before starting to push arguments for call to native
3812  // RegExp code to avoid handling changing stack height.
3813  __ mov(ebx, Operand(esp, kPreviousIndexOffset));
3814  __ SmiUntag(ebx); // Previous index from smi.
3815 
3816  // eax: subject string
3817  // ebx: previous index
3818  // edx: code
3819  // ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
3820  // All checks done. Now push arguments for native regexp code.
3821  Counters* counters = masm->isolate()->counters();
3822  __ IncrementCounter(counters->regexp_entry_native(), 1);
3823 
3824  // Isolates: note we add an additional parameter here (isolate pointer).
3825  static const int kRegExpExecuteArguments = 9;
3826  __ EnterApiExitFrame(kRegExpExecuteArguments);
3827 
3828  // Argument 9: Pass current isolate address.
3829  __ mov(Operand(esp, 8 * kPointerSize),
3830  Immediate(ExternalReference::isolate_address()));
3831 
3832  // Argument 8: Indicate that this is a direct call from JavaScript.
3833  __ mov(Operand(esp, 7 * kPointerSize), Immediate(1));
3834 
3835  // Argument 7: Start (high end) of backtracking stack memory area.
3836  __ mov(esi, Operand::StaticVariable(address_of_regexp_stack_memory_address));
3837  __ add(esi, Operand::StaticVariable(address_of_regexp_stack_memory_size));
3838  __ mov(Operand(esp, 6 * kPointerSize), esi);
3839 
3840  // Argument 6: Set the number of capture registers to zero to force global
3841  // regexps to behave as non-global. This does not affect non-global regexps.
3842  __ mov(Operand(esp, 5 * kPointerSize), Immediate(0));
3843 
3844  // Argument 5: static offsets vector buffer.
3845  __ mov(Operand(esp, 4 * kPointerSize),
3846  Immediate(ExternalReference::address_of_static_offsets_vector(
3847  masm->isolate())));
3848 
3849  // Argument 2: Previous index.
3850  __ mov(Operand(esp, 1 * kPointerSize), ebx);
3851 
3852  // Argument 1: Original subject string.
3853  // The original subject is in the previous stack frame. Therefore we have to
3854  // use ebp, which points exactly to one pointer size below the previous esp.
3855  // (Because creating a new stack frame pushes the previous ebp onto the stack
3856  // and thereby moves up esp by one kPointerSize.)
3857  __ mov(esi, Operand(ebp, kSubjectOffset + kPointerSize));
3858  __ mov(Operand(esp, 0 * kPointerSize), esi);
3859 
3860  // esi: original subject string
3861  // eax: underlying subject string
3862  // ebx: previous index
3863  // ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
3864  // edx: code
3865  // Argument 4: End of string data
3866  // Argument 3: Start of string data
3867  // Prepare start and end index of the input.
3868  // Load the length from the original sliced string if that is the case.
3869  __ mov(esi, FieldOperand(esi, String::kLengthOffset));
3870  __ add(esi, edi); // Calculate input end wrt offset.
3871  __ SmiUntag(edi);
3872  __ add(ebx, edi); // Calculate input start wrt offset.
3873 
3874  // ebx: start index of the input string
3875  // esi: end index of the input string
3876  Label setup_two_byte, setup_rest;
3877  __ test(ecx, ecx);
3878  __ j(zero, &setup_two_byte, Label::kNear);
3879  __ SmiUntag(esi);
3880  __ lea(ecx, Operand(eax, esi, times_1, SeqAsciiString::kHeaderSize));
3881  __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
3882  __ lea(ecx, Operand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
3883  __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
3884  __ jmp(&setup_rest, Label::kNear);
3885 
3886  __ bind(&setup_two_byte);
3887  STATIC_ASSERT(kSmiTag == 0);
3888  STATIC_ASSERT(kSmiTagSize == 1); // esi is smi (powered by 2).
3889  __ lea(ecx, Operand(eax, esi, times_1, SeqTwoByteString::kHeaderSize));
3890  __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
3891  __ lea(ecx, Operand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
3892  __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
3893 
3894  __ bind(&setup_rest);
3895 
3896  // Locate the code entry and call it.
3897  __ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
3898  __ call(edx);
3899 
3900  // Drop arguments and come back to JS mode.
3901  __ LeaveApiExitFrame();
3902 
3903  // Check the result.
3904  Label success;
3905  __ cmp(eax, 1);
3906  // We expect exactly one result since we force the called regexp to behave
3907  // as non-global.
3908  __ j(equal, &success);
3909  Label failure;
3910  __ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
3911  __ j(equal, &failure);
3912  __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION);
3913  // If not exception it can only be retry. Handle that in the runtime system.
3914  __ j(not_equal, &runtime);
3915  // Result must now be exception. If there is no pending exception already, a
3916  // stack overflow (on the backtrack stack) was detected in RegExp code but
3917  // the exception has not been created yet. Handle that in the runtime system.
3918  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
3919  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
3920  masm->isolate());
3921  __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
3922  __ mov(eax, Operand::StaticVariable(pending_exception));
3923  __ cmp(edx, eax);
3924  __ j(equal, &runtime);
3925  // For exception, throw the exception again.
3926 
3927  // Clear the pending exception variable.
3928  __ mov(Operand::StaticVariable(pending_exception), edx);
3929 
3930  // Special handling of termination exceptions which are uncatchable
3931  // by javascript code.
3932  __ cmp(eax, factory->termination_exception());
3933  Label throw_termination_exception;
3934  __ j(equal, &throw_termination_exception, Label::kNear);
3935 
3936  // Handle normal exception by following handler chain.
3937  __ Throw(eax);
3938 
3939  __ bind(&throw_termination_exception);
3940  __ ThrowUncatchable(eax);
3941 
3942  __ bind(&failure);
3943  // For failure to match, return null.
3944  __ mov(eax, factory->null_value());
3945  __ ret(4 * kPointerSize);
3946 
3947  // Load RegExp data.
3948  __ bind(&success);
3949  __ mov(eax, Operand(esp, kJSRegExpOffset));
3950  __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
3951  __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
3952  // Calculate number of capture registers (number_of_captures + 1) * 2.
3953  STATIC_ASSERT(kSmiTag == 0);
3954  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
3955  __ add(edx, Immediate(2)); // edx was a smi.
3956 
3957  // edx: Number of capture registers
3958  // Load last_match_info which is still known to be a fast case JSArray.
3959  __ mov(eax, Operand(esp, kLastMatchInfoOffset));
3960  __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
3961 
3962  // ebx: last_match_info backing store (FixedArray)
3963  // edx: number of capture registers
3964  // Store the capture count.
3965  __ SmiTag(edx); // Number of capture registers to smi.
3966  __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
3967  __ SmiUntag(edx); // Number of capture registers back from smi.
3968  // Store last subject and last input.
3969  __ mov(eax, Operand(esp, kSubjectOffset));
3970  __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
3971  __ RecordWriteField(ebx,
3972  RegExpImpl::kLastSubjectOffset,
3973  eax,
3974  edi,
3975  kDontSaveFPRegs);
3976  __ mov(eax, Operand(esp, kSubjectOffset));
3977  __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
3978  __ RecordWriteField(ebx,
3979  RegExpImpl::kLastInputOffset,
3980  eax,
3981  edi,
3982  kDontSaveFPRegs);
3983 
3984  // Get the static offsets vector filled by the native regexp code.
3985  ExternalReference address_of_static_offsets_vector =
3986  ExternalReference::address_of_static_offsets_vector(masm->isolate());
3987  __ mov(ecx, Immediate(address_of_static_offsets_vector));
3988 
3989  // ebx: last_match_info backing store (FixedArray)
3990  // ecx: offsets vector
3991  // edx: number of capture registers
3992  Label next_capture, done;
3993  // Capture register counter starts from number of capture registers and
3994  // counts down until wrapping after zero.
3995  __ bind(&next_capture);
3996  __ sub(edx, Immediate(1));
3997  __ j(negative, &done, Label::kNear);
3998  // Read the value from the static offsets vector buffer.
3999  __ mov(edi, Operand(ecx, edx, times_int_size, 0));
4000  __ SmiTag(edi);
4001  // Store the smi value in the last match info.
4002  __ mov(FieldOperand(ebx,
4003  edx,
4004  times_pointer_size,
4005  RegExpImpl::kFirstCaptureOffset),
4006  edi);
4007  __ jmp(&next_capture);
4008  __ bind(&done);
4009 
4010  // Return last match info.
4011  __ mov(eax, Operand(esp, kLastMatchInfoOffset));
4012  __ ret(4 * kPointerSize);
4013 
4014  // External string. Short external strings have already been ruled out.
4015  // eax: subject string (expected to be external)
4016  // ebx: scratch
4017  __ bind(&external_string);
4018  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
4019  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
4020  if (FLAG_debug_code) {
4021  // Assert that we do not have a cons or slice (indirect strings) here.
4022  // Sequential strings have already been ruled out.
4023  __ test_b(ebx, kIsIndirectStringMask);
4024  __ Assert(zero, "external string expected, but not found");
4025  }
4026  __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
4027  // Move the pointer so that offset-wise, it looks like a sequential string.
4028  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
4029  __ sub(eax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
4030  STATIC_ASSERT(kTwoByteStringTag == 0);
4031  __ test_b(ebx, kStringEncodingMask);
4032  __ j(not_zero, &seq_ascii_string);
4033  __ jmp(&seq_two_byte_string);
4034 
4035  // Do the runtime call to execute the regexp.
4036  __ bind(&runtime);
4037  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4038 #endif // V8_INTERPRETED_REGEXP
4039 }
4040 
4041 
4042 void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
4043  const int kMaxInlineLength = 100;
4044  Label slowcase;
4045  Label done;
4046  __ mov(ebx, Operand(esp, kPointerSize * 3));
4047  __ JumpIfNotSmi(ebx, &slowcase);
4048  __ cmp(ebx, Immediate(Smi::FromInt(kMaxInlineLength)));
4049  __ j(above, &slowcase);
4050  // Smi-tagging is equivalent to multiplying by 2.
4051  STATIC_ASSERT(kSmiTag == 0);
4052  STATIC_ASSERT(kSmiTagSize == 1);
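  // Example (ia32 assumption: kPointerSize == 4): a length of 5 is the
  // smi 10, and scaling by times_half_pointer_size (x2) below turns it
  // into 20 bytes, i.e. five pointer-size element slots.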
4053  // Allocate RegExpResult followed by FixedArray with size in ebx.
4054  // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
4055  // Elements: [Map][Length][..elements..]
4056  __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
4057  times_half_pointer_size, // Converts the smi length to a byte count.
4058  ebx, // In: Number of elements (times 2, being a smi)
4059  eax, // Out: Start of allocation (tagged).
4060  ecx, // Out: End of allocation.
4061  edx, // Scratch register
4062  &slowcase,
4063  TAG_OBJECT);
4064  // eax: Start of allocated area, object-tagged.
4065 
4066  // Set JSArray map to global.regexp_result_map().
4067  // Set empty properties FixedArray.
4068  // Set elements to point to FixedArray allocated right after the JSArray.
4069  // Interleave operations for better latency.
4070  __ mov(edx, ContextOperand(esi, Context::GLOBAL_INDEX));
4071  Factory* factory = masm->isolate()->factory();
4072  __ mov(ecx, Immediate(factory->empty_fixed_array()));
4073  __ lea(ebx, Operand(eax, JSRegExpResult::kSize));
4074  __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalContextOffset));
4075  __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
4076  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
4077  __ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX));
4078  __ mov(FieldOperand(eax, HeapObject::kMapOffset), edx);
4079 
4080  // Set input, index and length fields from arguments.
4081  __ mov(ecx, Operand(esp, kPointerSize * 1));
4082  __ mov(FieldOperand(eax, JSRegExpResult::kInputOffset), ecx);
4083  __ mov(ecx, Operand(esp, kPointerSize * 2));
4084  __ mov(FieldOperand(eax, JSRegExpResult::kIndexOffset), ecx);
4085  __ mov(ecx, Operand(esp, kPointerSize * 3));
4086  __ mov(FieldOperand(eax, JSArray::kLengthOffset), ecx);
4087 
4088  // Fill out the elements FixedArray.
4089  // eax: JSArray.
4090  // ebx: FixedArray.
4091  // ecx: Number of elements in array, as smi.
4092 
4093  // Set map.
4094  __ mov(FieldOperand(ebx, HeapObject::kMapOffset),
4095  Immediate(factory->fixed_array_map()));
4096  // Set length.
4097  __ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
4098  // Fill contents of fixed-array with the-hole.
4099  __ SmiUntag(ecx);
4100  __ mov(edx, Immediate(factory->the_hole_value()));
4101  __ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
4102  // Fill fixed array elements with hole.
4103  // eax: JSArray.
4104  // ecx: Number of elements to fill.
4105  // ebx: Start of elements in FixedArray.
4106  // edx: the hole.
4107  Label loop;
4108  __ test(ecx, ecx);
4109  __ bind(&loop);
4110  __ j(less_equal, &done, Label::kNear); // Jump if ecx is negative or zero.
4111  __ sub(ecx, Immediate(1));
4112  __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
4113  __ jmp(&loop);
4114 
4115  __ bind(&done);
4116  __ ret(3 * kPointerSize);
4117 
4118  __ bind(&slowcase);
4119  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
4120 }
4121 
4122 
4123 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
4124  Register object,
4125  Register result,
4126  Register scratch1,
4127  Register scratch2,
4128  bool object_is_smi,
4129  Label* not_found) {
4130  // Use of registers. Register result is used as a temporary.
4131  Register number_string_cache = result;
4132  Register mask = scratch1;
4133  Register scratch = scratch2;
4134 
4135  // Load the number string cache.
4136  ExternalReference roots_array_start =
4137  ExternalReference::roots_array_start(masm->isolate());
4138  __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
4139  __ mov(number_string_cache,
4140  Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
4141  // Make the hash mask from the length of the number string cache. It
4142  // contains two elements (number and string) for each cache entry.
4143  __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
4144  __ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
4145  __ sub(mask, Immediate(1)); // Make mask.
4146 
4147  // Calculate the entry in the number string cache. The hash value in the
4148  // number string cache for smis is just the smi value, and the hash for
4149  // doubles is the xor of the upper and lower words. See
4150  // Heap::GetNumberStringCache.
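  // Worked example (a cache of 128 elements, i.e. 64 entries, assumed, so
  // mask == 63): the smi 42 hashes to entry 42 & 63 == 42, which occupies
  // FixedArray slots 84 (the number key) and 85 (the cached string).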
4151  Label smi_hash_calculated;
4152  Label load_result_from_cache;
4153  if (object_is_smi) {
4154  __ mov(scratch, object);
4155  __ SmiUntag(scratch);
4156  } else {
4157  Label not_smi;
4158  STATIC_ASSERT(kSmiTag == 0);
4159  __ JumpIfNotSmi(object, &not_smi, Label::kNear);
4160  __ mov(scratch, object);
4161  __ SmiUntag(scratch);
4162  __ jmp(&smi_hash_calculated, Label::kNear);
4163  __ bind(&not_smi);
4164  __ cmp(FieldOperand(object, HeapObject::kMapOffset),
4165  masm->isolate()->factory()->heap_number_map());
4166  __ j(not_equal, not_found);
4167  STATIC_ASSERT(8 == kDoubleSize);
4168  __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
4169  __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
4170  // Object is heap number and hash is now in scratch. Calculate cache index.
4171  __ and_(scratch, mask);
4172  Register index = scratch;
4173  Register probe = mask;
4174  __ mov(probe,
4175  FieldOperand(number_string_cache,
4176  index,
4177  times_twice_pointer_size,
4178  FixedArray::kHeaderSize));
4179  __ JumpIfSmi(probe, not_found);
4180  if (CpuFeatures::IsSupported(SSE2)) {
4181  CpuFeatures::Scope fscope(SSE2);
4182  __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
4183  __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
4184  __ ucomisd(xmm0, xmm1);
4185  } else {
4186  __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
4187  __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
4188  __ FCmp();
4189  }
4190  __ j(parity_even, not_found); // Bail out if NaN is involved.
4191  __ j(not_equal, not_found); // The cache did not contain this value.
4192  __ jmp(&load_result_from_cache, Label::kNear);
4193  }
4194 
4195  __ bind(&smi_hash_calculated);
4196  // Object is smi and hash is now in scratch. Calculate cache index.
4197  __ and_(scratch, mask);
4198  Register index = scratch;
4199  // Check if the entry is the smi we are looking for.
4200  __ cmp(object,
4201  FieldOperand(number_string_cache,
4202  index,
4203  times_twice_pointer_size,
4204  FixedArray::kHeaderSize));
4205  __ j(not_equal, not_found);
4206 
4207  // Get the result from the cache.
4208  __ bind(&load_result_from_cache);
4209  __ mov(result,
4210  FieldOperand(number_string_cache,
4211  index,
4212  times_twice_pointer_size,
4213  FixedArray::kHeaderSize + kPointerSize));
4214  Counters* counters = masm->isolate()->counters();
4215  __ IncrementCounter(counters->number_to_string_native(), 1);
4216 }
4217 
4218 
4219 void NumberToStringStub::Generate(MacroAssembler* masm) {
4220  Label runtime;
4221 
4222  __ mov(ebx, Operand(esp, kPointerSize));
4223 
4224  // Generate code to lookup number in the number string cache.
4225  GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, false, &runtime);
4226  __ ret(1 * kPointerSize);
4227 
4228  __ bind(&runtime);
4229  // Handle number to string in the runtime system if not found in the cache.
4230  __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
4231 }
4232 
4233 
4234 static int NegativeComparisonResult(Condition cc) {
4235  ASSERT(cc != equal);
4236  ASSERT((cc == less) || (cc == less_equal)
4237  || (cc == greater) || (cc == greater_equal));
4238  return (cc == greater || cc == greater_equal) ? LESS : GREATER;
4239 }
4240 
4241 void CompareStub::Generate(MacroAssembler* masm) {
4242  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
4243 
4244  Label check_unequal_objects;
4245 
4246  // Compare two smis if required.
4247  if (include_smi_compare_) {
4248  Label non_smi, smi_done;
4249  __ mov(ecx, edx);
4250  __ or_(ecx, eax);
4251  __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
4252  __ sub(edx, eax); // Return on the result of the subtraction.
4253  __ j(no_overflow, &smi_done, Label::kNear);
4254  __ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
4255  __ bind(&smi_done);
4256  __ mov(eax, edx);
4257  __ ret(0);
4258  __ bind(&non_smi);
4259  } else if (FLAG_debug_code) {
4260  __ mov(ecx, edx);
4261  __ or_(ecx, eax);
4262  __ test(ecx, Immediate(kSmiTagMask));
4263  __ Assert(not_zero, "Unexpected smi operands.");
4264  }
4265 
4266  // NOTICE! This code is only reached after a smi-fast-case check, so
4267  // it is certain that at least one operand isn't a smi.
4268 
4269  // Identical objects can be compared fast, but there are some tricky cases
4270  // for NaN and undefined.
4271  {
4272  Label not_identical;
4273  __ cmp(eax, edx);
4274  __ j(not_equal, &not_identical);
4275 
4276  if (cc_ != equal) {
4277  // Check for undefined. undefined OP undefined is false even though
4278  // undefined == undefined.
4279  Label check_for_nan;
4280  __ cmp(edx, masm->isolate()->factory()->undefined_value());
4281  __ j(not_equal, &check_for_nan, Label::kNear);
4282  __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
4283  __ ret(0);
4284  __ bind(&check_for_nan);
4285  }
4286 
4287  // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
4288  // so we do the second best thing - test it ourselves.
4289  // Note: if cc_ != equal, never_nan_nan_ is not used.
4290  if (never_nan_nan_ && (cc_ == equal)) {
4291  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
4292  __ ret(0);
4293  } else {
4294  Label heap_number;
4295  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
4296  Immediate(masm->isolate()->factory()->heap_number_map()));
4297  __ j(equal, &heap_number, Label::kNear);
4298  if (cc_ != equal) {
4299  // Call runtime on identical JSObjects. Otherwise return equal.
4300  __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
4301  __ j(above_equal, &not_identical);
4302  }
4303  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
4304  __ ret(0);
4305 
4306  __ bind(&heap_number);
4307  // It is a heap number, so return non-equal if it's NaN and equal if
4308  // it's not NaN.
4309  // The representation of NaN values has all exponent bits (52..62) set,
4310  // and not all mantissa bits (0..51) clear.
4311  // We only accept QNaNs, which have bit 51 set.
4312  // Read top bits of double representation (second word of value).
4313 
4314  // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
4315  // all bits in the mask are set. We only need to check the word
4316  // that contains the exponent and high bit of the mantissa.
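  // Worked example (kQuietNaNHighBitsMask == 0xfff << 19 assumed): the
  // canonical quiet NaN 0x7FF8000000000000 has high word 0x7FF80000;
  // doubling gives 0xFFF00000, which is above_equal the doubled mask,
  // so the value is classified as NaN below.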
4317  STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
4318  __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
4319  __ Set(eax, Immediate(0));
4320  // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
4321  // bits.
4322  __ add(edx, edx);
4323  __ cmp(edx, kQuietNaNHighBitsMask << 1);
4324  if (cc_ == equal) {
4325  STATIC_ASSERT(EQUAL != 1);
4326  __ setcc(above_equal, eax);
4327  __ ret(0);
4328  } else {
4329  Label nan;
4330  __ j(above_equal, &nan, Label::kNear);
4331  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
4332  __ ret(0);
4333  __ bind(&nan);
4334  __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
4335  __ ret(0);
4336  }
4337  }
4338 
4339  __ bind(&not_identical);
4340  }
4341 
4342  // Strict equality can quickly decide whether objects are equal.
4343  // Non-strict object equality is slower, so it is handled later in the stub.
4344  if (cc_ == equal && strict_) {
4345  Label slow; // Fallthrough label.
4346  Label not_smis;
4347  // If we're doing a strict equality comparison, we don't have to do
4348  // type conversion, so we generate code to do fast comparison for objects
4349  // and oddballs. Non-smi numbers and strings still go through the usual
4350  // slow-case code.
4351  // If either is a Smi (we know that not both are), then they can only
4352  // be equal if the other is a HeapNumber. If so, use the slow case.
4353  STATIC_ASSERT(kSmiTag == 0);
4354  ASSERT_EQ(0, Smi::FromInt(0));
4355  __ mov(ecx, Immediate(kSmiTagMask));
4356  __ and_(ecx, eax);
4357  __ test(ecx, edx);
4358  __ j(not_zero, &not_smis, Label::kNear);
4359  // One operand is a smi.
4360 
4361  // Check whether the non-smi is a heap number.
4362  STATIC_ASSERT(kSmiTagMask == 1);
4363  // ecx still holds eax & kSmiTag, which is either zero or one.
4364  __ sub(ecx, Immediate(0x01));
4365  __ mov(ebx, edx);
4366  __ xor_(ebx, eax);
4367  __ and_(ebx, ecx); // ebx holds either 0 or eax ^ edx.
4368  __ xor_(ebx, eax);
4369  // if eax was smi, ebx is now edx, else eax.
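  // In effect this is a branch-free select: ecx is -1 (all ones) when eax
  // is the smi and 0 when eax is the heap object, so
  // ((eax ^ edx) & ecx) ^ eax yields edx in the first case and eax in the
  // second, i.e. always the non-smi operand.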
4370 
4371  // Check if the non-smi operand is a heap number.
4372  __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
4373  Immediate(masm->isolate()->factory()->heap_number_map()));
4374  // If heap number, handle it in the slow case.
4375  __ j(equal, &slow, Label::kNear);
4376  // Return non-equal (ebx is not zero)
4377  __ mov(eax, ebx);
4378  __ ret(0);
4379 
4380  __ bind(&not_smis);
4381  // If either operand is a JSObject or an oddball value, then they are not
4382  // equal since their pointers are different.
4383  // There is no test for undetectability in strict equality.
4384 
4385  // Get the type of the first operand.
4386  // If the first object is a JS object, we have done pointer comparison.
4387  Label first_non_object;
4388  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
4389  __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
4390  __ j(below, &first_non_object, Label::kNear);
4391 
4392  // Return non-zero (eax is not zero)
4393  Label return_not_equal;
4394  STATIC_ASSERT(kHeapObjectTag != 0);
4395  __ bind(&return_not_equal);
4396  __ ret(0);
4397 
4398  __ bind(&first_non_object);
4399  // Check for oddballs: true, false, null, undefined.
4400  __ CmpInstanceType(ecx, ODDBALL_TYPE);
4401  __ j(equal, &return_not_equal);
4402 
4403  __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ecx);
4404  __ j(above_equal, &return_not_equal);
4405 
4406  // Check for oddballs: true, false, null, undefined.
4407  __ CmpInstanceType(ecx, ODDBALL_TYPE);
4408  __ j(equal, &return_not_equal);
4409 
4410  // Fall through to the general case.
4411  __ bind(&slow);
4412  }
4413 
4414  // Generate the number comparison code.
4415  if (include_number_compare_) {
4416  Label non_number_comparison;
4417  Label unordered;
4418  if (CpuFeatures::IsSupported(SSE2)) {
4419  CpuFeatures::Scope use_sse2(SSE2);
4420  CpuFeatures::Scope use_cmov(CMOV);
4421 
4422  FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
4423  __ ucomisd(xmm0, xmm1);
4424 
4425  // Don't base result on EFLAGS when a NaN is involved.
4426  __ j(parity_even, &unordered, Label::kNear);
4427  // Return a result of -1, 0, or 1, based on EFLAGS.
4428  __ mov(eax, 0); // equal
4429  __ mov(ecx, Immediate(Smi::FromInt(1)));
4430  __ cmov(above, eax, ecx);
4431  __ mov(ecx, Immediate(Smi::FromInt(-1)));
4432  __ cmov(below, eax, ecx);
4433  __ ret(0);
4434  } else {
4435  FloatingPointHelper::CheckFloatOperands(
4436  masm, &non_number_comparison, ebx);
4437  FloatingPointHelper::LoadFloatOperand(masm, eax);
4438  FloatingPointHelper::LoadFloatOperand(masm, edx);
4439  __ FCmp();
4440 
4441  // Don't base result on EFLAGS when a NaN is involved.
4442  __ j(parity_even, &unordered, Label::kNear);
4443 
4444  Label below_label, above_label;
4445  // Return a result of -1, 0, or 1, based on EFLAGS.
4446  __ j(below, &below_label, Label::kNear);
4447  __ j(above, &above_label, Label::kNear);
4448 
4449  __ Set(eax, Immediate(0));
4450  __ ret(0);
4451 
4452  __ bind(&below_label);
4453  __ mov(eax, Immediate(Smi::FromInt(-1)));
4454  __ ret(0);
4455 
4456  __ bind(&above_label);
4457  __ mov(eax, Immediate(Smi::FromInt(1)));
4458  __ ret(0);
4459  }
4460 
4461  // If one of the numbers was NaN, then the result is always false.
4462  // The cc is never not-equal.
4463  __ bind(&unordered);
4464  ASSERT(cc_ != not_equal);
4465  if (cc_ == less || cc_ == less_equal) {
4466  __ mov(eax, Immediate(Smi::FromInt(1)));
4467  } else {
4468  __ mov(eax, Immediate(Smi::FromInt(-1)));
4469  }
4470  __ ret(0);
4471 
4472  // The number comparison code did not provide a valid result.
4473  __ bind(&non_number_comparison);
4474  }
4475 
4476  // Fast negative check for symbol-to-symbol equality.
4477  Label check_for_strings;
4478  if (cc_ == equal) {
4479  BranchIfNonSymbol(masm, &check_for_strings, eax, ecx);
4480  BranchIfNonSymbol(masm, &check_for_strings, edx, ecx);
4481 
4482  // We've already checked for object identity, so if both operands
4483  // are symbols they aren't equal. Register eax already holds a
4484  // non-zero value, which indicates not equal, so just return.
4485  __ ret(0);
4486  }
4487 
4488  __ bind(&check_for_strings);
4489 
4490  __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
4491  &check_unequal_objects);
4492 
4493  // Inline comparison of ASCII strings.
4494  if (cc_ == equal) {
4495  StringCompareStub::GenerateFlatAsciiStringEquals(masm,
4496  edx,
4497  eax,
4498  ecx,
4499  ebx);
4500  } else {
4501  StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
4502  edx,
4503  eax,
4504  ecx,
4505  ebx,
4506  edi);
4507  }
4508 #ifdef DEBUG
4509  __ Abort("Unexpected fall-through from string comparison");
4510 #endif
4511 
4512  __ bind(&check_unequal_objects);
4513  if (cc_ == equal && !strict_) {
4514  // Non-strict equality. Objects are unequal if
4515  // they are both JSObjects and not undetectable,
4516  // and their pointers are different.
4517  Label not_both_objects;
4518  Label return_unequal;
4519  // At most one is a smi, so we can test for smi by adding the two.
4520  // A smi plus a heap object has the low bit set, a heap object plus
4521  // a heap object has the low bit clear.
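  // Tag arithmetic, spelled out: smi (...0) + heap object (...1) has low
  // bit 1, while heap object (...1) + heap object (...1) has low bit 0
  // (plus a carry), so testing kSmiTagMask on the sum separates the cases.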
4522  STATIC_ASSERT(kSmiTag == 0);
4523  STATIC_ASSERT(kSmiTagMask == 1);
4524  __ lea(ecx, Operand(eax, edx, times_1, 0));
4525  __ test(ecx, Immediate(kSmiTagMask));
4526  __ j(not_zero, &not_both_objects, Label::kNear);
4527  __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
4528  __ j(below, &not_both_objects, Label::kNear);
4529  __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ebx);
4530  __ j(below, &not_both_objects, Label::kNear);
4531  // We do not bail out after this point. Both are JSObjects, and
4532  // they are equal if and only if both are undetectable.
4533  // The and of the undetectable flags is 1 if and only if they are equal.
4534  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
4535  1 << Map::kIsUndetectable);
4536  __ j(zero, &return_unequal, Label::kNear);
4537  __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
4538  1 << Map::kIsUndetectable);
4539  __ j(zero, &return_unequal, Label::kNear);
4540  // The objects are both undetectable, so they both compare as the value
4541  // undefined, and are equal.
4542  __ Set(eax, Immediate(EQUAL));
4543  __ bind(&return_unequal);
4544  // Return non-equal by returning the non-zero object pointer in eax,
4545  // or return equal if we fell through to here.
4546  __ ret(0); // eax holds the comparison result.
4547  __ bind(&not_both_objects);
4548  }
4549 
4550  // Push arguments below the return address.
4551  __ pop(ecx);
4552  __ push(edx);
4553  __ push(eax);
4554 
4555  // Figure out which native to call and setup the arguments.
4556  Builtins::JavaScript builtin;
4557  if (cc_ == equal) {
4558  builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
4559  } else {
4560  builtin = Builtins::COMPARE;
4561  __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
4562  }
4563 
4564  // Restore return address on the stack.
4565  __ push(ecx);
4566 
4567  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
4568  // tagged as a small integer.
4569  __ InvokeBuiltin(builtin, JUMP_FUNCTION);
4570 }
4571 
4572 
4573 void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
4574  Label* label,
4575  Register object,
4576  Register scratch) {
4577  __ JumpIfSmi(object, label);
4578  __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
4579  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
4580  __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
4581  __ cmp(scratch, kSymbolTag | kStringTag);
4582  __ j(not_equal, label);
4583 }
4584 
4585 
4586 void StackCheckStub::Generate(MacroAssembler* masm) {
4587  __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
4588 }
4589 
4590 
4591 void InterruptStub::Generate(MacroAssembler* masm) {
4592  __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
4593 }
4594 
4595 
4596 static void GenerateRecordCallTarget(MacroAssembler* masm) {
4597  // Cache the called function in a global property cell. Cache states
4598  // are uninitialized, monomorphic (indicated by a JSFunction), and
4599  // megamorphic.
4600  // ebx : cache cell for call target
4601  // edi : the function to call
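  // State transitions, summarized: uninitialized -> monomorphic on the
  // first call (the cell then holds that JSFunction); monomorphic ->
  // megamorphic when a different function shows up; megamorphic is final.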
4602  Isolate* isolate = masm->isolate();
4603  Label initialize, done;
4604 
4605  // Load the cache state into ecx.
4606  __ mov(ecx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
4607 
4608  // A monomorphic cache hit or an already megamorphic state: invoke the
4609  // function without changing the state.
4610  __ cmp(ecx, edi);
4611  __ j(equal, &done, Label::kNear);
4612  __ cmp(ecx, Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
4613  __ j(equal, &done, Label::kNear);
4614 
4615  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
4616  // megamorphic.
4617  __ cmp(ecx, Immediate(TypeFeedbackCells::UninitializedSentinel(isolate)));
4618  __ j(equal, &initialize, Label::kNear);
4619  // MegamorphicSentinel is an immortal immovable object (undefined) so no
4620  // write-barrier is needed.
4621  __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
4622  Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
4623  __ jmp(&done, Label::kNear);
4624 
4625  // An uninitialized cache is patched with the function.
4626  __ bind(&initialize);
4627  __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), edi);
4628  // No need for a write barrier here - cells are rescanned.
4629 
4630  __ bind(&done);
4631 }
4632 
4633 
4634 void CallFunctionStub::Generate(MacroAssembler* masm) {
4635  // ebx : cache cell for call target
4636  // edi : the function to call
4637  Isolate* isolate = masm->isolate();
4638  Label slow, non_function;
4639 
4640  // The receiver might implicitly be the global object. This is
4641  // indicated by passing the hole as the receiver to the call
4642  // function stub.
4643  if (ReceiverMightBeImplicit()) {
4644  Label receiver_ok;
4645  // Get the receiver from the stack.
4646  // +1 ~ return address
4647  __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
4648  // Call as function is indicated with the hole.
4649  __ cmp(eax, isolate->factory()->the_hole_value());
4650  __ j(not_equal, &receiver_ok, Label::kNear);
4651  // Patch the receiver on the stack with the global receiver object.
4652  __ mov(ecx, GlobalObjectOperand());
4653  __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
4654  __ mov(Operand(esp, (argc_ + 1) * kPointerSize), ecx);
4655  __ bind(&receiver_ok);
4656  }
4657 
4658  // Check that the function really is a JavaScript function.
4659  __ JumpIfSmi(edi, &non_function);
4660  // Goto slow case if we do not have a function.
4661  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
4662  __ j(not_equal, &slow);
4663 
4664  if (RecordCallTarget()) {
4665  GenerateRecordCallTarget(masm);
4666  }
4667 
4668  // Fast-case: Just invoke the function.
4669  ParameterCount actual(argc_);
4670 
4671  if (ReceiverMightBeImplicit()) {
4672  Label call_as_function;
4673  __ cmp(eax, isolate->factory()->the_hole_value());
4674  __ j(equal, &call_as_function);
4675  __ InvokeFunction(edi,
4676  actual,
4677  JUMP_FUNCTION,
4678  NullCallWrapper(),
4679  CALL_AS_METHOD);
4680  __ bind(&call_as_function);
4681  }
4682  __ InvokeFunction(edi,
4683  actual,
4684  JUMP_FUNCTION,
4685  NullCallWrapper(),
4686  CALL_AS_FUNCTION);
4687 
4688  // Slow-case: Non-function called.
4689  __ bind(&slow);
4690  if (RecordCallTarget()) {
4691  // If there is a call target cache, mark it megamorphic in the
4692  // non-function case. MegamorphicSentinel is an immortal immovable
4693  // object (undefined) so no write barrier is needed.
4694  __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
4695  Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
4696  }
4697  // Check for function proxy.
4698  __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
4699  __ j(not_equal, &non_function);
4700  __ pop(ecx);
4701  __ push(edi); // put proxy as additional argument under return address
4702  __ push(ecx);
4703  __ Set(eax, Immediate(argc_ + 1));
4704  __ Set(ebx, Immediate(0));
4705  __ SetCallKind(ecx, CALL_AS_FUNCTION);
4706  __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
4707  {
4708  Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
4709  __ jmp(adaptor, RelocInfo::CODE_TARGET);
4710  }
4711 
4712  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
4713  // of the original receiver from the call site).
4714  __ bind(&non_function);
4715  __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi);
4716  __ Set(eax, Immediate(argc_));
4717  __ Set(ebx, Immediate(0));
4718  __ SetCallKind(ecx, CALL_AS_METHOD);
4719  __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
4720  Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
4721  __ jmp(adaptor, RelocInfo::CODE_TARGET);
4722 }
4723 
4724 
4725 void CallConstructStub::Generate(MacroAssembler* masm) {
4726  // eax : number of arguments
4727  // ebx : cache cell for call target
4728  // edi : constructor function
4729  Label slow, non_function_call;
4730 
4731  // Check that function is not a smi.
4732  __ JumpIfSmi(edi, &non_function_call);
4733  // Check that function is a JSFunction.
4734  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
4735  __ j(not_equal, &slow);
4736 
4737  if (RecordCallTarget()) {
4738  GenerateRecordCallTarget(masm);
4739  }
4740 
4741  // Jump to the function-specific construct stub.
4742  __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
4743  __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset));
4744  __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
4745  __ jmp(ebx);
4746 
4747  // edi: called object
4748  // eax: number of arguments
4749  // ecx: object map
4750  Label do_call;
4751  __ bind(&slow);
4752  __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
4753  __ j(not_equal, &non_function_call);
4754  __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
4755  __ jmp(&do_call);
4756 
4757  __ bind(&non_function_call);
4758  __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
4759  __ bind(&do_call);
4760  // Set expected number of arguments to zero (not changing eax).
4761  __ Set(ebx, Immediate(0));
4762  Handle<Code> arguments_adaptor =
4763  masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
4764  __ SetCallKind(ecx, CALL_AS_METHOD);
4765  __ jmp(arguments_adaptor, RelocInfo::CODE_TARGET);
4766 }
4767 
4768 
4769 bool CEntryStub::NeedsImmovableCode() {
4770  return false;
4771 }
4772 
4773 
4774 bool CEntryStub::IsPregenerated() {
4775  return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
4776  result_size_ == 1;
4777 }
4778 
4779 
4780 void CodeStub::GenerateStubsAheadOfTime() {
4781  CEntryStub::GenerateAheadOfTime();
4782  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
4783  // It is important that the store buffer overflow stubs are generated first.
4784  RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
4785 }
4786 
4787 
4788 void CodeStub::GenerateFPStubs() {
4789  CEntryStub save_doubles(1, kSaveFPRegs);
4790  Handle<Code> code = save_doubles.GetCode();
4791  code->set_is_pregenerated(true);
4792  code->GetIsolate()->set_fp_stubs_generated(true);
4793 }
4794 
4795 
4796 void CEntryStub::GenerateAheadOfTime() {
4797  CEntryStub stub(1, kDontSaveFPRegs);
4798  Handle<Code> code = stub.GetCode();
4799  code->set_is_pregenerated(true);
4800 }
4801 
4802 
4803 void CEntryStub::GenerateCore(MacroAssembler* masm,
4804  Label* throw_normal_exception,
4805  Label* throw_termination_exception,
4806  Label* throw_out_of_memory_exception,
4807  bool do_gc,
4808  bool always_allocate_scope) {
4809  // eax: result parameter for PerformGC, if any
4810  // ebx: pointer to C function (C callee-saved)
4811  // ebp: frame pointer (restored after C call)
4812  // esp: stack pointer (restored after C call)
4813  // edi: number of arguments including receiver (C callee-saved)
4814  // esi: pointer to the first argument (C callee-saved)
4815 
4816  // Result returned in eax, or eax+edx if result_size_ is 2.
4817 
4818  // Check stack alignment.
4819  if (FLAG_debug_code) {
4820  __ CheckStackAlignment();
4821  }
4822 
4823  if (do_gc) {
4824  // Pass failure code returned from last attempt as first argument to
4825  // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
4826  // stack alignment is known to be correct. This function takes one argument
4827  // which is passed on the stack, and we know that the stack has been
4828  // prepared to pass at least one argument.
4829  __ mov(Operand(esp, 0 * kPointerSize), eax); // Result.
4830  __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
4831  }
4832 
4833  ExternalReference scope_depth =
4834  ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
4835  if (always_allocate_scope) {
4836  __ inc(Operand::StaticVariable(scope_depth));
4837  }
4838 
4839  // Call C function.
4840  __ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
4841  __ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
4842  __ mov(Operand(esp, 2 * kPointerSize),
4843  Immediate(ExternalReference::isolate_address()));
4844  __ call(ebx);
4845  // Result is in eax or edx:eax - do not destroy these registers!
4846 
4847  if (always_allocate_scope) {
4848  __ dec(Operand::StaticVariable(scope_depth));
4849  }
4850 
4851  // Make sure we're not trying to return 'the hole' from the runtime
4852  // call as this may lead to crashes in the IC code later.
4853  if (FLAG_debug_code) {
4854  Label okay;
4855  __ cmp(eax, masm->isolate()->factory()->the_hole_value());
4856  __ j(not_equal, &okay, Label::kNear);
4857  __ int3();
4858  __ bind(&okay);
4859  }
4860 
4861  // Check for failure result.
4862  Label failure_returned;
4863  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
4864  __ lea(ecx, Operand(eax, 1));
4865  // Lower 2 bits of ecx are 0 iff eax has failure tag.
4866  __ test(ecx, Immediate(kFailureTagMask));
4867  __ j(zero, &failure_returned);
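  // Failure objects have both low tag bits set (kFailureTag == 3 with
  // kFailureTagSize == 2 assumed), so eax + 1 clears those bits exactly
  // when eax is a failure; smis (low bit 0) and heap objects (tag 01)
  // keep at least one of the two bits set after the increment.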
4868 
4869  ExternalReference pending_exception_address(
4870  Isolate::kPendingExceptionAddress, masm->isolate());
4871 
4872  // Check that there is no pending exception, otherwise we
4873  // should have returned some failure value.
4874  if (FLAG_debug_code) {
4875  __ push(edx);
4876  __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
4877  Label okay;
4878  __ cmp(edx, Operand::StaticVariable(pending_exception_address));
4879  // Cannot use check here as it attempts to generate call into runtime.
4880  __ j(equal, &okay, Label::kNear);
4881  __ int3();
4882  __ bind(&okay);
4883  __ pop(edx);
4884  }
4885 
4886  // Exit the JavaScript to C++ exit frame.
4887  __ LeaveExitFrame(save_doubles_ == kSaveFPRegs);
4888  __ ret(0);
4889 
4890  // Handling of failure.
4891  __ bind(&failure_returned);
4892 
4893  Label retry;
4894  // If the returned exception is RETRY_AFTER_GC continue at retry label.
4895  STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
4896  __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
4897  __ j(zero, &retry, Label::kNear);
4898 
4899  // Special handling of out of memory exceptions.
4900  __ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
4901  __ j(equal, throw_out_of_memory_exception);
4902 
4903  // Retrieve the pending exception and clear the variable.
4904  __ mov(eax, Operand::StaticVariable(pending_exception_address));
4905  __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
4906  __ mov(Operand::StaticVariable(pending_exception_address), edx);
4907 
4908  // Special handling of termination exceptions which are uncatchable
4909  // by javascript code.
4910  __ cmp(eax, masm->isolate()->factory()->termination_exception());
4911  __ j(equal, throw_termination_exception);
4912 
4913  // Handle normal exception.
4914  __ jmp(throw_normal_exception);
4915 
4916  // Retry.
4917  __ bind(&retry);
4918 }
4919 
4920 
4921 void CEntryStub::Generate(MacroAssembler* masm) {
4922  // eax: number of arguments including receiver
4923  // ebx: pointer to C function (C callee-saved)
4924  // ebp: frame pointer (restored after C call)
4925  // esp: stack pointer (restored after C call)
4926  // esi: current context (C callee-saved)
4927  // edi: JS function of the caller (C callee-saved)
4928 
4929  // NOTE: Invocations of builtins may return failure objects instead
4930  // of a proper result. The builtin entry handles this by performing
4931  // a garbage collection and retrying the builtin (twice).
4932 
4933  // Enter the exit frame that transitions from JavaScript to C++.
4934  __ EnterExitFrame(save_doubles_ == kSaveFPRegs);
4935 
4936  // eax: result parameter for PerformGC, if any (setup below)
4937  // ebx: pointer to builtin function (C callee-saved)
4938  // ebp: frame pointer (restored after C call)
4939  // esp: stack pointer (restored after C call)
4940  // edi: number of arguments including receiver (C callee-saved)
4941  // esi: argv pointer (C callee-saved)
4942 
4943  Label throw_normal_exception;
4944  Label throw_termination_exception;
4945  Label throw_out_of_memory_exception;
4946 
4947  // Call into the runtime system.
4948  GenerateCore(masm,
4949  &throw_normal_exception,
4950  &throw_termination_exception,
4951  &throw_out_of_memory_exception,
4952  false,
4953  false);
4954 
4955  // Do space-specific GC and retry runtime call.
4956  GenerateCore(masm,
4957  &throw_normal_exception,
4958  &throw_termination_exception,
4959  &throw_out_of_memory_exception,
4960  true,
4961  false);
4962 
4963  // Do full GC and retry runtime call one final time.
4964  Failure* failure = Failure::InternalError();
4965  __ mov(eax, Immediate(reinterpret_cast<int32_t>(failure)));
4966  GenerateCore(masm,
4967  &throw_normal_exception,
4968  &throw_termination_exception,
4969  &throw_out_of_memory_exception,
4970  true,
4971  true);
4972 
4973  __ bind(&throw_out_of_memory_exception);
4974  // Set external caught exception to false.
4975  Isolate* isolate = masm->isolate();
4976  ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
4977  isolate);
4978  __ mov(Operand::StaticVariable(external_caught), Immediate(false));
4979 
4980  // Set pending exception and eax to out of memory exception.
4981  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
4982  isolate);
4983  __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
4984  __ mov(Operand::StaticVariable(pending_exception), eax);
4985  // Fall through to the next label.
4986 
4987  __ bind(&throw_termination_exception);
4988  __ ThrowUncatchable(eax);
4989 
4990  __ bind(&throw_normal_exception);
4991  __ Throw(eax);
4992 }
4993 
4994 
4995 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
4996  Label invoke, handler_entry, exit;
4997  Label not_outermost_js, not_outermost_js_2;
4998 
4999  // Set up frame.
5000  __ push(ebp);
5001  __ mov(ebp, esp);
5002 
5003  // Push marker in two places.
5004  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
5005  __ push(Immediate(Smi::FromInt(marker))); // context slot
5006  __ push(Immediate(Smi::FromInt(marker))); // function slot
5007  // Save callee-saved registers (C calling conventions).
5008  __ push(edi);
5009  __ push(esi);
5010  __ push(ebx);
5011 
5012  // Save copies of the top frame descriptor on the stack.
5013  ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, masm->isolate());
5014  __ push(Operand::StaticVariable(c_entry_fp));
5015 
5016  // If this is the outermost JS call, set js_entry_sp value.
5017  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress,
5018  masm->isolate());
5019  __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
5020  __ j(not_equal, &not_outermost_js, Label::kNear);
5021  __ mov(Operand::StaticVariable(js_entry_sp), ebp);
5022  __ push(Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
5023  __ jmp(&invoke, Label::kNear);
5024  __ bind(&not_outermost_js);
5025  __ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
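 // The entry frame is now complete; its layout, as implied by the pushes
 // above (offsets relative to ebp):
 //   ebp -  0: saved caller ebp
 //   ebp -  4: entry frame marker (context slot)
 //   ebp -  8: entry frame marker (function slot)
 //   ebp - 12: saved edi
 //   ebp - 16: saved esi
 //   ebp - 20: saved ebx
 //   ebp - 24: saved c_entry_fp
 //   ebp - 28: OUTERMOST_JSENTRY_FRAME or INNER_JSENTRY_FRAME marker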
5026 
5027  // Jump to a faked try block that does the invoke, with a faked catch
5028  // block that sets the pending exception.
5029  __ jmp(&invoke);
5030  __ bind(&handler_entry);
5031  handler_offset_ = handler_entry.pos();
5032  // Caught exception: Store result (exception) in the pending exception
5033  // field in the JSEnv and return a failure sentinel.
5034  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
5035  masm->isolate());
5036  __ mov(Operand::StaticVariable(pending_exception), eax);
5037  __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
5038  __ jmp(&exit);
5039 
5040  // Invoke: Link this frame into the handler chain. There's only one
5041  // handler block in this code object, so its index is 0.
5042  __ bind(&invoke);
5043  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
5044 
5045  // Clear any pending exceptions.
5046  __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
5047  __ mov(Operand::StaticVariable(pending_exception), edx);
5048 
5049  // Fake a receiver (NULL).
5050  __ push(Immediate(0)); // receiver
5051 
5052  // Invoke the function by calling through JS entry trampoline builtin and
5053  // pop the faked function when we return. Notice that we cannot store a
5054  // reference to the trampoline code directly in this stub, because the
5055  // builtin stubs may not have been generated yet.
5056  if (is_construct) {
5057  ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
5058  masm->isolate());
5059  __ mov(edx, Immediate(construct_entry));
5060  } else {
5061  ExternalReference entry(Builtins::kJSEntryTrampoline,
5062  masm->isolate());
5063  __ mov(edx, Immediate(entry));
5064  }
5065  __ mov(edx, Operand(edx, 0)); // deref address
5067  __ call(edx);
5068 
5069  // Unlink this frame from the handler chain.
5070  __ PopTryHandler();
5071 
5072  __ bind(&exit);
5073  // Check if the current stack frame is marked as the outermost JS frame.
5074  __ pop(ebx);
5075  __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
5076  __ j(not_equal, &not_outermost_js_2);
5077  __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
5078  __ bind(&not_outermost_js_2);
5079 
5080  // Restore the top frame descriptor from the stack.
5081  __ pop(Operand::StaticVariable(ExternalReference(
5082  Isolate::kCEntryFPAddress,
5083  masm->isolate())));
5084 
5085  // Restore callee-saved registers (C calling conventions).
5086  __ pop(ebx);
5087  __ pop(esi);
5088  __ pop(edi);
5089  __ add(esp, Immediate(2 * kPointerSize)); // remove markers
5090 
5091  // Restore frame pointer and return.
5092  __ pop(ebp);
5093  __ ret(0);
5094 }
5095 
5096 
5097 // Generate stub code for instanceof.
5098 // This code can patch a call site's inlined cache for an instanceof check,
5099 // which looks like this (matching the kCmpEdiOperandByte/kMovEaxImmediateByte
5100 // encodings asserted below).
5101 // 3b 3d XX XX XX XX cmp edi, [<cell holding the hole, patched to a map>]
5102 // 75 0a jne <some near label>
5103 // b8 XX XX XX XX mov eax, <the hole, patched to either true or false>
5104 //
5105 // If call site patching is requested, the stack will hold the delta from the
5106 // return address to the cmp instruction just below the return address. This
5107 // also means that call site patching can only take place with arguments in
5108 // registers. TOS looks like this when call site patching is requested:
5109 //
5110 // esp[0] : return address
5111 // esp[4] : delta from return address to cmp instruction
5112 //
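// For illustration, the patcher reaches both patchable operands from the
// return address and the delta (a sketch based on the kDelta* constants
// defined in Generate below):
//
//   cmp_site = return_address - delta
//   cmp_site + kDeltaToCmpImmediate : cell address operand of the cmp
//   cmp_site + kDeltaToMovImmediate : true/false immediate of the mov
//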
5113 void InstanceofStub::Generate(MacroAssembler* masm) {
5114  // Call site inlining and patching implies arguments in registers.
5115  ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
5116 
5117  // Fixed register usage throughout the stub.
5118  Register object = eax; // Object (lhs).
5119  Register map = ebx; // Map of the object.
5120  Register function = edx; // Function (rhs).
5121  Register prototype = edi; // Prototype of the function.
5122  Register scratch = ecx;
5123 
5124  // Constants describing the call site code to patch.
5125  static const int kDeltaToCmpImmediate = 2;
5126  static const int kDeltaToMov = 8;
5127  static const int kDeltaToMovImmediate = 9;
5128  static const int8_t kCmpEdiOperandByte1 = BitCast<int8_t, uint8_t>(0x3b);
5129  static const int8_t kCmpEdiOperandByte2 = BitCast<int8_t, uint8_t>(0x3d);
5130  static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
5131 
5132  ExternalReference roots_array_start =
5133  ExternalReference::roots_array_start(masm->isolate());
5134 
5135  ASSERT_EQ(object.code(), InstanceofStub::left().code());
5136  ASSERT_EQ(function.code(), InstanceofStub::right().code());
5137 
5138  // Get the object and function - they are always both needed.
5139  Label slow, not_js_object;
5140  if (!HasArgsInRegisters()) {
5141  __ mov(object, Operand(esp, 2 * kPointerSize));
5142  __ mov(function, Operand(esp, 1 * kPointerSize));
5143  }
5144 
5145  // Check that the left-hand side is a JS object.
5146  __ JumpIfSmi(object, &not_js_object);
5147  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
5148 
5149  // If there is a call site cache don't look in the global cache, but do the
5150  // real lookup and update the call site cache.
5151  if (!HasCallSiteInlineCheck()) {
5152  // Look up the function and the map in the instanceof cache.
5153  Label miss;
5154  __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
5155  __ cmp(function, Operand::StaticArray(scratch,
5157  roots_array_start));
5158  __ j(not_equal, &miss, Label::kNear);
5159  __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
5160  __ cmp(map, Operand::StaticArray(
5161  scratch, times_pointer_size, roots_array_start));
5162  __ j(not_equal, &miss, Label::kNear);
5163  __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
5164  __ mov(eax, Operand::StaticArray(
5165  scratch, times_pointer_size, roots_array_start));
5166  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5167  __ bind(&miss);
5168  }
5169 
5170  // Get the prototype of the function.
5171  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
5172 
5173  // Check that the function prototype is a JS object.
5174  __ JumpIfSmi(prototype, &slow);
5175  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
5176 
5177  // Update the global instanceof or call site inlined cache with the current
5178  // map and function. The cached answer will be set when it is known below.
5179  if (!HasCallSiteInlineCheck()) {
5180  __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
5181  __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
5182  map);
5183  __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
5184  __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
5185  function);
5186  } else {
5187  // The constants for the code patching are based on no push instructions
5188  // at the call site.
5189  ASSERT(HasArgsInRegisters());
5190  // Get return address and delta to inlined map check.
5191  __ mov(scratch, Operand(esp, 0 * kPointerSize));
5192  __ sub(scratch, Operand(esp, 1 * kPointerSize));
5193  if (FLAG_debug_code) {
5194  __ cmpb(Operand(scratch, 0), kCmpEdiOperandByte1);
5195  __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 1)");
5196  __ cmpb(Operand(scratch, 1), kCmpEdiOperandByte2);
5197  __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 2)");
5198  }
5199  __ mov(scratch, Operand(scratch, kDeltaToCmpImmediate));
5200  __ mov(Operand(scratch, 0), map);
5201  }
5202 
5203  // Loop through the prototype chain of the object looking for the function
5204  // prototype.
5205  __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
5206  Label loop, is_instance, is_not_instance;
5207  __ bind(&loop);
5208  __ cmp(scratch, prototype);
5209  __ j(equal, &is_instance, Label::kNear);
5210  Factory* factory = masm->isolate()->factory();
5211  __ cmp(scratch, Immediate(factory->null_value()));
5212  __ j(equal, &is_not_instance, Label::kNear);
5213  __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
5214  __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
5215  __ jmp(&loop);
5216 
5217  __ bind(&is_instance);
5218  if (!HasCallSiteInlineCheck()) {
5219  __ Set(eax, Immediate(0));
5220  __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
5221  __ mov(Operand::StaticArray(scratch,
5222  times_pointer_size, roots_array_start), eax);
5223  } else {
5224  // Get return address and delta to inlined map check.
5225  __ mov(eax, factory->true_value());
5226  __ mov(scratch, Operand(esp, 0 * kPointerSize));
5227  __ sub(scratch, Operand(esp, 1 * kPointerSize));
5228  if (FLAG_debug_code) {
5229  __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
5230  __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
5231  }
5232  __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
5233  if (!ReturnTrueFalseObject()) {
5234  __ Set(eax, Immediate(0));
5235  }
5236  }
5237  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5238 
5239  __ bind(&is_not_instance);
5240  if (!HasCallSiteInlineCheck()) {
5241  __ Set(eax, Immediate(Smi::FromInt(1)));
5242  __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
5243  __ mov(Operand::StaticArray(
5244  scratch, times_pointer_size, roots_array_start), eax);
5245  } else {
5246  // Get return address and delta to inlined map check.
5247  __ mov(eax, factory->false_value());
5248  __ mov(scratch, Operand(esp, 0 * kPointerSize));
5249  __ sub(scratch, Operand(esp, 1 * kPointerSize));
5250  if (FLAG_debug_code) {
5251  __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
5252  __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
5253  }
5254  __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
5255  if (!ReturnTrueFalseObject()) {
5256  __ Set(eax, Immediate(Smi::FromInt(1)));
5257  }
5258  }
5259  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5260 
5261  Label object_not_null, object_not_null_or_smi;
5262  __ bind(&not_js_object);
5263  // Before null, smi and string value checks, check that the rhs is a
5264  // function, since a non-function rhs must throw an exception.
5265  __ JumpIfSmi(function, &slow, Label::kNear);
5266  __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
5267  __ j(not_equal, &slow, Label::kNear);
5268 
5269  // Null is not instance of anything.
5270  __ cmp(object, factory->null_value());
5271  __ j(not_equal, &object_not_null, Label::kNear);
5272  __ Set(eax, Immediate(Smi::FromInt(1)));
5273  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5274 
5275  __ bind(&object_not_null);
5276  // Smi values are not instances of anything.
5277  __ JumpIfNotSmi(object, &object_not_null_or_smi, Label::kNear);
5278  __ Set(eax, Immediate(Smi::FromInt(1)));
5279  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5280 
5281  __ bind(&object_not_null_or_smi);
5282  // String values are not instances of anything.
5283  Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
5284  __ j(NegateCondition(is_string), &slow, Label::kNear);
5285  __ Set(eax, Immediate(Smi::FromInt(1)));
5286  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5287 
5288  // Slow-case: Go through the JavaScript implementation.
5289  __ bind(&slow);
5290  if (!ReturnTrueFalseObject()) {
5291  // Tail call the builtin which returns 0 or 1.
5292  if (HasArgsInRegisters()) {
5293  // Push arguments below return address.
5294  __ pop(scratch);
5295  __ push(object);
5296  __ push(function);
5297  __ push(scratch);
5298  }
5299  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
5300  } else {
5301  // Call the builtin and convert 0/1 to true/false.
5302  {
5303  FrameScope scope(masm, StackFrame::INTERNAL);
5304  __ push(object);
5305  __ push(function);
5306  __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
5307  }
5308  Label true_value, done;
5309  __ test(eax, eax);
5310  __ j(zero, &true_value, Label::kNear);
5311  __ mov(eax, factory->false_value());
5312  __ jmp(&done, Label::kNear);
5313  __ bind(&true_value);
5314  __ mov(eax, factory->true_value());
5315  __ bind(&done);
5316  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5317  }
5318 }
5319 
5320 
5321 Register InstanceofStub::left() { return eax; }
5322 
5323 
5324 Register InstanceofStub::right() { return edx; }
5325 
5326 
5327 int CompareStub::MinorKey() {
5328  // Encode the three parameters in a unique 16 bit value. To avoid duplicate
5329  // stubs, the never-NaN-NaN condition is only taken into account if the
5330  // condition is equal.
5331  ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
5332  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
5333  return ConditionField::encode(static_cast<unsigned>(cc_))
5334  | RegisterField::encode(false) // lhs_ and rhs_ are not used
5335  | StrictField::encode(strict_)
5336  | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
5337  | IncludeNumberCompareField::encode(include_number_compare_)
5338  | IncludeSmiCompareField::encode(include_smi_compare_);
5339 }
5340 
5341 
5342 // Unfortunately you have to run without snapshots to see most of these
5343 // names in the profile since most compare stubs end up in the snapshot.
5344 void CompareStub::PrintName(StringStream* stream) {
5345  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
5346  const char* cc_name;
5347  switch (cc_) {
5348  case less: cc_name = "LT"; break;
5349  case greater: cc_name = "GT"; break;
5350  case less_equal: cc_name = "LE"; break;
5351  case greater_equal: cc_name = "GE"; break;
5352  case equal: cc_name = "EQ"; break;
5353  case not_equal: cc_name = "NE"; break;
5354  default: cc_name = "UnknownCondition"; break;
5355  }
5356  bool is_equality = cc_ == equal || cc_ == not_equal;
5357  stream->Add("CompareStub_%s", cc_name);
5358  if (strict_ && is_equality) stream->Add("_STRICT");
5359  if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
5360  if (!include_number_compare_) stream->Add("_NO_NUMBER");
5361  if (!include_smi_compare_) stream->Add("_NO_SMI");
5362 }
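
// For example, a strict equality stub generated with both the number and
// the smi compare generators disabled prints as
// "CompareStub_EQ_STRICT_NO_NUMBER_NO_SMI".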
5363 
5364 
5365 // -------------------------------------------------------------------------
5366 // StringCharCodeAtGenerator
5367 
5368 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
5369  // If the receiver is a smi trigger the non-string case.
5370  STATIC_ASSERT(kSmiTag == 0);
5371  __ JumpIfSmi(object_, receiver_not_string_);
5372 
5373  // Fetch the instance type of the receiver into result register.
5374  __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
5375  __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
5376  // If the receiver is not a string trigger the non-string case.
5377  __ test(result_, Immediate(kIsNotStringMask));
5378  __ j(not_zero, receiver_not_string_);
5379 
5380  // If the index is non-smi trigger the non-smi case.
5381  STATIC_ASSERT(kSmiTag == 0);
5382  __ JumpIfNotSmi(index_, &index_not_smi_);
5383  __ bind(&got_smi_index_);
5384 
5385  // Check for index out of range.
5386  __ cmp(index_, FieldOperand(object_, String::kLengthOffset));
5387  __ j(above_equal, index_out_of_range_);
5388 
5389  __ SmiUntag(index_);
5390 
5391  Factory* factory = masm->isolate()->factory();
5392  StringCharLoadGenerator::Generate(
5393  masm, factory, object_, index_, result_, &call_runtime_);
5394 
5395  __ SmiTag(result_);
5396  __ bind(&exit_);
5397 }
5398 
5399 
5400 void StringCharCodeAtGenerator::GenerateSlow(
5401  MacroAssembler* masm,
5402  const RuntimeCallHelper& call_helper) {
5403  __ Abort("Unexpected fallthrough to CharCodeAt slow case");
5404 
5405  // Index is not a smi.
5406  __ bind(&index_not_smi_);
5407  // If index is a heap number, try converting it to an integer.
5408  __ CheckMap(index_,
5409  masm->isolate()->factory()->heap_number_map(),
5410  index_not_number_,
5411  DONT_DO_SMI_CHECK);
5412  call_helper.BeforeCall(masm);
5413  __ push(object_);
5414  __ push(index_); // Consumed by runtime conversion function.
5415  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
5416  __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
5417  } else {
5418  ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
5419  // NumberToSmi discards numbers that are not exact integers.
5420  __ CallRuntime(Runtime::kNumberToSmi, 1);
5421  }
5422  if (!index_.is(eax)) {
5423  // Save the conversion result before the pop instructions below
5424  // have a chance to overwrite it.
5425  __ mov(index_, eax);
5426  }
5427  __ pop(object_);
5428  // Reload the instance type.
5429  __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
5430  __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
5431  call_helper.AfterCall(masm);
5432  // If index is still not a smi, it must be out of range.
5433  STATIC_ASSERT(kSmiTag == 0);
5434  __ JumpIfNotSmi(index_, index_out_of_range_);
5435  // Otherwise, return to the fast path.
5436  __ jmp(&got_smi_index_);
5437 
5438  // Call runtime. We get here when the receiver is a string and the
5439  // index is a number, but the code for getting the actual character
5440  // is too complex (e.g., when the string needs to be flattened).
5441  __ bind(&call_runtime_);
5442  call_helper.BeforeCall(masm);
5443  __ push(object_);
5444  __ SmiTag(index_);
5445  __ push(index_);
5446  __ CallRuntime(Runtime::kStringCharCodeAt, 2);
5447  if (!result_.is(eax)) {
5448  __ mov(result_, eax);
5449  }
5450  call_helper.AfterCall(masm);
5451  __ jmp(&exit_);
5452 
5453  __ Abort("Unexpected fallthrough from CharCodeAt slow case");
5454 }
5455 
5456 
5457 // -------------------------------------------------------------------------
5458 // StringCharFromCodeGenerator
5459 
5460 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
5461  // Fast case of Heap::LookupSingleCharacterStringFromCode.
5462  STATIC_ASSERT(kSmiTag == 0);
5463  STATIC_ASSERT(kSmiShiftSize == 0);
5464  ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
5465  __ test(code_,
5466  Immediate(kSmiTagMask |
5467  ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
5468  __ j(not_zero, &slow_case_);
5469 
5470  Factory* factory = masm->isolate()->factory();
5471  __ Set(result_, Immediate(factory->single_character_string_cache()));
5472  STATIC_ASSERT(kSmiTag == 0);
5473  STATIC_ASSERT(kSmiTagSize == 1);
5474  STATIC_ASSERT(kSmiShiftSize == 0);
5475  // At this point code register contains smi tagged ASCII char code.
5476  __ mov(result_, FieldOperand(result_,
5477  code_, times_half_pointer_size,
5478  FixedArray::kHeaderSize));
5479  __ cmp(result_, factory->undefined_value());
5480  __ j(equal, &slow_case_);
5481  __ bind(&exit_);
5482 }
5483 
5484 
5485 void StringCharFromCodeGenerator::GenerateSlow(
5486  MacroAssembler* masm,
5487  const RuntimeCallHelper& call_helper) {
5488  __ Abort("Unexpected fallthrough to CharFromCode slow case");
5489 
5490  __ bind(&slow_case_);
5491  call_helper.BeforeCall(masm);
5492  __ push(code_);
5493  __ CallRuntime(Runtime::kCharFromCode, 1);
5494  if (!result_.is(eax)) {
5495  __ mov(result_, eax);
5496  }
5497  call_helper.AfterCall(masm);
5498  __ jmp(&exit_);
5499 
5500  __ Abort("Unexpected fallthrough from CharFromCode slow case");
5501 }
5502 
5503 
5504 // -------------------------------------------------------------------------
5505 // StringCharAtGenerator
5506 
5507 void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
5508  char_code_at_generator_.GenerateFast(masm);
5509  char_from_code_generator_.GenerateFast(masm);
5510 }
5511 
5512 
5513 void StringCharAtGenerator::GenerateSlow(
5514  MacroAssembler* masm,
5515  const RuntimeCallHelper& call_helper) {
5516  char_code_at_generator_.GenerateSlow(masm, call_helper);
5517  char_from_code_generator_.GenerateSlow(masm, call_helper);
5518 }
5519 
5520 
5521 void StringAddStub::Generate(MacroAssembler* masm) {
5522  Label call_runtime, call_builtin;
5523  Builtins::JavaScript builtin_id = Builtins::ADD;
5524 
5525  // Load the two arguments.
5526  __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
5527  __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
5528 
5529  // Make sure that both arguments are strings if not known in advance.
5530  if (flags_ == NO_STRING_ADD_FLAGS) {
5531  __ JumpIfSmi(eax, &call_runtime);
5532  __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
5533  __ j(above_equal, &call_runtime);
5534 
5535  // First argument is a string, test second.
5536  __ JumpIfSmi(edx, &call_runtime);
5537  __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
5538  __ j(above_equal, &call_runtime);
5539  } else {
5540  // Here at least one of the arguments is definitely a string.
5541  // We convert the one that is not known to be a string.
5542  if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
5543  ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
5544  GenerateConvertArgument(masm, 2 * kPointerSize, eax, ebx, ecx, edi,
5545  &call_builtin);
5546  builtin_id = Builtins::STRING_ADD_RIGHT;
5547  } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
5548  ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
5549  GenerateConvertArgument(masm, 1 * kPointerSize, edx, ebx, ecx, edi,
5550  &call_builtin);
5551  builtin_id = Builtins::STRING_ADD_LEFT;
5552  }
5553  }
5554 
5555  // Both arguments are strings.
5556  // eax: first string
5557  // edx: second string
5558  // Check if either of the strings is empty. In that case return the other.
5559  Label second_not_zero_length, both_not_zero_length;
5560  __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
5561  STATIC_ASSERT(kSmiTag == 0);
5562  __ test(ecx, ecx);
5563  __ j(not_zero, &second_not_zero_length, Label::kNear);
5564  // Second string is empty, result is first string which is already in eax.
5565  Counters* counters = masm->isolate()->counters();
5566  __ IncrementCounter(counters->string_add_native(), 1);
5567  __ ret(2 * kPointerSize);
5568  __ bind(&second_not_zero_length);
5569  __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
5570  STATIC_ASSERT(kSmiTag == 0);
5571  __ test(ebx, ebx);
5572  __ j(not_zero, &both_not_zero_length, Label::kNear);
5573  // First string is empty, result is second string which is in edx.
5574  __ mov(eax, edx);
5575  __ IncrementCounter(counters->string_add_native(), 1);
5576  __ ret(2 * kPointerSize);
5577 
5578  // Both strings are non-empty.
5579  // eax: first string
5580  // ebx: length of first string as a smi
5581  // ecx: length of second string as a smi
5582  // edx: second string
5583  // Look at the length of the result of adding the two strings.
5584  Label string_add_flat_result, longer_than_two;
5585  __ bind(&both_not_zero_length);
5586  __ add(ebx, ecx);
5587  STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
5588  // Handle exceptionally long strings in the runtime system.
5589  __ j(overflow, &call_runtime);
5590  // Use the symbol table when adding two one-character strings, as it
5591  // helps later optimizations to return a symbol here.
5592  __ cmp(ebx, Immediate(Smi::FromInt(2)));
5593  __ j(not_equal, &longer_than_two);
5594 
5595  // Check that both strings are non-external ASCII strings.
5596  __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx, &call_runtime);
5597 
5598  // Get the two characters forming the new string.
5599  __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
5600  __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
5601 
5602  // Try to look up the two-character string in the symbol table. If it is
5603  // not found, just allocate a new one.
5604  Label make_two_character_string, make_two_character_string_no_reload;
5605  StringHelper::GenerateTwoCharacterSymbolTableProbe(
5606  masm, ebx, ecx, eax, edx, edi,
5607  &make_two_character_string_no_reload, &make_two_character_string);
5608  __ IncrementCounter(counters->string_add_native(), 1);
5609  __ ret(2 * kPointerSize);
5610 
5611  // Allocate a two character string.
5612  __ bind(&make_two_character_string);
5613  // Reload the arguments.
5614  __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
5615  __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
5616  // Get the two characters forming the new string.
5617  __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
5618  __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
5619  __ bind(&make_two_character_string_no_reload);
5620  __ IncrementCounter(counters->string_add_make_two_char(), 1);
5621  __ AllocateAsciiString(eax, 2, edi, edx, &call_runtime);
5622  // Pack both characters in ebx.
5623  __ shl(ecx, kBitsPerByte);
5624  __ or_(ebx, ecx);
5625  // Set the characters in the new string.
5626  __ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx);
5627  __ IncrementCounter(counters->string_add_native(), 1);
5628  __ ret(2 * kPointerSize);
5629 
5630  __ bind(&longer_than_two);
5631  // Check if resulting string will be flat.
5632  __ cmp(ebx, Immediate(Smi::FromInt(ConsString::kMinLength)));
5633  __ j(below, &string_add_flat_result);
5634 
5635  // If the result is not supposed to be flat, allocate a cons string object.
5636  // If both strings are ASCII, the result is an ASCII cons string.
5637  Label non_ascii, allocated, ascii_data;
5638  __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
5639  __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
5640  __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
5641  __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
5642  __ and_(ecx, edi);
5643  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
5644  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
5645  __ test(ecx, Immediate(kStringEncodingMask));
5646  __ j(zero, &non_ascii);
5647  __ bind(&ascii_data);
5648  // Allocate an ASCII cons string.
5649  __ AllocateAsciiConsString(ecx, edi, no_reg, &call_runtime);
5650  __ bind(&allocated);
5651  // Fill the fields of the cons string.
5652  if (FLAG_debug_code) __ AbortIfNotSmi(ebx);
5653  __ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
5654  __ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
5655  Immediate(String::kEmptyHashField));
5656  __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
5657  __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
5658  __ mov(eax, ecx);
5659  __ IncrementCounter(counters->string_add_native(), 1);
5660  __ ret(2 * kPointerSize);
5661  __ bind(&non_ascii);
5662  // At least one of the strings is two-byte. Check whether it happens
5663  // to contain only ASCII characters.
5664  // ecx: first instance type AND second instance type.
5665  // edi: second instance type.
5666  __ test(ecx, Immediate(kAsciiDataHintMask));
5667  __ j(not_zero, &ascii_data);
5668  __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
5669  __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
5670  __ xor_(edi, ecx);
5671  STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
5672  __ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
5673  __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
5674  __ j(equal, &ascii_data);
5675  // Allocate a two byte cons string.
5676  __ AllocateTwoByteConsString(ecx, edi, no_reg, &call_runtime);
5677  __ jmp(&allocated);
5678 
5679  // We cannot encounter sliced strings or cons strings here since:
5680  STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
5681  // Handle creating a flat result from either external or sequential strings.
5682  // Locate the first characters' locations.
5683  // eax: first string
5684  // ebx: length of resulting flat string as a smi
5685  // edx: second string
5686  Label first_prepared, second_prepared;
5687  Label first_is_sequential, second_is_sequential;
5688  __ bind(&string_add_flat_result);
5689  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
5690  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
5691  // ecx: instance type of first string
5692  STATIC_ASSERT(kSeqStringTag == 0);
5693  __ test_b(ecx, kStringRepresentationMask);
5694  __ j(zero, &first_is_sequential, Label::kNear);
5695  // Rule out short external string and load string resource.
5696  STATIC_ASSERT(kShortExternalStringTag != 0);
5697  __ test_b(ecx, kShortExternalStringMask);
5698  __ j(not_zero, &call_runtime);
5699  __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
5700  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
5701  __ jmp(&first_prepared, Label::kNear);
5702  __ bind(&first_is_sequential);
5703  __ add(eax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5704  __ bind(&first_prepared);
5705 
5706  __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
5707  __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
5708  // Check whether both strings have the same encoding.
5709  // edi: instance type of second string
5710  __ xor_(ecx, edi);
5711  __ test_b(ecx, kStringEncodingMask);
5712  __ j(not_zero, &call_runtime);
5713  STATIC_ASSERT(kSeqStringTag == 0);
5714  __ test_b(edi, kStringRepresentationMask);
5715  __ j(zero, &second_is_sequential, Label::kNear);
5716  // Rule out short external string and load string resource.
5718  __ test_b(edi, kShortExternalStringMask);
5719  __ j(not_zero, &call_runtime);
5720  __ mov(edx, FieldOperand(edx, ExternalString::kResourceDataOffset));
5721  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
5722  __ jmp(&second_prepared, Label::kNear);
5723  __ bind(&second_is_sequential);
5724  __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5725  __ bind(&second_prepared);
5726 
5727  // Push the addresses of both strings' first characters onto the stack.
5728  __ push(edx);
5729  __ push(eax);
5730 
5731  Label non_ascii_string_add_flat_result, call_runtime_drop_two;
5732  // edi: instance type of second string
5733  // First string and second string have the same encoding.
5734  STATIC_ASSERT(kTwoByteStringTag == 0);
5735  __ test_b(edi, kStringEncodingMask);
5736  __ j(zero, &non_ascii_string_add_flat_result);
5737 
5738  // Both strings are ASCII strings.
5739  // ebx: length of resulting flat string as a smi
5740  __ SmiUntag(ebx);
5741  __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &call_runtime_drop_two);
5742  // eax: result string
5743  __ mov(ecx, eax);
5744  // Locate first character of result.
5745  __ add(ecx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5746  // Load first argument's length and first character location. Account for
5747  // values currently on the stack when fetching arguments from it.
5748  __ mov(edx, Operand(esp, 4 * kPointerSize));
5749  __ mov(edi, FieldOperand(edx, String::kLengthOffset));
5750  __ SmiUntag(edi);
5751  __ pop(edx);
5752  // eax: result string
5753  // ecx: first character of result
5754  // edx: first char of first argument
5755  // edi: length of first argument
5756  StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
5757  // Load second argument's length and first character location. Account for
5758  // values currently on the stack when fetching arguments from it.
5759  __ mov(edx, Operand(esp, 2 * kPointerSize));
5760  __ mov(edi, FieldOperand(edx, String::kLengthOffset));
5761  __ SmiUntag(edi);
5762  __ pop(edx);
5763  // eax: result string
5764  // ecx: next character of result
5765  // edx: first char of second argument
5766  // edi: length of second argument
5767  StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
5768  __ IncrementCounter(counters->string_add_native(), 1);
5769  __ ret(2 * kPointerSize);
5770 
5771  // Handle creating a flat two byte result.
5772  // eax: first string - known to be two byte
5773  // ebx: length of resulting flat string as a smi
5774  // edx: second string
5775  __ bind(&non_ascii_string_add_flat_result);
5776  // Both strings are two byte strings.
5777  __ SmiUntag(ebx);
5778  __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &call_runtime_drop_two);
5779  // eax: result string
5780  __ mov(ecx, eax);
5781  // Locate first character of result.
5782  __ add(ecx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
5783  // Load first argument's length and first character location. Account for
5784  // values currently on the stack when fetching arguments from it.
5785  __ mov(edx, Operand(esp, 4 * kPointerSize));
5786  __ mov(edi, FieldOperand(edx, String::kLengthOffset));
5787  __ SmiUntag(edi);
5788  __ pop(edx);
5789  // eax: result string
5790  // ecx: first character of result
5791  // edx: first char of first argument
5792  // edi: length of first argument
5793  StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
5794  // Load second argument's length and first character location. Account for
5795  // values currently on the stack when fetching arguments from it.
5796  __ mov(edx, Operand(esp, 2 * kPointerSize));
5797  __ mov(edi, FieldOperand(edx, String::kLengthOffset));
5798  __ SmiUntag(edi);
5799  __ pop(edx);
5800  // eax: result string
5801  // ecx: next character of result
5802  // edx: first char of second argument
5803  // edi: length of second argument
5804  StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
5805  __ IncrementCounter(counters->string_add_native(), 1);
5806  __ ret(2 * kPointerSize);
5807 
5808  // Recover stack pointer before jumping to runtime.
5809  __ bind(&call_runtime_drop_two);
5810  __ Drop(2);
5811  // Just jump to runtime to add the two strings.
5812  __ bind(&call_runtime);
5813  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
5814 
5815  if (call_builtin.is_linked()) {
5816  __ bind(&call_builtin);
5817  __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
5818  }
5819 }
5820 
5821 
5822 void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
5823  int stack_offset,
5824  Register arg,
5825  Register scratch1,
5826  Register scratch2,
5827  Register scratch3,
5828  Label* slow) {
5829  // First check if the argument is already a string.
5830  Label not_string, done;
5831  __ JumpIfSmi(arg, &not_string);
5832  __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
5833  __ j(below, &done);
5834 
5835  // Check the number to string cache.
5836  Label not_cached;
5837  __ bind(&not_string);
5838  // Puts the cached result into scratch1.
5839  NumberToStringStub::GenerateLookupNumberStringCache(masm,
5840  arg,
5841  scratch1,
5842  scratch2,
5843  scratch3,
5844  false,
5845  &not_cached);
5846  __ mov(arg, scratch1);
5847  __ mov(Operand(esp, stack_offset), arg);
5848  __ jmp(&done);
5849 
5850  // Check if the argument is a safe string wrapper.
5851  __ bind(&not_cached);
5852  __ JumpIfSmi(arg, slow);
5853  __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1.
5854  __ j(not_equal, slow);
5855  __ test_b(FieldOperand(scratch1, Map::kBitField2Offset),
5856  1 << Map::kStringWrapperSafeForDefaultValueOf);
5857  __ j(zero, slow);
5858  __ mov(arg, FieldOperand(arg, JSValue::kValueOffset));
5859  __ mov(Operand(esp, stack_offset), arg);
5860 
5861  __ bind(&done);
5862 }
5863 
5864 
5865 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
5866  Register dest,
5867  Register src,
5868  Register count,
5869  Register scratch,
5870  bool ascii) {
5871  Label loop;
5872  __ bind(&loop);
5873  // This loop just copies one character at a time, as it is only used for very
5874  // short strings.
5875  if (ascii) {
5876  __ mov_b(scratch, Operand(src, 0));
5877  __ mov_b(Operand(dest, 0), scratch);
5878  __ add(src, Immediate(1));
5879  __ add(dest, Immediate(1));
5880  } else {
5881  __ mov_w(scratch, Operand(src, 0));
5882  __ mov_w(Operand(dest, 0), scratch);
5883  __ add(src, Immediate(2));
5884  __ add(dest, Immediate(2));
5885  }
5886  __ sub(count, Immediate(1));
5887  __ j(not_zero, &loop);
5888 }
5889 
5890 
5891 void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
5892  Register dest,
5893  Register src,
5894  Register count,
5895  Register scratch,
5896  bool ascii) {
5897  // Copy characters using rep movs of doublewords.
5898  // The destination is aligned on a 4 byte boundary because we are
5899  // copying to the beginning of a newly allocated string.
5900  ASSERT(dest.is(edi)); // rep movs destination
5901  ASSERT(src.is(esi)); // rep movs source
5902  ASSERT(count.is(ecx)); // rep movs count
5903  ASSERT(!scratch.is(dest));
5904  ASSERT(!scratch.is(src));
5905  ASSERT(!scratch.is(count));
5906 
5907  // Nothing to do for zero characters.
5908  Label done;
5909  __ test(count, count);
5910  __ j(zero, &done);
5911 
5912  // Make count the number of bytes to copy.
5913  if (!ascii) {
5914  __ shl(count, 1);
5915  }
5916 
5917  // Don't enter the rep movs if there are fewer than 4 bytes to copy.
5918  Label last_bytes;
5919  __ test(count, Immediate(~3));
5920  __ j(zero, &last_bytes, Label::kNear);
5921 
5922  // Copy from edi to esi using rep movs instruction.
5923  __ mov(scratch, count);
5924  __ sar(count, 2); // Number of doublewords to copy.
5925  __ cld();
5926  __ rep_movs();
5927 
5928  // Find number of bytes left.
5929  __ mov(count, scratch);
5930  __ and_(count, 3);
5931 
5932  // Check if there are more bytes to copy.
5933  __ bind(&last_bytes);
5934  __ test(count, count);
5935  __ j(zero, &done);
5936 
5937  // Copy remaining characters.
5938  Label loop;
5939  __ bind(&loop);
5940  __ mov_b(scratch, Operand(src, 0));
5941  __ mov_b(Operand(dest, 0), scratch);
5942  __ add(src, Immediate(1));
5943  __ add(dest, Immediate(1));
5944  __ sub(count, Immediate(1));
5945  __ j(not_zero, &loop);
5946 
5947  __ bind(&done);
5948 }
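
// A self-contained C++ model of the copy above (an illustrative sketch, not
// code used by the stub; the stub uses rep movs where this uses an explicit
// doubleword loop).
static void CopyCharsSketch(unsigned char* dest, const unsigned char* src,
                            int count, bool ascii) {
  int bytes = ascii ? count : count * 2;    // two-byte characters double it
  int i = 0;
  for (; i + 4 <= bytes; i += 4) {          // the rep movs doubleword part
    dest[i] = src[i]; dest[i + 1] = src[i + 1];
    dest[i + 2] = src[i + 2]; dest[i + 3] = src[i + 3];
  }
  for (; i < bytes; i++) dest[i] = src[i];  // 0-3 trailing bytes
}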
5949 
5950 
5951 void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
5952  Register c1,
5953  Register c2,
5954  Register scratch1,
5955  Register scratch2,
5956  Register scratch3,
5957  Label* not_probed,
5958  Label* not_found) {
5959  // Register scratch3 is the general scratch register in this function.
5960  Register scratch = scratch3;
5961 
5962  // Make sure that both characters are not digits, as such strings have a
5963  // different hash algorithm. Don't try to look for these in the symbol table.
5964  Label not_array_index;
5965  __ mov(scratch, c1);
5966  __ sub(scratch, Immediate(static_cast<int>('0')));
5967  __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
5968  __ j(above, &not_array_index, Label::kNear);
5969  __ mov(scratch, c2);
5970  __ sub(scratch, Immediate(static_cast<int>('0')));
5971  __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
5972  __ j(below_equal, not_probed);
5973 
5974  __ bind(&not_array_index);
5975  // Calculate the two character string hash.
5976  Register hash = scratch1;
5977  GenerateHashInit(masm, hash, c1, scratch);
5978  GenerateHashAddCharacter(masm, hash, c2, scratch);
5979  GenerateHashGetHash(masm, hash, scratch);
5980 
5981  // Collect the two characters in a register.
5982  Register chars = c1;
5983  __ shl(c2, kBitsPerByte);
5984  __ or_(chars, c2);
5985 
5986  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5987  // hash: hash of two character string.
5988 
5989  // Load the symbol table.
5990  Register symbol_table = c2;
5991  ExternalReference roots_array_start =
5992  ExternalReference::roots_array_start(masm->isolate());
5993  __ mov(scratch, Immediate(Heap::kSymbolTableRootIndex));
5994  __ mov(symbol_table,
5995  Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
5996 
5997  // Calculate capacity mask from the symbol table capacity.
5998  Register mask = scratch2;
5999  __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
6000  __ SmiUntag(mask);
6001  __ sub(mask, Immediate(1));
6002 
6003  // Registers
6004  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
6005  // hash: hash of two character string
6006  // symbol_table: symbol table
6007  // mask: capacity mask
6008  // scratch: -
6009 
6010  // Perform a number of probes in the symbol table.
6011  static const int kProbes = 4;
6012  Label found_in_symbol_table;
6013  Label next_probe[kProbes], next_probe_pop_mask[kProbes];
6014  Register candidate = scratch; // Scratch register contains candidate.
6015  for (int i = 0; i < kProbes; i++) {
6016  // Calculate entry in symbol table.
6017  __ mov(scratch, hash);
6018  if (i > 0) {
6019  __ add(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
6020  }
6021  __ and_(scratch, mask);
6022 
6023  // Load the entry from the symbol table.
6024  STATIC_ASSERT(SymbolTable::kEntrySize == 1);
6025  __ mov(candidate,
6026  FieldOperand(symbol_table,
6027  scratch,
6028  times_pointer_size,
6029  SymbolTable::kElementsStartOffset));
6030 
6031  // If entry is undefined no string with this hash can be found.
6032  Factory* factory = masm->isolate()->factory();
6033  __ cmp(candidate, factory->undefined_value());
6034  __ j(equal, not_found);
6035  __ cmp(candidate, factory->the_hole_value());
6036  __ j(equal, &next_probe[i]);
6037 
6038  // If length is not 2 the string is not a candidate.
6039  __ cmp(FieldOperand(candidate, String::kLengthOffset),
6040  Immediate(Smi::FromInt(2)));
6041  __ j(not_equal, &next_probe[i]);
6042 
6043  // As we are out of registers save the mask on the stack and use that
6044  // register as a temporary.
6045  __ push(mask);
6046  Register temp = mask;
6047 
6048  // Check that the candidate is a non-external ASCII string.
6049  __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset));
6050  __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
6051  __ JumpIfInstanceTypeIsNotSequentialAscii(
6052  temp, temp, &next_probe_pop_mask[i]);
6053 
6054  // Check if the two characters match.
6055  __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
6056  __ and_(temp, 0x0000ffff);
6057  __ cmp(chars, temp);
6058  __ j(equal, &found_in_symbol_table);
6059  __ bind(&next_probe_pop_mask[i]);
6060  __ pop(mask);
6061  __ bind(&next_probe[i]);
6062  }
6063 
6064  // No matching 2 character string found by probing.
6065  __ jmp(not_found);
6066 
6067  // Scratch register contains result when we fall through to here.
6068  Register result = candidate;
6069  __ bind(&found_in_symbol_table);
6070  __ pop(mask); // Pop saved mask from the stack.
6071  if (!result.is(eax)) {
6072  __ mov(eax, result);
6073  }
6074 }
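
// The probe sequence above on a plain array, as a self-contained sketch.
// The quadratic offsets i * (i + 1) / 2 are an assumption standing in for
// SymbolTable::GetProbeOffset, the real table stores heap objects, and 0
// stands in for the undefined sentinel here.
static int TwoCharProbeSketch(const unsigned short* table, unsigned capacity,
                              unsigned hash, unsigned short chars) {
  unsigned mask = capacity - 1;             // capacity is a power of two
  for (int i = 0; i < 4; i++) {             // kProbes == 4, as above
    unsigned entry = hash;
    if (i > 0) entry += i * (i + 1) / 2;    // assumed probe offset formula
    entry &= mask;
    if (table[entry] == 0) return -1;       // undefined: string not present
    if (table[entry] == chars) return static_cast<int>(entry);  // found
  }
  return -1;                                // give up after kProbes probes
}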
6075 
6076 
6077 void StringHelper::GenerateHashInit(MacroAssembler* masm,
6078  Register hash,
6079  Register character,
6080  Register scratch) {
6081  // hash = (seed + character) + ((seed + character) << 10);
6082  if (Serializer::enabled()) {
6083  ExternalReference roots_array_start =
6084  ExternalReference::roots_array_start(masm->isolate());
6085  __ mov(scratch, Immediate(Heap::kHashSeedRootIndex));
6086  __ mov(scratch, Operand::StaticArray(scratch,
6087  times_pointer_size,
6088  roots_array_start));
6089  __ SmiUntag(scratch);
6090  __ add(scratch, character);
6091  __ mov(hash, scratch);
6092  __ shl(scratch, 10);
6093  __ add(hash, scratch);
6094  } else {
6095  int32_t seed = masm->isolate()->heap()->HashSeed();
6096  __ lea(scratch, Operand(character, seed));
6097  __ shl(scratch, 10);
6098  __ lea(hash, Operand(scratch, character, times_1, seed));
6099  }
6100  // hash ^= hash >> 6;
6101  __ mov(scratch, hash);
6102  __ shr(scratch, 6);
6103  __ xor_(hash, scratch);
6104 }
6105 
6106 
6107 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
6108  Register hash,
6109  Register character,
6110  Register scratch) {
6111  // hash += character;
6112  __ add(hash, character);
6113  // hash += hash << 10;
6114  __ mov(scratch, hash);
6115  __ shl(scratch, 10);
6116  __ add(hash, scratch);
6117  // hash ^= hash >> 6;
6118  __ mov(scratch, hash);
6119  __ shr(scratch, 6);
6120  __ xor_(hash, scratch);
6121 }
6122 
6123 
6124 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
6125  Register hash,
6126  Register scratch) {
6127  // hash += hash << 3;
6128  __ mov(scratch, hash);
6129  __ shl(scratch, 3);
6130  __ add(hash, scratch);
6131  // hash ^= hash >> 11;
6132  __ mov(scratch, hash);
6133  __ shr(scratch, 11);
6134  __ xor_(hash, scratch);
6135  // hash += hash << 15;
6136  __ mov(scratch, hash);
6137  __ shl(scratch, 15);
6138  __ add(hash, scratch);
6139 
6140  __ and_(hash, String::kHashBitMask);
6141 
6142  // if (hash == 0) hash = 27;
6143  Label hash_not_zero;
6144  __ j(not_zero, &hash_not_zero, Label::kNear);
6145  __ mov(hash, Immediate(StringHasher::kZeroHash));
6146  __ bind(&hash_not_zero);
6147 }
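
// A C++ model of the hash the three helpers above compute (an illustrative
// sketch, not code used by V8; the 30-bit mask is an assumed stand-in for
// String::kHashBitMask, and 27 is StringHasher::kZeroHash per the
// "if (hash == 0) hash = 27" comment in GenerateHashGetHash).
static uint32_t StringHashSketch(const unsigned char* chars, int length,
                                 uint32_t seed) {
  uint32_t hash = seed;
  for (int i = 0; i < length; i++) {
    hash += chars[i];            // GenerateHashInit / GenerateHashAddCharacter
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  hash += hash << 3;             // GenerateHashGetHash finalization
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= (1u << 30) - 1;        // assumed value of String::kHashBitMask
  return hash == 0 ? 27 : hash;  // 27 == StringHasher::kZeroHash
}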
6148 
6149 
6150 void SubStringStub::Generate(MacroAssembler* masm) {
6151  Label runtime;
6152 
6153  // Stack frame on entry.
6154  // esp[0]: return address
6155  // esp[4]: to
6156  // esp[8]: from
6157  // esp[12]: string
6158 
6159  // Make sure first argument is a string.
6160  __ mov(eax, Operand(esp, 3 * kPointerSize));
6161  STATIC_ASSERT(kSmiTag == 0);
6162  __ JumpIfSmi(eax, &runtime);
6163  Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
6164  __ j(NegateCondition(is_string), &runtime);
6165 
6166  // eax: string
6167  // ebx: instance type
6168 
6169  // Calculate length of sub string using the smi values.
6170  __ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index.
6171  __ JumpIfNotSmi(ecx, &runtime);
6172  __ mov(edx, Operand(esp, 2 * kPointerSize)); // From index.
6173  __ JumpIfNotSmi(edx, &runtime);
6174  __ sub(ecx, edx);
6175  __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
6176  Label not_original_string;
6177  // Shorter than original string's length: an actual substring.
6178  __ j(below, &not_original_string, Label::kNear);
6179  // Longer than original string's length or negative: unsafe arguments.
6180  __ j(above, &runtime);
6181  // Return original string.
6182  Counters* counters = masm->isolate()->counters();
6183  __ IncrementCounter(counters->sub_string_native(), 1);
6184  __ ret(3 * kPointerSize);
6185  __ bind(&not_original_string);
6186 
6187  // eax: string
6188  // ebx: instance type
6189  // ecx: sub string length (smi)
6190  // edx: from index (smi)
6191  // Deal with different string types: update the index if necessary
6192  // and put the underlying string into edi.
6193  Label underlying_unpacked, sliced_string, seq_or_external_string;
6194  // If the string is not indirect, it can only be sequential or external.
6195  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
6196  STATIC_ASSERT(kIsIndirectStringMask != 0);
6197  __ test(ebx, Immediate(kIsIndirectStringMask));
6198  __ j(zero, &seq_or_external_string, Label::kNear);
6199 
6200  Factory* factory = masm->isolate()->factory();
6201  __ test(ebx, Immediate(kSlicedNotConsMask));
6202  __ j(not_zero, &sliced_string, Label::kNear);
6203  // Cons string. Check whether it is flat, then fetch first part.
6204  // Flat cons strings have an empty second part.
6205  __ cmp(FieldOperand(eax, ConsString::kSecondOffset),
6206  factory->empty_string());
6207  __ j(not_equal, &runtime);
6208  __ mov(edi, FieldOperand(eax, ConsString::kFirstOffset));
6209  // Update instance type.
6210  __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
6211  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
6212  __ jmp(&underlying_unpacked, Label::kNear);
6213 
6214  __ bind(&sliced_string);
6215  // Sliced string. Fetch parent and adjust start index by offset.
6216  __ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset));
6217  __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
6218  // Update instance type.
6219  __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
6220  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
6221  __ jmp(&underlying_unpacked, Label::kNear);
6222 
6223  __ bind(&seq_or_external_string);
6224  // Sequential or external string. Just move string to the expected register.
6225  __ mov(edi, eax);
6226 
6227  __ bind(&underlying_unpacked);
6228 
6229  if (FLAG_string_slices) {
6230  Label copy_routine;
6231  // edi: underlying subject string
6232  // ebx: instance type of underlying subject string
6233  // edx: adjusted start index (smi)
6234  // ecx: length (smi)
6235  __ cmp(ecx, Immediate(Smi::FromInt(SlicedString::kMinLength)));
6236  // Short slice. Copy instead of slicing.
6237  __ j(less, &copy_routine);
6238  // Allocate new sliced string. At this point we do not reload the instance
6239  // type including the string encoding because we simply rely on the info
6240  // provided by the original string. It does not matter if the original
6241  // string's encoding is wrong because we always have to recheck encoding of
6242  // the newly created string's parent anyway due to externalized strings.
6243  Label two_byte_slice, set_slice_header;
6244  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
6245  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
6246  __ test(ebx, Immediate(kStringEncodingMask));
6247  __ j(zero, &two_byte_slice, Label::kNear);
6248  __ AllocateAsciiSlicedString(eax, ebx, no_reg, &runtime);
6249  __ jmp(&set_slice_header, Label::kNear);
6250  __ bind(&two_byte_slice);
6251  __ AllocateTwoByteSlicedString(eax, ebx, no_reg, &runtime);
6252  __ bind(&set_slice_header);
6253  __ mov(FieldOperand(eax, SlicedString::kLengthOffset), ecx);
6254  __ mov(FieldOperand(eax, SlicedString::kHashFieldOffset),
6255  Immediate(String::kEmptyHashField));
6256  __ mov(FieldOperand(eax, SlicedString::kParentOffset), edi);
6257  __ mov(FieldOperand(eax, SlicedString::kOffsetOffset), edx);
6258  __ IncrementCounter(counters->sub_string_native(), 1);
6259  __ ret(3 * kPointerSize);
6260 
6261  __ bind(&copy_routine);
6262  }
6263 
6264  // edi: underlying subject string
6265  // ebx: instance type of underlying subject string
6266  // edx: adjusted start index (smi)
6267  // ecx: length (smi)
6268  // The subject string can only be external or sequential string of either
6269  // encoding at this point.
6270  Label two_byte_sequential, runtime_drop_two, sequential_string;
6271  STATIC_ASSERT(kExternalStringTag != 0);
6272  STATIC_ASSERT(kSeqStringTag == 0);
6273  __ test_b(ebx, kExternalStringTag);
6274  __ j(zero, &sequential_string);
6275 
6276  // Handle external string.
6277  // Rule out short external strings.
6278  STATIC_ASSERT(kShortExternalStringTag != 0);
6279  __ test_b(ebx, kShortExternalStringMask);
6280  __ j(not_zero, &runtime);
6281  __ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));
6282  // Move the pointer so that offset-wise, it looks like a sequential string.
6283  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
6284  __ sub(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6285 
6286  __ bind(&sequential_string);
6287  // Stash away (adjusted) index and (underlying) string.
6288  __ push(edx);
6289  __ push(edi);
6290  __ SmiUntag(ecx);
6291  STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
6292  __ test_b(ebx, kStringEncodingMask);
6293  __ j(zero, &two_byte_sequential);
6294 
6295  // Sequential ASCII string. Allocate the result.
6296  __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
6297 
6298  // eax: result string
6299  // ecx: result string length
6300  __ mov(edx, esi); // esi used by following code.
6301  // Locate first character of result.
6302  __ mov(edi, eax);
6303  __ add(edi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6304  // Load string argument and locate character of sub string start.
6305  __ pop(esi);
6306  __ pop(ebx);
6307  __ SmiUntag(ebx);
6308  __ lea(esi, FieldOperand(esi, ebx, times_1, SeqAsciiString::kHeaderSize));
6309 
6310  // eax: result string
6311  // ecx: result length
6312  // edx: original value of esi
6313  // edi: first character of result
6314  // esi: character of sub string start
6315  StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
6316  __ mov(esi, edx); // Restore esi.
6317  __ IncrementCounter(counters->sub_string_native(), 1);
6318  __ ret(3 * kPointerSize);
6319 
6320  __ bind(&two_byte_sequential);
6321  // Sequential two-byte string. Allocate the result.
6322  __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
6323 
6324  // eax: result string
6325  // ecx: result string length
6326  __ mov(edx, esi); // esi used by following code.
6327  // Locate first character of result.
6328  __ mov(edi, eax);
6329  __ add(edi,
6331  // Load string argument and locate character of sub string start.
6332  __ pop(esi);
6333  __ pop(ebx);
6334  // As from is a smi it is 2 times the value, which matches the size of a
6335  // two-byte character.
6336  STATIC_ASSERT(kSmiTag == 0);
6337  STATIC_ASSERT(kSmiTagSize == 1 && kSmiShiftSize == 0);
6338  __ lea(esi, FieldOperand(esi, ebx, times_1, SeqTwoByteString::kHeaderSize));
6339 
6340  // eax: result string
6341  // ecx: result length
6342  // edx: original value of esi
6343  // edi: first character of result
6344  // esi: character of sub string start
6345  StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
6346  __ mov(esi, edx); // Restore esi.
6347  __ IncrementCounter(counters->sub_string_native(), 1);
6348  __ ret(3 * kPointerSize);
6349 
6350  // Drop pushed values on the stack before tail call.
6351  __ bind(&runtime_drop_two);
6352  __ Drop(2);
6353 
6354  // Just jump to runtime to create the sub string.
6355  __ bind(&runtime);
6356  __ TailCallRuntime(Runtime::kSubString, 3, 1);
6357 }
6358 
6359 
6360 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
6361  Register left,
6362  Register right,
6363  Register scratch1,
6364  Register scratch2) {
6365  Register length = scratch1;
6366 
6367  // Compare lengths.
6368  Label strings_not_equal, check_zero_length;
6369  __ mov(length, FieldOperand(left, String::kLengthOffset));
6370  __ cmp(length, FieldOperand(right, String::kLengthOffset));
6371  __ j(equal, &check_zero_length, Label::kNear);
6372  __ bind(&strings_not_equal);
6373  __ Set(eax, Immediate(Smi::FromInt(NOT_EQUAL)));
6374  __ ret(0);
6375 
6376  // Check if the length is zero.
6377  Label compare_chars;
6378  __ bind(&check_zero_length);
6379  STATIC_ASSERT(kSmiTag == 0);
6380  __ test(length, length);
6381  __ j(not_zero, &compare_chars, Label::kNear);
6382  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
6383  __ ret(0);
6384 
6385  // Compare characters.
6386  __ bind(&compare_chars);
6387  GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
6388  &strings_not_equal, Label::kNear);
6389 
6390  // Characters are equal.
6391  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
6392  __ ret(0);
6393 }
6394 
6395 
6396 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
6397  Register left,
6398  Register right,
6399  Register scratch1,
6400  Register scratch2,
6401  Register scratch3) {
6402  Counters* counters = masm->isolate()->counters();
6403  __ IncrementCounter(counters->string_compare_native(), 1);
6404 
6405  // Find minimum length.
6406  Label left_shorter;
6407  __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
6408  __ mov(scratch3, scratch1);
6409  __ sub(scratch3, FieldOperand(right, String::kLengthOffset));
6410 
6411  Register length_delta = scratch3;
6412 
6413  __ j(less_equal, &left_shorter, Label::kNear);
6414  // Right string is shorter. Change scratch1 to be length of right string.
6415  __ sub(scratch1, length_delta);
6416  __ bind(&left_shorter);
6417 
6418  Register min_length = scratch1;
6419 
6420  // If either length is zero, just compare lengths.
6421  Label compare_lengths;
6422  __ test(min_length, min_length);
6423  __ j(zero, &compare_lengths, Label::kNear);
6424 
6425  // Compare characters.
6426  Label result_not_equal;
6427  GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
6428  &result_not_equal, Label::kNear);
6429 
6430  // Compare lengths - strings up to min-length are equal.
6431  __ bind(&compare_lengths);
6432  __ test(length_delta, length_delta);
6433  __ j(not_zero, &result_not_equal, Label::kNear);
6434 
6435  // Result is EQUAL.
6436  STATIC_ASSERT(EQUAL == 0);
6437  STATIC_ASSERT(kSmiTag == 0);
6438  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
6439  __ ret(0);
6440 
6441  Label result_greater;
6442  __ bind(&result_not_equal);
6443  __ j(greater, &result_greater, Label::kNear);
6444 
6445  // Result is LESS.
6446  __ Set(eax, Immediate(Smi::FromInt(LESS)));
6447  __ ret(0);
6448 
6449  // Result is GREATER.
6450  __ bind(&result_greater);
6451  __ Set(eax, Immediate(Smi::FromInt(GREATER)));
6452  __ ret(0);
6453 }
6454 
6455 
6456 void StringCompareStub::GenerateAsciiCharsCompareLoop(
6457  MacroAssembler* masm,
6458  Register left,
6459  Register right,
6460  Register length,
6461  Register scratch,
6462  Label* chars_not_equal,
6463  Label::Distance chars_not_equal_near) {
6464  // Change index to run from -length to -1 by adding length to string
6465  // start. This means that loop ends when index reaches zero, which
6466  // doesn't need an additional compare.
6467  __ SmiUntag(length);
6468  __ lea(left,
6469  FieldOperand(left, length, times_1, SeqAsciiString::kHeaderSize));
6470  __ lea(right,
6471  FieldOperand(right, length, times_1, SeqAsciiString::kHeaderSize));
6472  __ neg(length);
6473  Register index = length; // index = -length;
6474 
6475  // Compare loop.
6476  Label loop;
6477  __ bind(&loop);
6478  __ mov_b(scratch, Operand(left, index, times_1, 0));
6479  __ cmpb(scratch, Operand(right, index, times_1, 0));
6480  __ j(not_equal, chars_not_equal, chars_not_equal_near);
6481  __ inc(index);
6482  __ j(not_zero, &loop);
6483 }
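
// The same loop in C++ (a sketch): biasing both pointers by length lets a
// single negative index serve as both cursor and termination test.
static bool AsciiCharsEqualSketch(const char* left, const char* right,
                                  int length) {
  left += length;                 // point just past the last character
  right += length;
  for (int index = -length; index != 0; index++) {
    if (left[index] != right[index]) return false;  // chars_not_equal
  }
  return true;
}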
6484 
6485 
6486 void StringCompareStub::Generate(MacroAssembler* masm) {
6487  Label runtime;
6488 
6489  // Stack frame on entry.
6490  // esp[0]: return address
6491  // esp[4]: right string
6492  // esp[8]: left string
6493 
6494  __ mov(edx, Operand(esp, 2 * kPointerSize)); // left
6495  __ mov(eax, Operand(esp, 1 * kPointerSize)); // right
6496 
6497  Label not_same;
6498  __ cmp(edx, eax);
6499  __ j(not_equal, &not_same, Label::kNear);
6500  STATIC_ASSERT(EQUAL == 0);
6501  STATIC_ASSERT(kSmiTag == 0);
6502  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
6503  __ IncrementCounter(masm->isolate()->counters()->string_compare_native(), 1);
6504  __ ret(2 * kPointerSize);
6505 
6506  __ bind(&not_same);
6507 
6508  // Check that both objects are sequential ASCII strings.
6509  __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
6510 
6511  // Compare flat ASCII strings.
6512  // Drop arguments from the stack.
6513  __ pop(ecx);
6514  __ add(esp, Immediate(2 * kPointerSize));
6515  __ push(ecx);
6516  GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
6517 
6518  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
6519  // tagged as a small integer.
6520  __ bind(&runtime);
6521  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
6522 }
6523 
6524 
6525 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
6526  ASSERT(state_ == CompareIC::SMIS);
6527  Label miss;
6528  __ mov(ecx, edx);
6529  __ or_(ecx, eax);
6530  __ JumpIfNotSmi(ecx, &miss, Label::kNear);
6531 
6532  if (GetCondition() == equal) {
6533  // For equality we do not care about the sign of the result.
6534  __ sub(eax, edx);
6535  } else {
6536  Label done;
6537  __ sub(edx, eax);
6538  __ j(no_overflow, &done, Label::kNear);
6539  // Correct sign of result in case of overflow.
6540  __ not_(edx);
6541  __ bind(&done);
6542  __ mov(eax, edx);
6543  }
6544  __ ret(0);
6545 
6546  __ bind(&miss);
6547  GenerateMiss(masm);
6548 }
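
// The overflow trick above in C++ (an illustrative sketch; the GCC builtin
// __builtin_sub_overflow stands in for the CPU's overflow flag and is not
// something the stub itself uses).
static int32_t SmiCompareSketch(int32_t left, int32_t right) {
  int32_t diff;
  if (__builtin_sub_overflow(left, right, &diff)) {
    diff = ~diff;  // subtraction wrapped: NOT restores the correct sign bit
  }
  return diff;     // negative: left < right, zero: equal, positive: greater
}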
6549 
6550 
6551 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
6552  ASSERT(state_ == CompareIC::HEAP_NUMBERS);
6553 
6554  Label generic_stub;
6555  Label unordered, maybe_undefined1, maybe_undefined2;
6556  Label miss;
6557  __ mov(ecx, edx);
6558  __ and_(ecx, eax);
6559  __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
6560 
6561  __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
6562  __ j(not_equal, &maybe_undefined1, Label::kNear);
6563  __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
6564  __ j(not_equal, &maybe_undefined2, Label::kNear);
6565 
6566  // Inlining the double comparison and falling back to the general compare
6567  // stub if NaN is involved or SSE2 or CMOV is unsupported.
6568  if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) {
6569  CpuFeatures::Scope scope1(SSE2);
6570  CpuFeatures::Scope scope2(CMOV);
6571 
6572  // Load left and right operand
6573  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
6574  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
6575 
6576  // Compare operands
6577  __ ucomisd(xmm0, xmm1);
6578 
6579  // Don't base result on EFLAGS when a NaN is involved.
6580  __ j(parity_even, &unordered, Label::kNear);
6581 
6582  // Return a result of -1, 0, or 1, based on EFLAGS.
6583  // Performing mov, because xor would destroy the flag register.
6584  __ mov(eax, 0); // equal
6585  __ mov(ecx, Immediate(Smi::FromInt(1)));
6586  __ cmov(above, eax, ecx);
6587  __ mov(ecx, Immediate(Smi::FromInt(-1)));
6588  __ cmov(below, eax, ecx);
6589  __ ret(0);
6590  }
6591 
6592  __ bind(&unordered);
6593  CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
6594  __ bind(&generic_stub);
6595  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
6596 
6597  __ bind(&maybe_undefined1);
6598  if (Token::IsOrderedRelationalCompareOp(op_)) {
6599  __ cmp(eax, Immediate(masm->isolate()->factory()->undefined_value()));
6600  __ j(not_equal, &miss);
6601  __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
6602  __ j(not_equal, &maybe_undefined2, Label::kNear);
6603  __ jmp(&unordered);
6604  }
6605 
6606  __ bind(&maybe_undefined2);
6607  if (Token::IsOrderedRelationalCompareOp(op_)) {
6608  __ cmp(edx, Immediate(masm->isolate()->factory()->undefined_value()));
6609  __ j(equal, &unordered);
6610  }
6611 
6612  __ bind(&miss);
6613  GenerateMiss(masm);
6614 }
6615 
6616 
6617 void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
6618  ASSERT(state_ == CompareIC::SYMBOLS);
6619  ASSERT(GetCondition() == equal);
6620 
6621  // Registers containing left and right operands respectively.
6622  Register left = edx;
6623  Register right = eax;
6624  Register tmp1 = ecx;
6625  Register tmp2 = ebx;
6626 
6627  // Check that both operands are heap objects.
6628  Label miss;
6629  __ mov(tmp1, left);
6630  STATIC_ASSERT(kSmiTag == 0);
6631  __ and_(tmp1, right);
6632  __ JumpIfSmi(tmp1, &miss, Label::kNear);
6633 
6634  // Check that both operands are symbols.
6635  __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
6636  __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
6637  __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
6638  __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
6639  STATIC_ASSERT(kSymbolTag != 0);
6640  __ and_(tmp1, tmp2);
6641  __ test(tmp1, Immediate(kIsSymbolMask));
6642  __ j(zero, &miss, Label::kNear);
6643 
6644  // Symbols are compared by identity.
6645  Label done;
6646  __ cmp(left, right);
6647  // Make sure eax is non-zero. At this point input operands are
6648  // guaranteed to be non-zero.
6649  ASSERT(right.is(eax));
6650  __ j(not_equal, &done, Label::kNear);
6651  STATIC_ASSERT(EQUAL == 0);
6652  STATIC_ASSERT(kSmiTag == 0);
6653  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
6654  __ bind(&done);
6655  __ ret(0);
6656 
6657  __ bind(&miss);
6658  GenerateMiss(masm);
6659 }
6660 
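The "not equal" exit above returns without ever writing eax: the right operand, a heap pointer, is still there, and a heap pointer is never zero, while the "equal" exit loads Smi::FromInt(EQUAL) == 0. Equality IC callers only test the result against zero, so no explicit -1/+1 is needed. A tiny standalone sketch (plain C++, not V8 code) of the trick:

#include <cstdint>
#include <cstdio>

// Identity compare as in GenerateSymbols: zero means equal, any
// non-zero value (here, the right pointer itself) means not equal.
intptr_t CompareSymbols(const void* left, const void* right) {
  if (left != right) return reinterpret_cast<intptr_t>(right);  // != 0.
  return 0;  // Smi::FromInt(EQUAL), since EQUAL == 0 and kSmiTag == 0.
}

int main() {
  int a, b;
  std::printf("%d\n", CompareSymbols(&a, &a) == 0);  // 1: equal.
  std::printf("%d\n", CompareSymbols(&a, &b) != 0);  // 1: not equal.
}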
6661 
6662 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
6663  ASSERT(state_ == CompareIC::STRINGS);
6664  Label miss;
6665 
6666  bool equality = Token::IsEqualityOp(op_);
6667 
6668  // Registers containing left and right operands respectively.
6669  Register left = edx;
6670  Register right = eax;
6671  Register tmp1 = ecx;
6672  Register tmp2 = ebx;
6673  Register tmp3 = edi;
6674 
6675  // Check that both operands are heap objects.
6676  __ mov(tmp1, left);
6677  STATIC_ASSERT(kSmiTag == 0);
6678  __ and_(tmp1, right);
6679  __ JumpIfSmi(tmp1, &miss);
6680 
6681  // Check that both operands are strings. This leaves the instance
6682  // types loaded in tmp1 and tmp2.
6683  __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
6684  __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
6685  __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
6686  __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
6687  __ mov(tmp3, tmp1);
6688  STATIC_ASSERT(kNotStringTag != 0);
6689  __ or_(tmp3, tmp2);
6690  __ test(tmp3, Immediate(kIsNotStringMask));
6691  __ j(not_zero, &miss);
6692 
6693  // Fast check for identical strings.
6694  Label not_same;
6695  __ cmp(left, right);
6696  __ j(not_equal, &not_same, Label::kNear);
6697  STATIC_ASSERT(EQUAL == 0);
6698  STATIC_ASSERT(kSmiTag == 0);
6699  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
6700  __ ret(0);
6701 
6702  // Handle not identical strings.
6703  __ bind(&not_same);
6704 
6705  // Check that both strings are symbols. If they are, we're done
6706  // because we already know they are not identical. But in the case of
6707  // non-equality compare, we still need to determine the order.
6708  if (equality) {
6709  Label do_compare;
6710  STATIC_ASSERT(kSymbolTag != 0);
6711  __ and_(tmp1, tmp2);
6712  __ test(tmp1, Immediate(kIsSymbolMask));
6713  __ j(zero, &do_compare, Label::kNear);
6714  // Make sure eax is non-zero. At this point input operands are
6715  // guaranteed to be non-zero.
6716  ASSERT(right.is(eax));
6717  __ ret(0);
6718  __ bind(&do_compare);
6719  }
6720 
6721  // Check that both strings are sequential ASCII.
6722  Label runtime;
6723  __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
6724 
6725  // Compare flat ASCII strings. Returns when done.
6726  if (equality) {
6727  StringCompareStub::GenerateFlatAsciiStringEquals(
6728  masm, left, right, tmp1, tmp2);
6729  } else {
6730  StringCompareStub::GenerateCompareFlatAsciiStrings(
6731  masm, left, right, tmp1, tmp2, tmp3);
6732  }
6733 
6734  // Handle more complex cases in runtime.
6735  __ bind(&runtime);
6736  __ pop(tmp1); // Return address.
6737  __ push(left);
6738  __ push(right);
6739  __ push(tmp1);
6740  if (equality) {
6741  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
6742  } else {
6743  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
6744  }
6745 
6746  __ bind(&miss);
6747  GenerateMiss(masm);
6748 }
6749 
6750 
6751 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
6752  ASSERT(state_ == CompareIC::OBJECTS);
6753  Label miss;
6754  __ mov(ecx, edx);
6755  __ and_(ecx, eax);
6756  __ JumpIfSmi(ecx, &miss, Label::kNear);
6757 
6758  __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
6759  __ j(not_equal, &miss, Label::kNear);
6760  __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
6761  __ j(not_equal, &miss, Label::kNear);
6762 
6763  ASSERT(GetCondition() == equal);
6764  __ sub(eax, edx);
6765  __ ret(0);
6766 
6767  __ bind(&miss);
6768  GenerateMiss(masm);
6769 }
6770 
6771 
6772 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
6773  Label miss;
6774  __ mov(ecx, edx);
6775  __ and_(ecx, eax);
6776  __ JumpIfSmi(ecx, &miss, Label::kNear);
6777 
6778  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
6779  __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
6780  __ cmp(ecx, known_map_);
6781  __ j(not_equal, &miss, Label::kNear);
6782  __ cmp(ebx, known_map_);
6783  __ j(not_equal, &miss, Label::kNear);
6784 
6785  __ sub(eax, edx);
6786  __ ret(0);
6787 
6788  __ bind(&miss);
6789  GenerateMiss(masm);
6790 }
6791 
6792 
6793 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
6794  {
6795  // Call the runtime system in a fresh internal frame.
6796  ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
6797  masm->isolate());
6798  FrameScope scope(masm, StackFrame::INTERNAL);
6799  __ push(edx); // Preserve edx and eax.
6800  __ push(eax);
6801  __ push(edx); // And also use them as the arguments.
6802  __ push(eax);
6803  __ push(Immediate(Smi::FromInt(op_)));
6804  __ CallExternalReference(miss, 3);
6805  // Compute the entry point of the rewritten stub.
6806  __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
6807  __ pop(eax);
6808  __ pop(edx);
6809  }
6810 
6811  // Do a tail call to the rewritten stub.
6812  __ jmp(edi);
6813 }
6814 
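The lea above turns the Code object the miss handler returned in eax into a jump target, relying on V8's pointer tagging: heap pointers carry kHeapObjectTag (1) in the low bits, and FieldOperand(obj, offset) encodes obj + offset - kHeapObjectTag. A small sketch of that arithmetic (the kHeaderSize value below is illustrative only, not the real Code layout):

#include <cstdint>
#include <cstdio>

const intptr_t kHeapObjectTag = 1;

// What FieldOperand encodes: field at byte offset N inside a tagged
// object lives at pointer + N - kHeapObjectTag.
intptr_t FieldAddress(intptr_t tagged_ptr, int offset) {
  return tagged_ptr + offset - kHeapObjectTag;
}

int main() {
  intptr_t object_start = 0x1000;                  // Hypothetical address.
  intptr_t tagged = object_start | kHeapObjectTag;
  int kHeaderSize = 32;                            // Illustrative value.
  std::printf("%#lx\n", (unsigned long)FieldAddress(tagged, kHeaderSize));
  // Prints 0x1020: the untagged object start plus the header size,
  // i.e. the first instruction of the rewritten stub.
}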
6815 
6816 // Helper function used to check that the dictionary doesn't contain
6817 // the property. This function may return false negatives, so miss_label
6818 // must always call a backup property check that is complete.
6819 // This function is safe to call if the receiver has fast properties.
6820 // Name must be a symbol and receiver must be a heap object.
6821 void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
6822  Label* miss,
6823  Label* done,
6824  Register properties,
6825  Handle<String> name,
6826  Register r0) {
6827  ASSERT(name->IsSymbol());
6828 
6829  // If names of slots in range from 1 to kProbes - 1 for the hash value are
6830  // not equal to the name and kProbes-th slot is not used (its name is the
6831  // undefined value), it guarantees the hash table doesn't contain the
6832  // property. It's true even if some slots represent deleted properties
6833  // (their names are the hole value).
6834  for (int i = 0; i < kInlinedProbes; i++) {
6835  // Compute the masked index: (hash + i + i * i) & mask.
6836  Register index = r0;
6837  // Capacity is a smi 2^n.
6838  __ mov(index, FieldOperand(properties, kCapacityOffset));
6839  __ dec(index);
6840  __ and_(index,
6841  Immediate(Smi::FromInt(name->Hash() +
6842  StringDictionary::GetProbeOffset(i))));
6843 
6844  // Scale the index by multiplying by the entry size.
6845  ASSERT(StringDictionary::kEntrySize == 3);
6846  __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
6847  Register entity_name = r0;
6848  // Having undefined at this place means the name is not contained.
6849  ASSERT_EQ(kSmiTagSize, 1);
6850  __ mov(entity_name, Operand(properties, index, times_half_pointer_size,
6851  kElementsStartOffset - kHeapObjectTag));
6852  __ cmp(entity_name, masm->isolate()->factory()->undefined_value());
6853  __ j(equal, done);
6854 
6855  // Stop if found the property.
6856  __ cmp(entity_name, Handle<String>(name));
6857  __ j(equal, miss);
6858 
6859  Label the_hole;
6860  // Check for the hole and skip.
6861  __ cmp(entity_name, masm->isolate()->factory()->the_hole_value());
6862  __ j(equal, &the_hole, Label::kNear);
6863 
6864  // Check if the entry name is not a symbol.
6865  __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
6866  __ test_b(FieldOperand(entity_name, Map::kInstanceTypeOffset),
6867  kIsSymbolMask);
6868  __ j(zero, miss);
6869  __ bind(&the_hole);
6870  }
6871 
6872  StringDictionaryLookupStub stub(properties,
6873  r0,
6874  r0,
6875  NEGATIVE_LOOKUP);
6876  __ push(Immediate(Handle<Object>(name)));
6877  __ push(Immediate(name->Hash()));
6878  __ CallStub(&stub);
6879  __ test(r0, r0);
6880  __ j(not_zero, miss);
6881  __ jmp(done);
6882 }
6883 
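All three lookup routines in this stub probe with the masked index (hash + probe offset) & mask and then scale the slot index by 3, because each dictionary entry is a key/value/details triple (kEntrySize == 3). A standalone sketch of the probe sequence, assuming GetProbeOffset(i) returns the triangular number (i + i*i)/2 as in V8's HashTable; for a power-of-two capacity this visits every slot before repeating, so a probe loop terminates without extra bookkeeping:

#include <cstdio>

int main() {
  const unsigned capacity = 8;     // Always a power of two in V8.
  const unsigned mask = capacity - 1;
  const unsigned hash = 12345;
  bool seen[capacity] = {false};
  for (unsigned i = 0; i < capacity; i++) {
    // Probe i looks at (hash + (i + i*i)/2) & mask.
    unsigned slot = (hash + (i + i * i) / 2) & mask;
    seen[slot] = true;
    std::printf("probe %u -> slot %u\n", i, slot);
  }
  bool all = true;
  for (unsigned s = 0; s < capacity; s++) all = all && seen[s];
  std::printf("all slots covered: %d\n", all);  // 1
}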
6884 
6885 // Probe the string dictionary in the |elements| register. Jump to the
6886 // |done| label if a property with the given name is found leaving the
6887 // index into the dictionary in |r0|. Jump to the |miss| label
6888 // otherwise.
6889 void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
6890  Label* miss,
6891  Label* done,
6892  Register elements,
6893  Register name,
6894  Register r0,
6895  Register r1) {
6896  ASSERT(!elements.is(r0));
6897  ASSERT(!elements.is(r1));
6898  ASSERT(!name.is(r0));
6899  ASSERT(!name.is(r1));
6900 
6901  // Assert that name contains a string.
6902  if (FLAG_debug_code) __ AbortIfNotString(name);
6903 
6904  __ mov(r1, FieldOperand(elements, kCapacityOffset));
6905  __ shr(r1, kSmiTagSize); // convert smi to int
6906  __ dec(r1);
6907 
6908  // Generate an unrolled loop that performs a few probes before
6909  // giving up. Measurements done on Gmail indicate that 2 probes
6910  // cover ~93% of loads from dictionaries.
6911  for (int i = 0; i < kInlinedProbes; i++) {
6912  // Compute the masked index: (hash + i + i * i) & mask.
6913  __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
6914  __ shr(r0, String::kHashShift);
6915  if (i > 0) {
6916  __ add(r0, Immediate(StringDictionary::GetProbeOffset(i)));
6917  }
6918  __ and_(r0, r1);
6919 
6920  // Scale the index by multiplying by the entry size.
6921  ASSERT(StringDictionary::kEntrySize == 3);
6922  __ lea(r0, Operand(r0, r0, times_2, 0)); // r0 = r0 * 3
6923 
6924  // Check if the key is identical to the name.
6925  __ cmp(name, Operand(elements,
6926  r0,
6927  times_4,
6928  kElementsStartOffset - kHeapObjectTag));
6929  __ j(equal, done);
6930  }
6931 
6932  StringDictionaryLookupStub stub(elements,
6933  r1,
6934  r0,
6935  POSITIVE_LOOKUP);
6936  __ push(name);
6937  __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
6938  __ shr(r0, String::kHashShift);
6939  __ push(r0);
6940  __ CallStub(&stub);
6941 
6942  __ test(r1, r1);
6943  __ j(zero, miss);
6944  __ jmp(done);
6945 }
6946 
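The three instructions at the top of GeneratePositiveLookup hide a smi detail worth spelling out: the capacity is stored tagged (value << kSmiTagSize, with a zero tag bit), so a single shift untags it, and the dec then produces the power-of-two mask used by every probe. A tiny sketch:

#include <cstdio>

int main() {
  const int kSmiTagSize = 1;             // ia32: low tag bit, tag is 0.
  int capacity = 64;
  int tagged = capacity << kSmiTagSize;  // How the heap stores it: 128.
  int untagged = tagged >> kSmiTagSize;  // __ shr(r1, kSmiTagSize) -> 64.
  int mask = untagged - 1;               // __ dec(r1) -> 63 = 0b111111.
  std::printf("tagged=%d untagged=%d mask=%d\n", tagged, untagged, mask);
}

GenerateNegativeLookup, by contrast, masks while still in smi form (dec of the tagged capacity, and_ with a smi-tagged immediate) and compensates with the times_half_pointer_size address scale.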
6947 
6948 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
6949  // This stub overrides SometimesSetsUpAFrame() to return false. That means
6950  // we cannot call anything that could cause a GC from this stub.
6951  // Stack frame on entry:
6952  // esp[0 * kPointerSize]: return address.
6953  // esp[1 * kPointerSize]: key's hash.
6954  // esp[2 * kPointerSize]: key.
6955  // Registers:
6956  // dictionary_: StringDictionary to probe.
6957  // result_: used as scratch.
6958  // index_: will hold an index of entry if lookup is successful.
6959  // might alias with result_.
6960  // Returns:
6961  // result_ is zero if lookup failed, non zero otherwise.
6962 
6963  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
6964 
6965  Register scratch = result_;
6966 
6967  __ mov(scratch, FieldOperand(dictionary_, kCapacityOffset));
6968  __ dec(scratch);
6969  __ SmiUntag(scratch);
6970  __ push(scratch);
6971 
6972  // If names of slots in range from 1 to kProbes - 1 for the hash value are
6973  // not equal to the name and kProbes-th slot is not used (its name is the
6974  // undefined value), it guarantees the hash table doesn't contain the
6975  // property. It's true even if some slots represent deleted properties
6976  // (their names are the null value).
6977  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
6978  // Compute the masked index: (hash + i + i * i) & mask.
6979  __ mov(scratch, Operand(esp, 2 * kPointerSize));
6980  if (i > 0) {
6981  __ add(scratch, Immediate(StringDictionary::GetProbeOffset(i)));
6982  }
6983  __ and_(scratch, Operand(esp, 0));
6984 
6985  // Scale the index by multiplying by the entry size.
6986  ASSERT(StringDictionary::kEntrySize == 3);
6987  __ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
6988 
6989  // Having undefined at this place means the name is not contained.
6990  ASSERT_EQ(kSmiTagSize, 1);
6991  __ mov(scratch, Operand(dictionary_,
6992  index_,
6993  times_pointer_size,
6994  kElementsStartOffset - kHeapObjectTag));
6995  __ cmp(scratch, masm->isolate()->factory()->undefined_value());
6996  __ j(equal, &not_in_dictionary);
6997 
6998  // Stop if found the property.
6999  __ cmp(scratch, Operand(esp, 3 * kPointerSize));
7000  __ j(equal, &in_dictionary);
7001 
7002  if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
7003  // If we hit a non symbol key during negative lookup
7004  // we have to bailout as this key might be equal to the
7005  // key we are looking for.
7006 
7007  // Check if the entry name is not a symbol.
7008  __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
7009  __ test_b(FieldOperand(scratch, Map::kInstanceTypeOffset),
7010  kIsSymbolMask);
7011  __ j(zero, &maybe_in_dictionary);
7012  }
7013  }
7014 
7015  __ bind(&maybe_in_dictionary);
7016  // If we are doing negative lookup then probing failure should be
7017  // treated as a lookup success. For positive lookup probing failure
7018  // should be treated as lookup failure.
7019  if (mode_ == POSITIVE_LOOKUP) {
7020  __ mov(result_, Immediate(0));
7021  __ Drop(1);
7022  __ ret(2 * kPointerSize);
7023  }
7024 
7025  __ bind(&in_dictionary);
7026  __ mov(result_, Immediate(1));
7027  __ Drop(1);
7028  __ ret(2 * kPointerSize);
7029 
7030  __ bind(&not_in_dictionary);
7031  __ mov(result_, Immediate(0));
7032  __ Drop(1);
7033  __ ret(2 * kPointerSize);
7034 }
7035 
7036 
7037 struct AheadOfTimeWriteBarrierStubList {
7038  Register object, value, address;
7039  RememberedSetAction action;
7040 };
7041 
7042 
7043 #define REG(Name) { kRegister_ ## Name ## _Code }
7044 
7045 static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
7046  // Used in RegExpExecStub.
7047  { REG(ebx), REG(eax), REG(edi), EMIT_REMEMBERED_SET },
7048  // Used in CompileArrayPushCall.
7049  { REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
7050  { REG(ebx), REG(edi), REG(edx), OMIT_REMEMBERED_SET },
7051  // Used in CompileStoreGlobal and CallFunctionStub.
7052  { REG(ebx), REG(ecx), REG(edx), OMIT_REMEMBERED_SET },
7053  // Used in StoreStubCompiler::CompileStoreField and
7054  // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
7055  { REG(edx), REG(ecx), REG(ebx), EMIT_REMEMBERED_SET },
7056  // GenerateStoreField calls the stub with two different permutations of
7057  // registers. This is the second.
7058  { REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
7059  // StoreIC::GenerateNormal via GenerateDictionaryStore
7060  { REG(ebx), REG(edi), REG(edx), EMIT_REMEMBERED_SET },
7061  // KeyedStoreIC::GenerateGeneric.
7062  { REG(ebx), REG(edx), REG(ecx), EMIT_REMEMBERED_SET},
7063  // KeyedStoreStubCompiler::GenerateStoreFastElement.
7064  { REG(edi), REG(ebx), REG(ecx), EMIT_REMEMBERED_SET},
7065  { REG(edx), REG(edi), REG(ebx), EMIT_REMEMBERED_SET},
7066  // ElementsTransitionGenerator::GenerateMapChangeElementTransition
7067  // and ElementsTransitionGenerator::GenerateSmiToDouble
7068  // and ElementsTransitionGenerator::GenerateDoubleToObject
7069  { REG(edx), REG(ebx), REG(edi), EMIT_REMEMBERED_SET},
7070  { REG(edx), REG(ebx), REG(edi), OMIT_REMEMBERED_SET},
7071  // ElementsTransitionGenerator::GenerateDoubleToObject
7072  { REG(eax), REG(edx), REG(esi), EMIT_REMEMBERED_SET},
7073  { REG(edx), REG(eax), REG(edi), EMIT_REMEMBERED_SET},
7074  // StoreArrayLiteralElementStub::Generate
7075  { REG(ebx), REG(eax), REG(ecx), EMIT_REMEMBERED_SET},
7076  // Null termination.
7077  { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
7078 };
7079 
7080 #undef REG
7081 
7082 bool RecordWriteStub::IsPregenerated() {
7083  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7084  !entry->object.is(no_reg);
7085  entry++) {
7086  if (object_.is(entry->object) &&
7087  value_.is(entry->value) &&
7088  address_.is(entry->address) &&
7089  remembered_set_action_ == entry->action &&
7090  save_fp_regs_mode_ == kDontSaveFPRegs) {
7091  return true;
7092  }
7093  }
7094  return false;
7095 }
7096 
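For reference, REG(ebx) in the table above expands to the aggregate initializer { kRegister_ebx_Code }, a Register holding that register's numeric code, and the no_reg row lets IsPregenerated walk the table without a separate length. A mock sketch of the convention (the Register struct here is a stand-in, not V8's type; the codes follow the usual ia32 numbering):

#include <cstdio>

struct Register { int code; bool is(Register o) const { return code == o.code; } };

const Register no_reg = { -1 };
const Register eax = { 0 }, ebx = { 3 }, edi = { 7 };  // ia32 codes.

struct Entry { Register object, value, address; };
const Entry table[] = {
  { ebx, eax, edi },
  { no_reg, no_reg, no_reg },  // Null termination, as in kAheadOfTime.
};

int main() {
  for (const Entry* e = table; !e->object.is(no_reg); e++)
    std::printf("entry: object=%d value=%d address=%d\n",
                e->object.code, e->value.code, e->address.code);
}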
7097 
7098 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
7099  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
7100  stub1.GetCode()->set_is_pregenerated(true);
7101 
7102  CpuFeatures::TryForceFeatureScope scope(SSE2);
7103  if (CpuFeatures::IsSupported(SSE2)) {
7104  StoreBufferOverflowStub stub2(kSaveFPRegs);
7105  stub2.GetCode()->set_is_pregenerated(true);
7106  }
7107 }
7108 
7109 
7110 void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
7111  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7112  !entry->object.is(no_reg);
7113  entry++) {
7114  RecordWriteStub stub(entry->object,
7115  entry->value,
7116  entry->address,
7117  entry->action,
7118  kDontSaveFPRegs);
7119  stub.GetCode()->set_is_pregenerated(true);
7120  }
7121 }
7122 
7123 
7124 // Takes the input in 3 registers: address_, value_ and object_. A pointer to
7125 // the value has just been written into the object; now this stub makes sure
7126 // we keep the GC informed. The word in the object where the value has been
7127 // written is in the address register.
7128 void RecordWriteStub::Generate(MacroAssembler* masm) {
7129  Label skip_to_incremental_noncompacting;
7130  Label skip_to_incremental_compacting;
7131 
7132  // The first two instructions are generated with labels so as to get the
7133  // offset fixed up correctly by the bind(Label*) call. We patch it back and
7134  // forth between a compare instruction (a nop in this position) and the
7135  // real branch when we start and stop incremental heap marking.
7136  __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
7137  __ jmp(&skip_to_incremental_compacting, Label::kFar);
7138 
7139  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
7140  __ RememberedSetHelper(object_,
7141  address_,
7142  value_,
7143  save_fp_regs_mode_,
7144  MacroAssembler::kReturnAtEnd);
7145  } else {
7146  __ ret(0);
7147  }
7148 
7149  __ bind(&skip_to_incremental_noncompacting);
7150  GenerateIncremental(masm, INCREMENTAL);
7151 
7152  __ bind(&skip_to_incremental_compacting);
7153  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
7154 
7155  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
7156  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
7157  masm->set_byte_at(0, kTwoByteNopInstruction);
7158  masm->set_byte_at(2, kFiveByteNopInstruction);
7159 }
7160 
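The two set_byte_at calls above de-activate the stub by rewriting only the first opcode byte of each jump: 0x3c (cmp al, imm8) swallows the short jump's 1-byte displacement and 0x3d (cmp eax, imm32) swallows the near jump's 4-byte displacement, so execution falls through to the store-buffer-only path, and incremental marking patches the jump opcodes back when it starts. A byte-level sketch (the displacements are illustrative; the opcode values are what I recall the kTwoByteNopInstruction/kFiveByteNopInstruction constants in code-stubs-ia32.h to be):

#include <cstdio>

int main() {
  unsigned char code[7] = {
    0xeb, 0x05,                    // jmp +5  (&skip_to_incremental_noncompacting)
    0xe9, 0x00, 0x01, 0x00, 0x00,  // jmp near (&skip_to_incremental_compacting)
  };
  // Deactivation: same bytes become harmless compares.
  code[0] = 0x3c;  // masm->set_byte_at(0, kTwoByteNopInstruction);
  code[2] = 0x3d;  // masm->set_byte_at(2, kFiveByteNopInstruction);
  for (unsigned char b : code) std::printf("%02x ", b);
  std::printf("\n");  // 3c 05 3d 00 01 00 00 -> cmp al,5; cmp eax,0x100
}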
7161 
7162 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
7163  regs_.Save(masm);
7164 
7165  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
7166  Label dont_need_remembered_set;
7167 
7168  __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
7169  __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
7170  regs_.scratch0(),
7171  &dont_need_remembered_set);
7172 
7173  __ CheckPageFlag(regs_.object(),
7174  regs_.scratch0(),
7175  1 << MemoryChunk::SCAN_ON_SCAVENGE,
7176  not_zero,
7177  &dont_need_remembered_set);
7178 
7179  // First notify the incremental marker if necessary, then update the
7180  // remembered set.
7181  CheckNeedsToInformIncrementalMarker(
7182  masm,
7183  kUpdateRememberedSetOnNoNeedToInformIncrementalMarker,
7184  mode);
7185  InformIncrementalMarker(masm, mode);
7186  regs_.Restore(masm);
7187  __ RememberedSetHelper(object_,
7188  address_,
7189  value_,
7190  save_fp_regs_mode_,
7191  MacroAssembler::kReturnAtEnd);
7192 
7193  __ bind(&dont_need_remembered_set);
7194  }
7195 
7196  CheckNeedsToInformIncrementalMarker(
7197  masm,
7198  kReturnOnNoNeedToInformIncrementalMarker,
7199  mode);
7200  InformIncrementalMarker(masm, mode);
7201  regs_.Restore(masm);
7202  __ ret(0);
7203 }
7204 
7205 
7206 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
7207  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
7208  int argument_count = 3;
7209  __ PrepareCallCFunction(argument_count, regs_.scratch0());
7210  __ mov(Operand(esp, 0 * kPointerSize), regs_.object());
7211  if (mode == INCREMENTAL_COMPACTION) {
7212  __ mov(Operand(esp, 1 * kPointerSize), regs_.address()); // Slot.
7213  } else {
7214  ASSERT(mode == INCREMENTAL);
7215  __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
7216  __ mov(Operand(esp, 1 * kPointerSize), regs_.scratch0()); // Value.
7217  }
7218  __ mov(Operand(esp, 2 * kPointerSize),
7219  Immediate(ExternalReference::isolate_address()));
7220 
7221  AllowExternalCallThatCantCauseGC scope(masm);
7222  if (mode == INCREMENTAL_COMPACTION) {
7223  __ CallCFunction(
7224  ExternalReference::incremental_evacuation_record_write_function(
7225  masm->isolate()),
7226  argument_count);
7227  } else {
7228  ASSERT(mode == INCREMENTAL);
7229  __ CallCFunction(
7230  ExternalReference::incremental_marking_record_write_function(
7231  masm->isolate()),
7232  argument_count);
7233  }
7234  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
7235 }
7236 
7237 
7238 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
7239  MacroAssembler* masm,
7240  OnNoNeedToInformIncrementalMarker on_no_need,
7241  Mode mode) {
7242  Label object_is_black, need_incremental, need_incremental_pop_object;
7243 
7244  // Let's look at the color of the object: If it is not black we don't have
7245  // to inform the incremental marker.
7246  __ JumpIfBlack(regs_.object(),
7247  regs_.scratch0(),
7248  regs_.scratch1(),
7249  &object_is_black,
7250  Label::kNear);
7251 
7252  regs_.Restore(masm);
7253  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
7254  __ RememberedSetHelper(object_,
7255  address_,
7256  value_,
7257  save_fp_regs_mode_,
7258  MacroAssembler::kReturnAtEnd);
7259  } else {
7260  __ ret(0);
7261  }
7262 
7263  __ bind(&object_is_black);
7264 
7265  // Get the value from the slot.
7266  __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
7267 
7268  if (mode == INCREMENTAL_COMPACTION) {
7269  Label ensure_not_white;
7270 
7271  __ CheckPageFlag(regs_.scratch0(), // Contains value.
7272  regs_.scratch1(), // Scratch.
7273  MemoryChunk::kEvacuationCandidateMask,
7274  zero,
7275  &ensure_not_white,
7276  Label::kNear);
7277 
7278  __ CheckPageFlag(regs_.object(),
7279  regs_.scratch1(), // Scratch.
7280  MemoryChunk::kSkipEvacuationSlotsRecordingMask,
7281  not_zero,
7282  &ensure_not_white,
7283  Label::kNear);
7284 
7285  __ jmp(&need_incremental);
7286 
7287  __ bind(&ensure_not_white);
7288  }
7289 
7290  // We need an extra register for this, so we push the object register
7291  // temporarily.
7292  __ push(regs_.object());
7293  __ EnsureNotWhite(regs_.scratch0(), // The value.
7294  regs_.scratch1(), // Scratch.
7295  regs_.object(), // Scratch.
7296  &need_incremental_pop_object,
7297  Label::kNear);
7298  __ pop(regs_.object());
7299 
7300  regs_.Restore(masm);
7301  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
7302  __ RememberedSetHelper(object_,
7303  address_,
7304  value_,
7305  save_fp_regs_mode_,
7306  MacroAssembler::kReturnAtEnd);
7307  } else {
7308  __ ret(0);
7309  }
7310 
7311  __ bind(&need_incremental_pop_object);
7312  __ pop(regs_.object());
7313 
7314  __ bind(&need_incremental);
7315 
7316  // Fall through when we need to inform the incremental marker.
7317 }
7318 
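As a rough, simplified control-flow summary of the stub above (the predicate names are invented for illustration; the real code reads mark bits and page flags, and EnsureNotWhite can also blacken some values itself): only a black object writing a white value forces marker work, with an extra case for compaction candidates.

#include <cstdio>

enum Outcome { kNoWork, kUpdateRememberedSetOrReturn, kInformMarker };

// Sketch of the decision implemented by CheckNeedsToInformIncrementalMarker.
Outcome Check(bool object_is_black, bool value_is_white,
              bool compacting, bool value_on_evacuation_candidate,
              bool object_skips_slot_recording) {
  if (!object_is_black) return kUpdateRememberedSetOrReturn;  // Marker idle.
  if (compacting && value_on_evacuation_candidate &&
      !object_skips_slot_recording)
    return kInformMarker;                    // Slot must be recorded.
  if (value_is_white) return kInformMarker;  // EnsureNotWhite bailed out.
  return kUpdateRememberedSetOrReturn;       // Value already grey/black.
}

int main() {
  std::printf("%d\n", Check(false, true, false, false, false));  // 1
  std::printf("%d\n", Check(true, true, false, false, false));   // 2
}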
7319 
7320 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
7321  // ----------- S t a t e -------------
7322  // -- eax : element value to store
7323  // -- ebx : array literal
7324  // -- edi : map of array literal
7325  // -- ecx : element index as smi
7326  // -- edx : array literal index in function
7327  // -- esp[0] : return address
7328  // -----------------------------------
7329 
7330  Label element_done;
7331  Label double_elements;
7332  Label smi_element;
7333  Label slow_elements;
7334  Label slow_elements_from_double;
7335  Label fast_elements;
7336 
7337  __ CheckFastElements(edi, &double_elements);
7338 
7339  // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
7340  __ JumpIfSmi(eax, &smi_element);
7341  __ CheckFastSmiElements(edi, &fast_elements, Label::kNear);
7342 
7343  // Storing into the array literal requires an elements transition. Call into
7344  // the runtime.
7345 
7346  __ bind(&slow_elements);
7347  __ pop(edi); // Pop return address and remember to put back later for tail
7348  // call.
7349  __ push(ebx);
7350  __ push(ecx);
7351  __ push(eax);
7352  __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
7353  __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
7354  __ push(edx);
7355  __ push(edi); // Return return address so that tail call returns to right
7356  // place.
7357  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
7358 
7359  __ bind(&slow_elements_from_double);
7360  __ pop(edx);
7361  __ jmp(&slow_elements);
7362 
7363  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
7364  __ bind(&fast_elements);
7365  __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
7366  __ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size,
7367  FixedArrayBase::kHeaderSize));
7368  __ mov(Operand(ecx, 0), eax);
7369  // Update the write barrier for the array store.
7370  __ RecordWrite(ebx, ecx, eax,
7371  kDontSaveFPRegs,
7372  EMIT_REMEMBERED_SET,
7373  OMIT_SMI_CHECK);
7374  __ ret(0);
7375 
7376  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
7377  // and value is Smi.
7378  __ bind(&smi_element);
7379  __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
7380  __ mov(FieldOperand(ebx, ecx, times_half_pointer_size,
7381  FixedArrayBase::kHeaderSize), eax);
7382  __ ret(0);
7383 
7384  // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
7385  __ bind(&double_elements);
7386 
7387  __ push(edx);
7388  __ mov(edx, FieldOperand(ebx, JSObject::kElementsOffset));
7389  __ StoreNumberToDoubleElements(eax,
7390  edx,
7391  ecx,
7392  edi,
7393  xmm0,
7394  &slow_elements_from_double,
7395  false);
7396  __ pop(edx);
7397  __ ret(0);
7398 }
7399 
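The stub above picks one of three store paths from the boilerplate's elements kind plus a smi check on the value; everything else transitions via the runtime. A plain C++ dispatch sketch (the Kind names loosely mirror the FAST_* elements kinds; this is the decision tree, not V8 code):

#include <cstdio>

enum Kind { kFastSmiElements, kFastElements, kFastDoubleElements };

const char* PickPath(Kind kind, bool value_is_smi) {
  if (kind == kFastDoubleElements) return "store as double";        // &double_elements
  if (value_is_smi) return "store smi in place";                    // &smi_element
  if (kind == kFastElements) return "store + write barrier";        // &fast_elements
  return "runtime elements transition";                             // &slow_elements
}

int main() {
  std::printf("%s\n", PickPath(kFastSmiElements, true));     // store smi in place
  std::printf("%s\n", PickPath(kFastSmiElements, false));    // runtime transition
  std::printf("%s\n", PickPath(kFastElements, false));       // store + write barrier
  std::printf("%s\n", PickPath(kFastDoubleElements, false)); // store as double
}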
7400 #undef __
7401 
7402 } } // namespace v8::internal
7403 
7404 #endif // V8_TARGET_ARCH_IA32
static const int kResourceDataOffset
Definition: objects.h:7517
void GenerateFast(MacroAssembler *masm)
void GenerateSlow(MacroAssembler *masm, const RuntimeCallHelper &call_helper)
static const int kBitFieldOffset
Definition: objects.h:4994
void GenerateFast(MacroAssembler *masm)
STATIC_CHECK((kStringRepresentationMask|kStringEncodingMask)==Internals::kFullStringRepresentationMask)
const intptr_t kSmiTagMask
Definition: v8.h:3855
static const int kCodeOffset
Definition: objects.h:5606
static const int kEvacuationCandidateMask
Definition: spaces.h:407
#define CHECK_EQ(expected, value)
Definition: checks.h:219
void GenerateSlow(MacroAssembler *masm, const RuntimeCallHelper &call_helper)
static const int kCodeEntryOffset
Definition: objects.h:5981
static const int kMaxAsciiCharCode
Definition: objects.h:7107
static const int kPrototypeOrInitialMapOffset
Definition: objects.h:5982
#define COMPARE(asm_, compare_string)
static int SlotOffset(int index)
Definition: contexts.h:408
RecordWriteStub(Register object, Register value, Register address, RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode)
static const int kStaticOffsetsVectorSize
Definition: jsregexp.h:1649
static const int kArgumentsObjectSize
Definition: heap.h:863
const XMMRegister xmm4
static void GenerateFixedRegStubsAheadOfTime()
const uint32_t kTwoByteStringTag
Definition: objects.h:450
const int kFailureTypeTagSize
Definition: objects.h:1037
static const uint32_t kExponentMask
Definition: objects.h:1317
static const char * GetName(TypeInfo type_info)
Definition: ic.cc:2220
static Failure * InternalError()
Definition: objects-inl.h:1011
static void GenerateCopyCharacters(MacroAssembler *masm, Register dest, Register src, Register count, Register scratch, bool ascii)
static void PerformGC(Object *result)
Definition: runtime.cc:13582
static const char * Name(Value tok)
Definition: token.h:196
static Smi * FromInt(int value)
Definition: objects-inl.h:973
void Generate(MacroAssembler *masm)
static const byte kTwoByteNopInstruction
value format" "after each garbage collection") DEFINE_bool(print_cumulative_gc_stat, false, "print cumulative GC statistics in name=value format on exit") DEFINE_bool(trace_gc_verbose, false, "print more details following each garbage collection") DEFINE_bool(trace_fragmentation, false, "report fragmentation for old pointer and data pages") DEFINE_bool(collect_maps, true, "garbage collect maps from which no objects can be reached") DEFINE_bool(flush_code, true, "flush code that we expect not to use again before full gc") DEFINE_bool(incremental_marking, true, "use incremental marking") DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps") DEFINE_bool(trace_incremental_marking, false, "trace progress of the incremental marking") DEFINE_bool(use_idle_notification, true, "Use idle notification to reduce memory footprint.") DEFINE_bool(send_idle_notification, false, "Send idle notifcation between stress runs.") DEFINE_bool(use_ic, true, "use inline caching") DEFINE_bool(native_code_counters, false, "generate extra code for manipulating stats counters") DEFINE_bool(always_compact, false, "Perform compaction on every full GC") DEFINE_bool(lazy_sweeping, true, "Use lazy sweeping for old pointer and data spaces") DEFINE_bool(never_compact, false, "Never perform compaction on full GC-testing only") DEFINE_bool(compact_code_space, true, "Compact code space on full non-incremental collections") DEFINE_bool(cleanup_code_caches_at_gc, true, "Flush inline caches prior to mark compact collection and" "flush code caches in maps during mark compact cycle.") DEFINE_int(random_seed, 0, "Default seed for initializing random generator" "(0, the default, means to use system random).") DEFINE_bool(use_verbose_printer, true, "allows verbose printing") DEFINE_bool(allow_natives_syntax, false, "allow natives syntax") DEFINE_bool(trace_sim, false, "Trace simulator execution") DEFINE_bool(check_icache, false, "Check icache flushes in ARM and MIPS simulator") DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions") DEFINE_int(sim_stack_alignment, 8, "Stack alingment in bytes in simulator(4 or 8, 8 is default)") DEFINE_bool(trace_exception, false, "print stack trace when throwing exceptions") DEFINE_bool(preallocate_message_memory, false, "preallocate some memory to build stack traces.") DEFINE_bool(randomize_hashes, true, "randomize hashes to avoid predictable hash collisions" "(with snapshots this option cannot override the baked-in seed)") DEFINE_int(hash_seed, 0, "Fixed seed to use to hash property keys(0 means random)" "(with snapshots this option cannot override the baked-in seed)") DEFINE_bool(preemption, false, "activate a 100ms timer that switches between V8 threads") DEFINE_bool(regexp_optimization, true, "generate optimized regexp code") DEFINE_bool(testing_bool_flag, true, "testing_bool_flag") DEFINE_int(testing_int_flag, 13, "testing_int_flag") DEFINE_float(testing_float_flag, 2.5, "float-flag") DEFINE_string(testing_string_flag, "Hello, world!", "string-flag") DEFINE_int(testing_prng_seed, 42, "Seed used for threading test randomness") DEFINE_string(testing_serialization_file, "/tmp/serdes", "file in which to serialize heap") DEFINE_bool(help, false, "Print usage message, including flags, on console") DEFINE_bool(dump_counters, false, "Dump counters on exit") DEFINE_string(map_counters, "", "Map counters to a file") DEFINE_args(js_arguments, JSARGUMENTS_INIT, "Pass all remaining arguments to the script.Alias for\"--\".") DEFINE_bool(debug_compile_events, 
true,"Enable debugger compile events") DEFINE_bool(debug_script_collected_events, true,"Enable debugger script collected events") DEFINE_bool(gdbjit, false,"enable GDBJIT interface (disables compacting GC)") DEFINE_bool(gdbjit_full, false,"enable GDBJIT interface for all code objects") DEFINE_bool(gdbjit_dump, false,"dump elf objects with debug info to disk") DEFINE_string(gdbjit_dump_filter,"","dump only objects containing this substring") DEFINE_bool(force_marking_deque_overflows, false,"force overflows of marking deque by reducing it's size ""to 64 words") DEFINE_bool(stress_compaction, false,"stress the GC compactor to flush out bugs (implies ""--force_marking_deque_overflows)")#define FLAG DEFINE_bool(enable_slow_asserts, false,"enable asserts that are slow to execute") DEFINE_bool(trace_codegen, false,"print name of functions for which code is generated") DEFINE_bool(print_source, false,"pretty print source code") DEFINE_bool(print_builtin_source, false,"pretty print source code for builtins") DEFINE_bool(print_ast, false,"print source AST") DEFINE_bool(print_builtin_ast, false,"print source AST for builtins") DEFINE_string(stop_at,"","function name where to insert a breakpoint") DEFINE_bool(print_builtin_scopes, false,"print scopes for builtins") DEFINE_bool(print_scopes, false,"print scopes") DEFINE_bool(trace_contexts, false,"trace contexts operations") DEFINE_bool(gc_greedy, false,"perform GC prior to some allocations") DEFINE_bool(gc_verbose, false,"print stuff during garbage collection") DEFINE_bool(heap_stats, false,"report heap statistics before and after GC") DEFINE_bool(code_stats, false,"report code statistics after GC") DEFINE_bool(verify_heap, false,"verify heap pointers before and after GC") DEFINE_bool(print_handles, false,"report handles after GC") DEFINE_bool(print_global_handles, false,"report global handles after GC") DEFINE_bool(trace_ic, false,"trace inline cache state transitions") DEFINE_bool(print_interfaces, false,"print interfaces") DEFINE_bool(print_interface_details, false,"print interface inference details") DEFINE_int(print_interface_depth, 5,"depth for printing interfaces") DEFINE_bool(trace_normalization, false,"prints when objects are turned into dictionaries.") DEFINE_bool(trace_lazy, false,"trace lazy compilation") DEFINE_bool(collect_heap_spill_statistics, false,"report heap spill statistics along with heap_stats ""(requires heap_stats)") DEFINE_bool(trace_isolates, false,"trace isolate state changes") DEFINE_bool(log_state_changes, false,"Log state changes.") DEFINE_bool(regexp_possessive_quantifier, false,"enable possessive quantifier syntax for testing") DEFINE_bool(trace_regexp_bytecodes, false,"trace regexp bytecode execution") DEFINE_bool(trace_regexp_assembler, false,"trace regexp macro assembler calls.")#define FLAG DEFINE_bool(log, false,"Minimal logging (no API, code, GC, suspect, or handles samples).") DEFINE_bool(log_all, false,"Log all events to the log file.") DEFINE_bool(log_runtime, false,"Activate runtime system %Log call.") DEFINE_bool(log_api, false,"Log API events to the log file.") DEFINE_bool(log_code, false,"Log code events to the log file without profiling.") DEFINE_bool(log_gc, false,"Log heap samples on garbage collection for the hp2ps tool.") DEFINE_bool(log_handles, false,"Log global handle events.") DEFINE_bool(log_snapshot_positions, false,"log positions of (de)serialized objects in the snapshot.") DEFINE_bool(log_suspect, false,"Log suspect operations.") DEFINE_bool(prof, false,"Log statistical profiling information 
(implies --log-code).") DEFINE_bool(prof_auto, true,"Used with --prof, starts profiling automatically") DEFINE_bool(prof_lazy, false,"Used with --prof, only does sampling and logging"" when profiler is active (implies --noprof_auto).") DEFINE_bool(prof_browser_mode, true,"Used with --prof, turns on browser-compatible mode for profiling.") DEFINE_bool(log_regexp, false,"Log regular expression execution.") DEFINE_bool(sliding_state_window, false,"Update sliding state window counters.") DEFINE_string(logfile,"v8.log","Specify the name of the log file.") DEFINE_bool(ll_prof, false,"Enable low-level linux profiler.")#define FLAG DEFINE_bool(trace_elements_transitions, false,"trace elements transitions") DEFINE_bool(print_code_stubs, false,"print code stubs") DEFINE_bool(test_secondary_stub_cache, false,"test secondary stub cache by disabling the primary one") DEFINE_bool(test_primary_stub_cache, false,"test primary stub cache by disabling the secondary one") DEFINE_bool(print_code, false,"print generated code") DEFINE_bool(print_opt_code, false,"print optimized code") DEFINE_bool(print_unopt_code, false,"print unoptimized code before ""printing optimized code based on it") DEFINE_bool(print_code_verbose, false,"print more information for code") DEFINE_bool(print_builtin_code, false,"print generated code for builtins")#43"/Users/thlorenz/dev/dx/v8-perf/build/v8/src/flags.cc"2#define FLAG_MODE_DEFINE_DEFAULTS#1"/Users/thlorenz/dev/dx/v8-perf/build/v8/src/flag-definitions.h"1#define FLAG_FULL(ftype, ctype, nam, def, cmt)#define FLAG_READONLY(ftype, ctype, nam, def, cmt)#define DEFINE_implication(whenflag, thenflag)#define DEFINE_bool(nam, def, cmt)#define DEFINE_int(nam, def, cmt)#define DEFINE_float(nam, def, cmt)#define DEFINE_string(nam, def, cmt)#define DEFINE_args(nam, def, cmt)#define FLAG DEFINE_bool(use_strict, false,"enforce strict mode") DEFINE_bool(es5_readonly, false,"activate correct semantics for inheriting readonliness") DEFINE_bool(es52_globals, false,"activate new semantics for global var declarations") DEFINE_bool(harmony_typeof, false,"enable harmony semantics for typeof") DEFINE_bool(harmony_scoping, false,"enable harmony block scoping") DEFINE_bool(harmony_modules, false,"enable harmony modules (implies block scoping)") DEFINE_bool(harmony_proxies, false,"enable harmony proxies") DEFINE_bool(harmony_collections, false,"enable harmony collections (sets, maps, and weak maps)") DEFINE_bool(harmony, false,"enable all harmony features (except typeof)") DEFINE_implication(harmony, harmony_scoping) DEFINE_implication(harmony, harmony_modules) DEFINE_implication(harmony, harmony_proxies) DEFINE_implication(harmony, harmony_collections) DEFINE_implication(harmony_modules, harmony_scoping) DEFINE_bool(packed_arrays, false,"optimizes arrays that have no holes") DEFINE_bool(smi_only_arrays, true,"tracks arrays with only smi values") DEFINE_bool(clever_optimizations, true,"Optimize object size, Array shift, DOM strings and string +") DEFINE_bool(unbox_double_arrays, true,"automatically unbox arrays of doubles") DEFINE_bool(string_slices, true,"use string slices") DEFINE_bool(crankshaft, true,"use crankshaft") DEFINE_string(hydrogen_filter,"","optimization filter") DEFINE_bool(use_range, true,"use hydrogen range analysis") DEFINE_bool(eliminate_dead_phis, true,"eliminate dead phis") DEFINE_bool(use_gvn, true,"use hydrogen global value numbering") DEFINE_bool(use_canonicalizing, true,"use hydrogen instruction canonicalizing") DEFINE_bool(use_inlining, true,"use function inlining") 
DEFINE_int(max_inlined_source_size, 600,"maximum source size in bytes considered for a single inlining") DEFINE_int(max_inlined_nodes, 196,"maximum number of AST nodes considered for a single inlining") DEFINE_int(max_inlined_nodes_cumulative, 196,"maximum cumulative number of AST nodes considered for inlining") DEFINE_bool(loop_invariant_code_motion, true,"loop invariant code motion") DEFINE_bool(collect_megamorphic_maps_from_stub_cache, true,"crankshaft harvests type feedback from stub cache") DEFINE_bool(hydrogen_stats, false,"print statistics for hydrogen") DEFINE_bool(trace_hydrogen, false,"trace generated hydrogen to file") DEFINE_string(trace_phase,"Z","trace generated IR for specified phases") DEFINE_bool(trace_inlining, false,"trace inlining decisions") DEFINE_bool(trace_alloc, false,"trace register allocator") DEFINE_bool(trace_all_uses, false,"trace all use positions") DEFINE_bool(trace_range, false,"trace range analysis") DEFINE_bool(trace_gvn, false,"trace global value numbering") DEFINE_bool(trace_representation, false,"trace representation types") DEFINE_bool(stress_pointer_maps, false,"pointer map for every instruction") DEFINE_bool(stress_environments, false,"environment for every instruction") DEFINE_int(deopt_every_n_times, 0,"deoptimize every n times a deopt point is passed") DEFINE_bool(trap_on_deopt, false,"put a break point before deoptimizing") DEFINE_bool(deoptimize_uncommon_cases, true,"deoptimize uncommon cases") DEFINE_bool(polymorphic_inlining, true,"polymorphic inlining") DEFINE_bool(use_osr, true,"use on-stack replacement") DEFINE_bool(array_bounds_checks_elimination, false,"perform array bounds checks elimination") DEFINE_bool(array_index_dehoisting, false,"perform array index dehoisting") DEFINE_bool(trace_osr, false,"trace on-stack replacement") DEFINE_int(stress_runs, 0,"number of stress runs") DEFINE_bool(optimize_closures, true,"optimize closures") DEFINE_bool(inline_construct, true,"inline constructor calls") DEFINE_bool(inline_arguments, true,"inline functions with arguments object") DEFINE_int(loop_weight, 1,"loop weight for representation inference") DEFINE_bool(optimize_for_in, true,"optimize functions containing for-in loops") DEFINE_bool(experimental_profiler, true,"enable all profiler experiments") DEFINE_bool(watch_ic_patching, false,"profiler considers IC stability") DEFINE_int(frame_count, 1,"number of stack frames inspected by the profiler") DEFINE_bool(self_optimization, false,"primitive functions trigger their own optimization") DEFINE_bool(direct_self_opt, false,"call recompile stub directly when self-optimizing") DEFINE_bool(retry_self_opt, false,"re-try self-optimization if it failed") DEFINE_bool(count_based_interrupts, false,"trigger profiler ticks based on counting instead of timing") DEFINE_bool(interrupt_at_exit, false,"insert an interrupt check at function exit") DEFINE_bool(weighted_back_edges, false,"weight back edges by jump distance for interrupt triggering") DEFINE_int(interrupt_budget, 5900,"execution budget before interrupt is triggered") DEFINE_int(type_info_threshold, 15,"percentage of ICs that must have type info to allow optimization") DEFINE_int(self_opt_count, 130,"call count before self-optimization") DEFINE_implication(experimental_profiler, watch_ic_patching) DEFINE_implication(experimental_profiler, self_optimization) DEFINE_implication(experimental_profiler, retry_self_opt) DEFINE_implication(experimental_profiler, count_based_interrupts) DEFINE_implication(experimental_profiler, interrupt_at_exit) 
DEFINE_implication(experimental_profiler, weighted_back_edges) DEFINE_bool(trace_opt_verbose, false,"extra verbose compilation tracing") DEFINE_implication(trace_opt_verbose, trace_opt) DEFINE_bool(debug_code, false,"generate extra code (assertions) for debugging") DEFINE_bool(code_comments, false,"emit comments in code disassembly") DEFINE_bool(enable_sse2, true,"enable use of SSE2 instructions if available") DEFINE_bool(enable_sse3, true,"enable use of SSE3 instructions if available") DEFINE_bool(enable_sse4_1, true,"enable use of SSE4.1 instructions if available") DEFINE_bool(enable_cmov, true,"enable use of CMOV instruction if available") DEFINE_bool(enable_rdtsc, true,"enable use of RDTSC instruction if available") DEFINE_bool(enable_sahf, true,"enable use of SAHF instruction if available (X64 only)") DEFINE_bool(enable_vfp3, true,"enable use of VFP3 instructions if available - this implies ""enabling ARMv7 instructions (ARM only)") DEFINE_bool(enable_armv7, true,"enable use of ARMv7 instructions if available (ARM only)") DEFINE_bool(enable_fpu, true,"enable use of MIPS FPU instructions if available (MIPS only)") DEFINE_string(expose_natives_as, NULL,"expose natives in global object") DEFINE_string(expose_debug_as, NULL,"expose debug in global object") DEFINE_bool(expose_gc, false,"expose gc extension") DEFINE_bool(expose_externalize_string, false,"expose externalize string extension") DEFINE_int(stack_trace_limit, 10,"number of stack frames to capture") DEFINE_bool(builtins_in_stack_traces, false,"show built-in functions in stack traces") DEFINE_bool(disable_native_files, false,"disable builtin natives files") DEFINE_bool(inline_new, true,"use fast inline allocation") DEFINE_bool(stack_trace_on_abort, true,"print a stack trace if an assertion failure occurs") DEFINE_bool(trace, false,"trace function calls") DEFINE_bool(mask_constants_with_cookie, true,"use random jit cookie to mask large constants") DEFINE_bool(lazy, true,"use lazy compilation") DEFINE_bool(trace_opt, false,"trace lazy optimization") DEFINE_bool(trace_opt_stats, false,"trace lazy optimization statistics") DEFINE_bool(opt, true,"use adaptive optimizations") DEFINE_bool(always_opt, false,"always try to optimize functions") DEFINE_bool(prepare_always_opt, false,"prepare for turning on always opt") DEFINE_bool(trace_deopt, false,"trace deoptimization") DEFINE_int(min_preparse_length, 1024,"minimum length for automatic enable preparsing") DEFINE_bool(always_full_compiler, false,"try to use the dedicated run-once backend for all code") DEFINE_bool(trace_bailout, false,"print reasons for falling back to using the classic V8 backend") DEFINE_bool(compilation_cache, true,"enable compilation cache") DEFINE_bool(cache_prototype_transitions, true,"cache prototype transitions") DEFINE_bool(trace_debug_json, false,"trace debugging JSON request/response") DEFINE_bool(debugger_auto_break, true,"automatically set the debug break flag when debugger commands are ""in the queue") DEFINE_bool(enable_liveedit, true,"enable liveedit experimental feature") DEFINE_bool(break_on_abort, true,"always cause a debug break before aborting") DEFINE_int(stack_size, kPointerSize *123,"default size of stack region v8 is allowed to use (in kBytes)") DEFINE_int(max_stack_trace_source_length, 300,"maximum length of function source code printed in a stack trace.") DEFINE_bool(always_inline_smi_code, false,"always inline smi code in non-opt code") DEFINE_int(max_new_space_size, 0,"max size of the new generation (in kBytes)") DEFINE_int(max_old_space_size, 
0,"max size of the old generation (in Mbytes)") DEFINE_int(max_executable_size, 0,"max size of executable memory (in Mbytes)") DEFINE_bool(gc_global, false,"always perform global GCs") DEFINE_int(gc_interval,-1,"garbage collect after <n> allocations") DEFINE_bool(trace_gc, false,"print one trace line following each garbage collection") DEFINE_bool(trace_gc_nvp, false,"print one detailed trace line in name=value format ""after each garbage collection") DEFINE_bool(print_cumulative_gc_stat, false,"print cumulative GC statistics in name=value format on exit") DEFINE_bool(trace_gc_verbose, false,"print more details following each garbage collection") DEFINE_bool(trace_fragmentation, false,"report fragmentation for old pointer and data pages") DEFINE_bool(collect_maps, true,"garbage collect maps from which no objects can be reached") DEFINE_bool(flush_code, true,"flush code that we expect not to use again before full gc") DEFINE_bool(incremental_marking, true,"use incremental marking") DEFINE_bool(incremental_marking_steps, true,"do incremental marking steps") DEFINE_bool(trace_incremental_marking, false,"trace progress of the incremental marking") DEFINE_bool(use_idle_notification, true,"Use idle notification to reduce memory footprint.") DEFINE_bool(send_idle_notification, false,"Send idle notifcation between stress runs.") DEFINE_bool(use_ic, true,"use inline caching") DEFINE_bool(native_code_counters, false,"generate extra code for manipulating stats counters") DEFINE_bool(always_compact, false,"Perform compaction on every full GC") DEFINE_bool(lazy_sweeping, true,"Use lazy sweeping for old pointer and data spaces") DEFINE_bool(never_compact, false,"Never perform compaction on full GC - testing only") DEFINE_bool(compact_code_space, true,"Compact code space on full non-incremental collections") DEFINE_bool(cleanup_code_caches_at_gc, true,"Flush inline caches prior to mark compact collection and ""flush code caches in maps during mark compact cycle.") DEFINE_int(random_seed, 0,"Default seed for initializing random generator ""(0, the default, means to use system random).") DEFINE_bool(use_verbose_printer, true,"allows verbose printing") DEFINE_bool(allow_natives_syntax, false,"allow natives syntax") DEFINE_bool(trace_sim, false,"Trace simulator execution") DEFINE_bool(check_icache, false,"Check icache flushes in ARM and MIPS simulator") DEFINE_int(stop_sim_at, 0,"Simulator stop after x number of instructions") DEFINE_int(sim_stack_alignment, 8,"Stack alingment in bytes in simulator (4 or 8, 8 is default)") DEFINE_bool(trace_exception, false,"print stack trace when throwing exceptions") DEFINE_bool(preallocate_message_memory, false,"preallocate some memory to build stack traces.") DEFINE_bool(randomize_hashes, true,"randomize hashes to avoid predictable hash collisions ""(with snapshots this option cannot override the baked-in seed)") DEFINE_int(hash_seed, 0,"Fixed seed to use to hash property keys (0 means random)""(with snapshots this option cannot override the baked-in seed)") DEFINE_bool(preemption, false,"activate a 100ms timer that switches between V8 threads") DEFINE_bool(regexp_optimization, true,"generate optimized regexp code") DEFINE_bool(testing_bool_flag, true,"testing_bool_flag") DEFINE_int(testing_int_flag, 13,"testing_int_flag") DEFINE_float(testing_float_flag, 2.5,"float-flag") DEFINE_string(testing_string_flag,"Hello, world!","string-flag") DEFINE_int(testing_prng_seed, 42,"Seed used for threading test randomness") DEFINE_string(testing_serialization_file,"/tmp/serdes","file 
in which to serialize heap") DEFINE_bool(help, false,"Print usage message, including flags, on console") DEFINE_bool(dump_counters, false,"Dump counters on exit") DEFINE_string(map_counters,"","Map counters to a file") DEFINE_args(js_arguments, JSARGUMENTS_INIT,"Pass all remaining arguments to the script. Alias for \"--\".") DEFINE_bool(debug_compile_events, true,"Enable debugger compile events") DEFINE_bool(debug_script_collected_events, true,"Enable debugger script collected events") DEFINE_bool(gdbjit, false,"enable GDBJIT interface (disables compacting GC)") DEFINE_bool(gdbjit_full, false,"enable GDBJIT interface for all code objects") DEFINE_bool(gdbjit_dump, false,"dump elf objects with debug info to disk") DEFINE_string(gdbjit_dump_filter,"","dump only objects containing this substring") DEFINE_bool(force_marking_deque_overflows, false,"force overflows of marking deque by reducing it's size ""to 64 words") DEFINE_bool(stress_compaction, false,"stress the GC compactor to flush out bugs (implies ""--force_marking_deque_overflows)")#define FLAG DEFINE_bool(enable_slow_asserts, false,"enable asserts that are slow to execute") DEFINE_bool(trace_codegen, false,"print name of functions for which code is generated") DEFINE_bool(print_source, false,"pretty print source code") DEFINE_bool(print_builtin_source, false,"pretty print source code for builtins") DEFINE_bool(print_ast, false,"print source AST") DEFINE_bool(print_builtin_ast, false,"print source AST for builtins") DEFINE_string(stop_at,"","function name where to insert a breakpoint") DEFINE_bool(print_builtin_scopes, false,"print scopes for builtins") DEFINE_bool(print_scopes, false,"print scopes") DEFINE_bool(trace_contexts, false,"trace contexts operations") DEFINE_bool(gc_greedy, false,"perform GC prior to some allocations") DEFINE_bool(gc_verbose, false,"print stuff during garbage collection") DEFINE_bool(heap_stats, false,"report heap statistics before and after GC") DEFINE_bool(code_stats, false,"report code statistics after GC") DEFINE_bool(verify_heap, false,"verify heap pointers before and after GC") DEFINE_bool(print_handles, false,"report handles after GC") DEFINE_bool(print_global_handles, false,"report global handles after GC") DEFINE_bool(trace_ic, false,"trace inline cache state transitions") DEFINE_bool(print_interfaces, false,"print interfaces") DEFINE_bool(print_interface_details, false,"print interface inference details") DEFINE_int(print_interface_depth, 5,"depth for printing interfaces") DEFINE_bool(trace_normalization, false,"prints when objects are turned into dictionaries.") DEFINE_bool(trace_lazy, false,"trace lazy compilation") DEFINE_bool(collect_heap_spill_statistics, false,"report heap spill statistics along with heap_stats ""(requires heap_stats)") DEFINE_bool(trace_isolates, false,"trace isolate state changes") DEFINE_bool(log_state_changes, false,"Log state changes.") DEFINE_bool(regexp_possessive_quantifier, false,"enable possessive quantifier syntax for testing") DEFINE_bool(trace_regexp_bytecodes, false,"trace regexp bytecode execution") DEFINE_bool(trace_regexp_assembler, false,"trace regexp macro assembler calls.")#define FLAG DEFINE_bool(log, false,"Minimal logging (no API, code, GC, suspect, or handles samples).") DEFINE_bool(log_all, false,"Log all events to the log file.") DEFINE_bool(log_runtime, false,"Activate runtime system %Log call.") DEFINE_bool(log_api, false,"Log API events to the log file.") DEFINE_bool(log_code, false,"Log code events to the log file without profiling.") 
DEFINE_bool(log_gc, false,"Log heap samples on garbage collection for the hp2ps tool.") DEFINE_bool(log_handles, false,"Log global handle events.") DEFINE_bool(log_snapshot_positions, false,"log positions of (de)serialized objects in the snapshot.") DEFINE_bool(log_suspect, false,"Log suspect operations.") DEFINE_bool(prof, false,"Log statistical profiling information (implies --log-code).") DEFINE_bool(prof_auto, true,"Used with --prof, starts profiling automatically") DEFINE_bool(prof_lazy, false,"Used with --prof, only does sampling and logging"" when profiler is active (implies --noprof_auto).") DEFINE_bool(prof_browser_mode, true,"Used with --prof, turns on browser-compatible mode for profiling.") DEFINE_bool(log_regexp, false,"Log regular expression execution.") DEFINE_bool(sliding_state_window, false,"Update sliding state window counters.") DEFINE_string(logfile,"v8.log","Specify the name of the log file.") DEFINE_bool(ll_prof, false,"Enable low-level linux profiler.")#define FLAG DEFINE_bool(trace_elements_transitions, false,"trace elements transitions") DEFINE_bool(print_code_stubs, false,"print code stubs") DEFINE_bool(test_secondary_stub_cache, false,"test secondary stub cache by disabling the primary one") DEFINE_bool(test_primary_stub_cache, false,"test primary stub cache by disabling the secondary one") DEFINE_bool(print_code, false,"print generated code") DEFINE_bool(print_opt_code, false,"print optimized code") DEFINE_bool(print_unopt_code, false,"print unoptimized code before ""printing optimized code based on it") DEFINE_bool(print_code_verbose, false,"print more information for code") DEFINE_bool(print_builtin_code, false,"print generated code for builtins")#47"/Users/thlorenz/dev/dx/v8-perf/build/v8/src/flags.cc"2 namespace{struct Flag{enum FlagType{TYPE_BOOL, TYPE_INT, TYPE_FLOAT, TYPE_STRING, TYPE_ARGS} name
Definition: flags.cc:1349
const Register esp
static const int kDataOffset
Definition: objects.h:6432
static const int kGlobalReceiverOffset
Definition: objects.h:6085
void Generate(MacroAssembler *masm)
static Failure * OutOfMemoryException()
Definition: objects-inl.h:1021
static const int kEmptyHashField
Definition: objects.h:7159
static void GenerateHashGetHash(MacroAssembler *masm, Register hash)
static const int kExponentBias
Definition: objects.h:1321
int int32_t
Definition: unicode.cc:47
static Handle< Object > UninitializedSentinel(Isolate *isolate)
Definition: objects-inl.h:5052
static bool IsSupported(CpuFeature f)
static Failure * Exception()
Definition: objects-inl.h:1016
void Generate(MacroAssembler *masm)
static bool enabled()
Definition: serialize.h:480
virtual bool IsPregenerated()
void Generate(MacroAssembler *masm)
#define ASSERT(condition)
Definition: checks.h:270
const int kPointerSizeLog2
Definition: globals.h:246
static const int kInstanceSizeOffset
Definition: objects.h:4981
static void GenerateCompareFlatAsciiStrings(MacroAssembler *masm, Register left, Register right, Register scratch1, Register scratch2, Register scratch3, Register scratch4)
static Handle< Object > MegamorphicSentinel(Isolate *isolate)
Definition: objects-inl.h:5057
static const char * GetName(TypeInfo type_info)
Definition: ic.cc:2285
const uint32_t kStringRepresentationMask
Definition: objects.h:455
static void GenerateOperation(MacroAssembler *masm, TranscendentalCache::Type type)
MemOperand GlobalObjectOperand()
static const int kSize
Definition: objects.h:8134
static const int kGlobalContextOffset
Definition: objects.h:6084
MemOperand ContextOperand(Register context, int index)
static const int kContextOffset
Definition: objects.h:5986
const uint32_t kAsciiDataHintTag
Definition: objects.h:479
const uint32_t kShortExternalStringMask
Definition: objects.h:483
static void GenerateNegativeLookup(MacroAssembler *masm, Label *miss, Label *done, Register receiver, Register properties, Handle< String > name, Register scratch0)
static const int kLastSubjectOffset
Definition: jsregexp.h:152
const int kIntSize
Definition: globals.h:231
unsigned int seed
Definition: test-strings.cc:17
static const int kZeroHash
Definition: objects.h:6816
const Register edi
void Generate(MacroAssembler *masm)
static const int kHashFieldOffset
Definition: objects.h:7099
static const int kSize
Definition: objects.h:8112
static const int kLastCaptureCountOffset
Definition: jsregexp.h:150
static const int kFirstOffset
Definition: objects.h:7420
static const int kMinLength
Definition: objects.h:7433
const uint32_t kNotStringTag
Definition: objects.h:438
static const int kParentOffset
Definition: objects.h:7473
static const int kNonMantissaBitsInTopWord
Definition: objects.h:1324
static const int kLiteralsOffset
Definition: objects.h:5987
const Register ebp
#define UNREACHABLE()
Definition: checks.h:50
static const int kArgumentsObjectSizeStrict
Definition: heap.h:866
static void GenerateCopyCharactersREP(MacroAssembler *masm, Register dest, Register src, Register count, Register scratch, bool ascii)
STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0)
static const int kLengthOffset
Definition: objects.h:7098
const uint32_t kIsSymbolMask
Definition: objects.h:443
static const int kExponentShift
Definition: objects.h:1322
const intptr_t kFailureTagMask
Definition: v8globals.h:73
const Register eax
static const int kValueOffset
Definition: objects.h:1307
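kValueOffset locates the raw IEEE-754 double inside a HeapNumber, and companions listed elsewhere in this index (kSignMask, kExponentShift, kExponentBias, kMantissaOffset/kExponentOffset) name the bit fields the stubs test word by word. A standalone sketch of that decomposition; the bit positions are standard IEEE-754 facts, so treat the spelled-out constant values as assumptions rather than quotes from objects.h:

#include <cstdint>
#include <cstdio>
#include <cstring>

const uint32_t kSignMask = 0x80000000u;      // sign bit of the high word
const uint32_t kExponentMask = 0x7ff00000u;  // 11 exponent bits
const int kExponentShift = 20;               // exponent position in high word
const int kExponentBias = 1023;

int main() {
  double value = -6.5;
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  uint32_t hi = static_cast<uint32_t>(bits >> 32);  // word at kExponentOffset
  bool negative = (hi & kSignMask) != 0;
  int exponent =
      static_cast<int>((hi & kExponentMask) >> kExponentShift) - kExponentBias;
  std::printf("sign=%d exponent=%d\n", negative, exponent);  // sign=1 exponent=2
  return 0;
}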
const int kFailureTagSize
Definition: v8globals.h:72
static void GenerateFlatAsciiStringEquals(MacroAssembler *masm, Register left, Register right, Register scratch1, Register scratch2, Register scratch3)
const int kDoubleSize
Definition: globals.h:232
static const int kIrregexpCaptureCountOffset
Definition: objects.h:6478
static const int kInputOffset
Definition: objects.h:8133
static bool IsBitOp(Value op)
Definition: token.h:256
const XMMRegister xmm1
const uint32_t kIsIndirectStringMask
Definition: objects.h:462
void Generate(MacroAssembler *masm)
const int kPointerSize
Definition: globals.h:234
static const int kStringWrapperSafeForDefaultValueOf
Definition: objects.h:5011
Operand FieldOperand(Register object, int offset)
const Register ecx
const int kHeapObjectTag
Definition: v8.h:3848
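FieldOperand pairs with kHeapObjectTag: heap pointers carry a low tag bit, so a field declared at byte offset N is addressed at displacement N - kHeapObjectTag from the tagged pointer; ContextOperand and GlobalObjectOperand above rest on the same subtraction. A standalone sketch of the arithmetic, with a hypothetical address:

#include <cstdint>
#include <cstdio>

const int kHeapObjectTag = 1;  // low bit set on heap-object pointers

// Equivalent of Operand(object, offset - kHeapObjectTag) in the stubs.
uintptr_t FieldAddress(uintptr_t tagged_object, int offset) {
  return tagged_object + offset - kHeapObjectTag;
}

int main() {
  uintptr_t object = 0x1000 + kHeapObjectTag;  // hypothetical tagged pointer
  // kMapOffset is 0, so the map word lives at the untagged base address.
  std::printf("map word at %#zx\n", (size_t)FieldAddress(object, 0));
  return 0;
}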
const uint32_t kAsciiDataHintMask
Definition: objects.h:478
#define __
static const byte kFiveByteNopInstruction
void Generate(MacroAssembler *masm)
static const int kPropertiesOffset
Definition: objects.h:2113
static const int kMinLength
Definition: objects.h:7485
const uint32_t kShortExternalStringTag
Definition: objects.h:484
static void GenerateHashAddCharacter(MacroAssembler *masm, Register hash, Register character)
static void Generate(MacroAssembler *masm, Register string, Register index, Register result, Label *call_runtime)
static const int kHeaderSize
Definition: objects.h:7282
static const int kNextFunctionLinkOffset
Definition: objects.h:5989
const int kBitsPerByte
Definition: globals.h:251
static int SizeFor(int length)
Definition: objects.h:2369
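The SizeFor helpers compute allocation sizes as a fixed header plus a per-element payload; the stubs use them when carving objects out of new space. A standalone sketch for a pointer-element array on ia32, where the header size shown (a map word plus a length word) is illustrative:

#include <cstdio>

const int kPointerSize = 4;                // ia32
const int kHeaderSize = 2 * kPointerSize;  // map + length fields (assumed)

int SizeFor(int length) { return kHeaderSize + length * kPointerSize; }

int main() {
  std::printf("array of 3 elements needs %d bytes\n", SizeFor(3));
  return 0;
}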
const Register r0
static const int kElementsOffset
Definition: objects.h:2114
bool IsPowerOf2(T x)
Definition: utils.h:50
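IsPowerOf2 backs alignment checks like the STATIC_ASSERT above. The usual trick: a power of two has exactly one bit set, so x & (x - 1) clears it to zero. A standalone sketch (V8's exact handling of zero may differ):

#include <cstdio>

template <typename T>
bool IsPowerOf2(T x) {
  // Single set bit iff subtracting one borrows through all lower bits.
  return x != 0 && (x & (x - 1)) == 0;
}

int main() {
  std::printf("%d %d %d\n", IsPowerOf2(8), IsPowerOf2(12), IsPowerOf2(1));
  return 0;
}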
const uint32_t kStringTag
Definition: objects.h:437
const uint32_t kQuietNaNHighBitsMask
Definition: v8globals.h:109
static bool IsEqualityOp(Value op)
Definition: token.h:222
static const int kOffsetOffset
Definition: objects.h:7474
void Generate(MacroAssembler *masm)
static const int kLengthOffset
Definition: objects.h:8111
static int SizeFor(int length)
Definition: objects.h:2288
void GenerateSlow(MacroAssembler *masm, const RuntimeCallHelper &call_helper)
virtual void Generate(MacroAssembler *masm)
static const int kLastMatchOverhead
Definition: jsregexp.h:147
const XMMRegister xmm3
static const int kHeaderSize
Definition: objects.h:2233
void Generate(MacroAssembler *masm)
#define ISOLATE
Definition: isolate.h:1410
static const int kMapOffset
Definition: objects.h:1219
bool is(Register reg) const
static const int kSkipEvacuationSlotsRecordingMask
Definition: spaces.h:410
const uint32_t kIsNotStringMask
Definition: objects.h:436
const Register r1
const uint32_t kSlicedNotConsMask
Definition: objects.h:473
static const int kLengthOffset
Definition: objects.h:2232
const Register ebx
void Generate(MacroAssembler *masm)
void Generate(MacroAssembler *masm)
static const int kSecondOffset
Definition: objects.h:7421
static void GeneratePositiveLookup(MacroAssembler *masm, Label *miss, Label *done, Register elements, Register name, Register r0, Register r1)
static const int kArgumentsLengthIndex
Definition: heap.h:869
static const int kFirstCaptureOffset
Definition: jsregexp.h:156
#define UNIMPLEMENTED()
Definition: checks.h:48
static const uint32_t kHashBitMask
Definition: objects.h:7125
static const uint32_t kSignMask
Definition: objects.h:1316
static const int kLastInputOffset
Definition: jsregexp.h:154
const int kSmiShiftSize
Definition: v8.h:3899
const int kSmiTagSize
Definition: v8.h:3854
static const int kHeaderSize
Definition: objects.h:4513
void GenerateBody(MacroAssembler *masm, bool is_construct)
static const int kDataAsciiCodeOffset
Definition: objects.h:6474
Condition NegateCondition(Condition cond)
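NegateCondition exploits the x86 condition-code encoding: Intel assigns each predicate and its negation adjacent codes, so flipping bit 0 turns equal (0x4) into not_equal (0x5), less (0xC) into greater_equal (0xD), and so on. A standalone sketch with an illustrative subset of the encodings:

#include <cstdio>

enum Condition {  // subset of the x86 cc encodings
  equal = 4,
  not_equal = 5,
  less = 12,
  greater_equal = 13
};

Condition NegateCondition(Condition cc) {
  return static_cast<Condition>(cc ^ 1);  // paired codes differ in bit 0
}

int main() {
  std::printf("negate(equal) == not_equal: %d\n",
              NegateCondition(equal) == not_equal);
  return 0;
}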
#define ASSERT_EQ(v1, v2)
Definition: checks.h:271
(concatenated help text for the harmony, crankshaft/hydrogen optimization, and CPU-feature flags defined in flags.cc)
Definition: flags.cc:274
const Register esi
static XMMRegister from_code(int code)
static void GenerateAheadOfTime()
static const uint32_t kMantissaMask
Definition: objects.h:1318
static const int kArgumentsCalleeIndex
Definition: heap.h:871
const int kSmiTag
Definition: v8.h:3853
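kSmiTag, kSmiTagSize, and kSmiShiftSize describe the ia32 smi encoding: a 31-bit integer shifted left by one, with a clear low bit distinguishing smis from heap pointers (whose tag bit is set). A standalone sketch of the tag arithmetic:

#include <cstdint>
#include <cstdio>

const int kSmiTag = 0;
const int kSmiTagSize = 1;
const int kSmiShiftSize = 0;  // ia32: smis shift by the tag size only
const int kSmiShift = kSmiTagSize + kSmiShiftSize;

int32_t SmiFromInt(int32_t value) { return value << kSmiShift; }
int32_t IntFromSmi(int32_t smi) { return smi >> kSmiShift; }
bool IsSmi(int32_t word) {
  return (word & ((1 << kSmiTagSize) - 1)) == kSmiTag;
}

int main() {
  int32_t smi = SmiFromInt(21);
  std::printf("tagged=%d untagged=%d is_smi=%d\n",
              smi, IntFromSmi(smi), IsSmi(smi));
  return 0;
}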
static const int kIsUndetectable
Definition: objects.h:5005
static const int kHeaderSize
Definition: objects.h:2115
void Generate(MacroAssembler *masm)
void GenerateFast(MacroAssembler *masm)
const int kFailureTag
Definition: v8globals.h:71
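kFailureTag and its mask are how a stub recognizes that a runtime call returned a Failure object rather than a result. A standalone sketch of the tag test, following the usual V8 3.x scheme (kFailureTagSize == 2, kFailureTag == 3); treat those values as assumptions:

#include <cstdint>
#include <cstdio>

const intptr_t kFailureTagSize = 2;
const intptr_t kFailureTag = 3;
const intptr_t kFailureTagMask = (1 << kFailureTagSize) - 1;

bool IsFailure(intptr_t result_word) {
  // Failures carry the tag in their low bits; ordinary results do not.
  return (result_word & kFailureTagMask) == kFailureTag;
}

int main() {
  std::printf("0x1003 failure? %d\n", (int)IsFailure(0x1003));
  std::printf("0x1000 failure? %d\n", (int)IsFailure(0x1000));
  return 0;
}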
static void GenerateLookupNumberStringCache(MacroAssembler *masm, Register object, Register result, Register scratch1, Register scratch2, Register scratch3, bool object_is_smi, Label *not_found)
#define FACTORY
Definition: isolate.h:1409
static const int kDataTagOffset
Definition: objects.h:6472
static const int kPrototypeOffset
Definition: objects.h:4953
static const int kSize
Definition: objects.h:5990
#define RUNTIME_ENTRY(name, nargs, ressize)
const Register no_reg
static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler *masm, Register c1, Register c2, Register scratch1, Register scratch2, Register scratch3, Register scratch4, Register scratch5, Label *not_found)
static const int kMaxLength
Definition: objects.h:7166
static const int kValueOffset
Definition: objects.h:6188
bool Contains(Type type) const
Definition: code-stubs.h:1050
const uint32_t kSymbolTag
Definition: objects.h:445
const uint32_t kAsciiStringTag
Definition: objects.h:451
static const int kConstructStubOffset
Definition: objects.h:5608
static const int kNumRegisters
static const int kHashShift
Definition: objects.h:7121
const XMMRegister xmm2
const Register edx
static const int kSharedFunctionInfoOffset
Definition: objects.h:5984
#define FUNCTION_ADDR(f)
Definition: globals.h:307
static const int kMaxValue
Definition: objects.h:1006
void Generate(MacroAssembler *masm)
static const int kBitField2Offset
Definition: objects.h:4995
void Generate(MacroAssembler *masm)
CEntryStub(int result_size, SaveFPRegsMode save_doubles=kDontSaveFPRegs)
Definition: code-stubs.h:629
void check(i::Vector< const char > string)
static const int kExponentOffset
Definition: objects.h:1313
static const int kDataUC16CodeOffset
Definition: objects.h:6476
void Generate(MacroAssembler *masm)
StoreBufferOverflowStub(SaveFPRegsMode save_fp)
FlagType type() const
Definition: flags.cc:1358
static void GenerateHashInit(MacroAssembler *masm, Register hash, Register character)
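GenerateHashInit, GenerateHashAddCharacter (above), and GenerateHashGetHash emit the classic one-at-a-time string hash inline. A standalone C++ sketch of the algorithm those helpers encode in assembly; the seed is hypothetical, and the kHashBitMask/kZeroHash handling is an assumption based on the constants listed in this index:

#include <cstdint>
#include <cstdio>

const uint32_t kHashBitMask = (1u << 30) - 1;  // assumed hash-field width
const uint32_t kZeroHash = 27;                 // assumed stand-in for hash 0

uint32_t AddCharacter(uint32_t hash, uint32_t c) {
  hash += c;
  hash += hash << 10;
  hash ^= hash >> 6;
  return hash;
}

uint32_t GetHash(uint32_t hash) {
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= kHashBitMask;
  return hash == 0 ? kZeroHash : hash;  // never hand out a zero hash
}

int main() {
  uint32_t hash = 0x5eed;  // hypothetical per-isolate hash seed
  for (const char* p = "foo"; *p; ++p)
    hash = AddCharacter(hash, static_cast<uint32_t>(*p));
  std::printf("hash(\"foo\") = %#x\n", GetHash(hash));
  return 0;
}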
static bool IsOrderedRelationalCompareOp(Value op)
Definition: token.h:218
const uint32_t kStringEncodingMask
Definition: objects.h:449
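The string masks and tags in this index classify a string from the instance-type byte of its map: one group of bits selects the representation (sequential, cons, external, sliced), another the character encoding. A standalone sketch of that classification; the mask values mirror the layout these constants describe but are written out here as assumptions:

#include <cstdio>

const unsigned kIsNotStringMask = 0x80;           // high bit set => not a string
const unsigned kStringRepresentationMask = 0x03;  // seq/cons/external/sliced
const unsigned kStringEncodingMask = 0x04;        // one-byte vs two-byte data

bool IsString(unsigned instance_type) {
  return (instance_type & kIsNotStringMask) == 0;
}

bool IsSeqString(unsigned instance_type) {
  // Sequential strings use representation tag 0.
  return IsString(instance_type) &&
         (instance_type & kStringRepresentationMask) == 0;
}

int main() {
  unsigned seq_ascii = 0x04;  // hypothetical: sequential, one-byte data
  std::printf("string=%d seq=%d\n", IsString(seq_ascii), IsSeqString(seq_ascii));
  return 0;
}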
static const int kInstanceTypeOffset
Definition: objects.h:4992
static const int kIndexOffset
Definition: objects.h:8132
static const int kMantissaOffset
Definition: objects.h:1312
void Generate(MacroAssembler *masm)
const XMMRegister xmm0
void Generate(MacroAssembler *masm)