v8 3.11.10 (node 0.8.26)
V8 is Google's open source JavaScript engine.
macro-assembler-x64.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if defined(V8_TARGET_ARCH_X64)
31 
32 #include "bootstrapper.h"
33 #include "codegen.h"
34 #include "assembler-x64.h"
35 #include "macro-assembler-x64.h"
36 #include "serialize.h"
37 #include "debug.h"
38 #include "heap.h"
39 
40 namespace v8 {
41 namespace internal {
42 
43 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
44  : Assembler(arg_isolate, buffer, size),
45  generating_stub_(false),
46  allow_stub_calls_(true),
47  has_frame_(false),
48  root_array_available_(true) {
49  if (isolate() != NULL) {
50  code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
51  isolate());
52  }
53 }
54 
55 
56 static intptr_t RootRegisterDelta(ExternalReference other, Isolate* isolate) {
57  Address roots_register_value = kRootRegisterBias +
58  reinterpret_cast<Address>(isolate->heap()->roots_array_start());
59  intptr_t delta = other.address() - roots_register_value;
60  return delta;
61 }
62 
63 
64 Operand MacroAssembler::ExternalOperand(ExternalReference target,
65  Register scratch) {
66  if (root_array_available_ && !Serializer::enabled()) {
67  intptr_t delta = RootRegisterDelta(target, isolate());
68  if (is_int32(delta)) {
69  Serializer::TooLateToEnableNow();
70  return Operand(kRootRegister, static_cast<int32_t>(delta));
71  }
72  }
73  movq(scratch, target);
74  return Operand(scratch, 0);
75 }
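// Note on the pattern above (an explanatory sketch, not part of the original
// source): kRootRegister (r13) points kRootRegisterBias bytes past the start
// of the isolate's root array. When an ExternalReference happens to lie within
// a signed 32-bit displacement of that fixed address, it can be reached as
// Operand(kRootRegister, delta) in a single instruction; only otherwise does
// the code pay for materializing a full 64-bit address in a scratch register.
// Schematically (hypothetical values):
//   delta = target.address() - (roots_array_start + kRootRegisterBias);
//   is_int32(delta)  ->  Operand(kRootRegister, delta)            // short form
//   otherwise        ->  movq(scratch, target); Operand(scratch, 0)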
76 
77 
78 void MacroAssembler::Load(Register destination, ExternalReference source) {
79  if (root_array_available_ && !Serializer::enabled()) {
80  intptr_t delta = RootRegisterDelta(source, isolate());
81  if (is_int32(delta)) {
82  Serializer::TooLateToEnableNow();
83  movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
84  return;
85  }
86  }
87  // Safe code.
88  if (destination.is(rax)) {
89  load_rax(source);
90  } else {
91  movq(kScratchRegister, source);
92  movq(destination, Operand(kScratchRegister, 0));
93  }
94 }
95 
96 
97 void MacroAssembler::Store(ExternalReference destination, Register source) {
98  if (root_array_available_ && !Serializer::enabled()) {
99  intptr_t delta = RootRegisterDelta(destination, isolate());
100  if (is_int32(delta)) {
101  Serializer::TooLateToEnableNow();
102  movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
103  return;
104  }
105  }
106  // Safe code.
107  if (source.is(rax)) {
108  store_rax(destination);
109  } else {
110  movq(kScratchRegister, destination);
111  movq(Operand(kScratchRegister, 0), source);
112  }
113 }
114 
115 
116 void MacroAssembler::LoadAddress(Register destination,
117  ExternalReference source) {
118  if (root_array_available_ && !Serializer::enabled()) {
119  intptr_t delta = RootRegisterDelta(source, isolate());
120  if (is_int32(delta)) {
121  Serializer::TooLateToEnableNow();
122  lea(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
123  return;
124  }
125  }
126  // Safe code.
127  movq(destination, source);
128 }
129 
130 
131 int MacroAssembler::LoadAddressSize(ExternalReference source) {
132  if (root_array_available_ && !Serializer::enabled()) {
133  // This calculation depends on the internals of LoadAddress.
 134 // Its correctness is ensured by the asserts in the Call
135  // instruction below.
136  intptr_t delta = RootRegisterDelta(source, isolate());
137  if (is_int32(delta)) {
138  Serializer::TooLateToEnableNow();
139  // Operand is lea(scratch, Operand(kRootRegister, delta));
140  // Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
141  int size = 4;
142  if (!is_int8(static_cast<int32_t>(delta))) {
143  size += 3; // Need full four-byte displacement in lea.
144  }
145  return size;
146  }
147  }
148  // Size of movq(destination, src);
149  return 10;
150 }
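// Size accounting above, spelled out (illustrative note, not in the original
// source): the short form is REX.W (1) + opcode 8D (1) + ModRM (1) + disp8 (1)
// = 4 bytes, growing to 7 bytes when the displacement needs a full disp32
// (4 bytes instead of 1). The fallback movq with a 64-bit immediate is
// REX.W (1) + opcode B8+r (1) + imm64 (8) = 10 bytes, matching the constant
// returned at the end of the function.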
151 
152 
153 void MacroAssembler::PushAddress(ExternalReference source) {
154  int64_t address = reinterpret_cast<int64_t>(source.address());
155  if (is_int32(address) && !Serializer::enabled()) {
156  if (emit_debug_code()) {
157  movq(kScratchRegister, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
158  }
159  push(Immediate(static_cast<int32_t>(address)));
160  return;
161  }
162  LoadAddress(kScratchRegister, source);
163  push(kScratchRegister);
164 }
165 
166 
167 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
168  ASSERT(root_array_available_);
169  movq(destination, Operand(kRootRegister,
170  (index << kPointerSizeLog2) - kRootRegisterBias));
171 }
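// A brief note on kRootRegisterBias (added commentary, an assumption inferred
// from the addressing math above): the root register is deliberately biased so
// that the displacement computed here,
//   (index << kPointerSizeLog2) - kRootRegisterBias,
// is negative for the lowest root indices. That lets more of the frequently
// used roots fall into the signed 8-bit displacement range around
// kRootRegister, keeping LoadRoot/StoreRoot/PushRoot encodings short.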
172 
173 
174 void MacroAssembler::LoadRootIndexed(Register destination,
175  Register variable_offset,
176  int fixed_offset) {
177  ASSERT(root_array_available_);
178  movq(destination,
179  Operand(kRootRegister,
180  variable_offset, times_pointer_size,
181  (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
182 }
183 
184 
185 void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
186  ASSERT(root_array_available_);
187  movq(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
188  source);
189 }
190 
191 
192 void MacroAssembler::PushRoot(Heap::RootListIndex index) {
193  ASSERT(root_array_available_);
194  push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
195 }
196 
197 
198 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
199  ASSERT(root_array_available_);
200  cmpq(with, Operand(kRootRegister,
201  (index << kPointerSizeLog2) - kRootRegisterBias));
202 }
203 
204 
205 void MacroAssembler::CompareRoot(const Operand& with,
206  Heap::RootListIndex index) {
207  ASSERT(root_array_available_);
208  ASSERT(!with.AddressUsesRegister(kScratchRegister));
209  LoadRoot(kScratchRegister, index);
210  cmpq(with, kScratchRegister);
211 }
212 
213 
214 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
215  Register addr,
216  Register scratch,
217  SaveFPRegsMode save_fp,
218  RememberedSetFinalAction and_then) {
219  if (FLAG_debug_code) {
220  Label ok;
221  JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
222  int3();
223  bind(&ok);
224  }
225  // Load store buffer top.
226  LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
227  // Store pointer to buffer.
228  movq(Operand(scratch, 0), addr);
229  // Increment buffer top.
230  addq(scratch, Immediate(kPointerSize));
231  // Write back new top of buffer.
232  StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
233  // Call stub on end of buffer.
234  Label done;
235  // Check for end of buffer.
236  testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
237  if (and_then == kReturnAtEnd) {
238  Label buffer_overflowed;
239  j(not_equal, &buffer_overflowed, Label::kNear);
240  ret(0);
241  bind(&buffer_overflowed);
242  } else {
243  ASSERT(and_then == kFallThroughAtEnd);
244  j(equal, &done, Label::kNear);
245  }
246  StoreBufferOverflowStub store_buffer_overflow =
247  StoreBufferOverflowStub(save_fp);
248  CallStub(&store_buffer_overflow);
249  if (and_then == kReturnAtEnd) {
250  ret(0);
251  } else {
252  ASSERT(and_then == kFallThroughAtEnd);
253  bind(&done);
254  }
255 }
256 
257 
258 void MacroAssembler::InNewSpace(Register object,
259  Register scratch,
260  Condition cc,
261  Label* branch,
262  Label::Distance distance) {
263  if (Serializer::enabled()) {
264  // Can't do arithmetic on external references if it might get serialized.
265  // The mask isn't really an address. We load it as an external reference in
266  // case the size of the new space is different between the snapshot maker
267  // and the running system.
268  if (scratch.is(object)) {
269  movq(kScratchRegister, ExternalReference::new_space_mask(isolate()));
270  and_(scratch, kScratchRegister);
271  } else {
272  movq(scratch, ExternalReference::new_space_mask(isolate()));
273  and_(scratch, object);
274  }
275  movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
276  cmpq(scratch, kScratchRegister);
277  j(cc, branch, distance);
278  } else {
279  ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
280  intptr_t new_space_start =
281  reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
282  movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
283  if (scratch.is(object)) {
284  addq(scratch, kScratchRegister);
285  } else {
286  lea(scratch, Operand(object, kScratchRegister, times_1, 0));
287  }
288  and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
289  j(cc, branch, distance);
290  }
291 }
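// How the non-serializer branch above works (illustrative walk-through, not
// original commentary): new space occupies a contiguous, suitably aligned
// reservation, so an address lies in it exactly when
//   (addr - NewSpaceStart()) & NewSpaceMask() == 0
// The code loads -NewSpaceStart() into kScratchRegister, forms
// object + (-start) with addq or lea, masks with NewSpaceMask(), and then the
// condition passed in as 'cc' (zero / not_zero) selects "is in new space"
// versus "is not in new space" for the branch.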
292 
293 
294 void MacroAssembler::RecordWriteField(
295  Register object,
296  int offset,
297  Register value,
298  Register dst,
299  SaveFPRegsMode save_fp,
300  RememberedSetAction remembered_set_action,
301  SmiCheck smi_check) {
302  // The compiled code assumes that record write doesn't change the
303  // context register, so we check that none of the clobbered
304  // registers are rsi.
305  ASSERT(!value.is(rsi) && !dst.is(rsi));
306 
307  // First, check if a write barrier is even needed. The tests below
308  // catch stores of Smis.
309  Label done;
310 
311  // Skip barrier if writing a smi.
312  if (smi_check == INLINE_SMI_CHECK) {
313  JumpIfSmi(value, &done);
314  }
315 
316  // Although the object register is tagged, the offset is relative to the start
 317 // of the object, so the offset must be a multiple of kPointerSize.
318  ASSERT(IsAligned(offset, kPointerSize));
319 
320  lea(dst, FieldOperand(object, offset));
321  if (emit_debug_code()) {
322  Label ok;
323  testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
324  j(zero, &ok, Label::kNear);
325  int3();
326  bind(&ok);
327  }
328 
329  RecordWrite(
330  object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
331 
332  bind(&done);
333 
334  // Clobber clobbered input registers when running with the debug-code flag
335  // turned on to provoke errors.
336  if (emit_debug_code()) {
337  movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
338  movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
339  }
340 }
341 
342 
343 void MacroAssembler::RecordWriteArray(Register object,
344  Register value,
345  Register index,
346  SaveFPRegsMode save_fp,
347  RememberedSetAction remembered_set_action,
348  SmiCheck smi_check) {
349  // First, check if a write barrier is even needed. The tests below
350  // catch stores of Smis.
351  Label done;
352 
353  // Skip barrier if writing a smi.
354  if (smi_check == INLINE_SMI_CHECK) {
355  JumpIfSmi(value, &done);
356  }
357 
358  // Array access: calculate the destination address. Index is not a smi.
359  Register dst = index;
360  lea(dst, Operand(object, index, times_pointer_size,
361  FixedArray::kHeaderSize - kHeapObjectTag));
362 
363  RecordWrite(
364  object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
365 
366  bind(&done);
367 
368  // Clobber clobbered input registers when running with the debug-code flag
369  // turned on to provoke errors.
370  if (emit_debug_code()) {
371  movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
372  movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
373  }
374 }
375 
376 
377 void MacroAssembler::RecordWrite(Register object,
378  Register address,
379  Register value,
380  SaveFPRegsMode fp_mode,
381  RememberedSetAction remembered_set_action,
382  SmiCheck smi_check) {
383  // The compiled code assumes that record write doesn't change the
384  // context register, so we check that none of the clobbered
385  // registers are rsi.
386  ASSERT(!value.is(rsi) && !address.is(rsi));
387 
388  ASSERT(!object.is(value));
389  ASSERT(!object.is(address));
390  ASSERT(!value.is(address));
391  if (emit_debug_code()) {
392  AbortIfSmi(object);
393  }
394 
395  if (remembered_set_action == OMIT_REMEMBERED_SET &&
396  !FLAG_incremental_marking) {
397  return;
398  }
399 
400  if (FLAG_debug_code) {
401  Label ok;
402  cmpq(value, Operand(address, 0));
403  j(equal, &ok, Label::kNear);
404  int3();
405  bind(&ok);
406  }
407 
408  // First, check if a write barrier is even needed. The tests below
409  // catch stores of smis and stores into the young generation.
410  Label done;
411 
412  if (smi_check == INLINE_SMI_CHECK) {
413  // Skip barrier if writing a smi.
414  JumpIfSmi(value, &done);
415  }
416 
417  CheckPageFlag(value,
418  value, // Used as scratch.
419  MemoryChunk::kPointersToHereAreInterestingMask,
420  zero,
421  &done,
422  Label::kNear);
423 
424  CheckPageFlag(object,
425  value, // Used as scratch.
426  MemoryChunk::kPointersFromHereAreInterestingMask,
427  zero,
428  &done,
429  Label::kNear);
430 
431  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
432  CallStub(&stub);
433 
434  bind(&done);
435 
436  // Clobber clobbered registers when running with the debug-code flag
437  // turned on to provoke errors.
438  if (emit_debug_code()) {
439  movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
440  movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
441  }
442 }
443 
444 
445 void MacroAssembler::Assert(Condition cc, const char* msg) {
446  if (emit_debug_code()) Check(cc, msg);
447 }
448 
449 
450 void MacroAssembler::AssertFastElements(Register elements) {
451  if (emit_debug_code()) {
452  Label ok;
453  CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
454  Heap::kFixedArrayMapRootIndex);
455  j(equal, &ok, Label::kNear);
456  CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
457  Heap::kFixedDoubleArrayMapRootIndex);
458  j(equal, &ok, Label::kNear);
459  CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
460  Heap::kFixedCOWArrayMapRootIndex);
461  j(equal, &ok, Label::kNear);
462  Abort("JSObject with fast elements map has slow elements");
463  bind(&ok);
464  }
465 }
466 
467 
468 void MacroAssembler::Check(Condition cc, const char* msg) {
469  Label L;
470  j(cc, &L, Label::kNear);
471  Abort(msg);
472  // Control will not return here.
473  bind(&L);
474 }
475 
476 
477 void MacroAssembler::CheckStackAlignment() {
478  int frame_alignment = OS::ActivationFrameAlignment();
479  int frame_alignment_mask = frame_alignment - 1;
480  if (frame_alignment > kPointerSize) {
481  ASSERT(IsPowerOf2(frame_alignment));
482  Label alignment_as_expected;
483  testq(rsp, Immediate(frame_alignment_mask));
484  j(zero, &alignment_as_expected, Label::kNear);
485  // Abort if stack is not aligned.
486  int3();
487  bind(&alignment_as_expected);
488  }
489 }
490 
491 
492 void MacroAssembler::NegativeZeroTest(Register result,
493  Register op,
494  Label* then_label) {
495  Label ok;
496  testl(result, result);
497  j(not_zero, &ok, Label::kNear);
498  testl(op, op);
499  j(sign, then_label);
500  bind(&ok);
501 }
502 
503 
504 void MacroAssembler::Abort(const char* msg) {
505  // We want to pass the msg string like a smi to avoid GC
 506 // problems; however, msg is not guaranteed to be aligned
507  // properly. Instead, we pass an aligned pointer that is
508  // a proper v8 smi, but also pass the alignment difference
509  // from the real pointer as a smi.
510  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
511  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
512  // Note: p0 might not be a valid Smi _value_, but it has a valid Smi tag.
513  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
514 #ifdef DEBUG
515  if (msg != NULL) {
516  RecordComment("Abort message: ");
517  RecordComment(msg);
518  }
519 #endif
 520  push(rax);
 521  movq(kScratchRegister, p0, RelocInfo::NONE);
 522  push(kScratchRegister);
 523  movq(kScratchRegister,
 524  reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
 525  RelocInfo::NONE);
 526  push(kScratchRegister);
527 
528  if (!has_frame_) {
529  // We don't actually want to generate a pile of code for this, so just
530  // claim there is a stack frame, without generating one.
531  FrameScope scope(this, StackFrame::NONE);
532  CallRuntime(Runtime::kAbort, 2);
533  } else {
534  CallRuntime(Runtime::kAbort, 2);
535  }
536  // Control will not return here.
537  int3();
538 }
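// Worked example for the Abort() smi trick above (illustrative numbers, not
// from the original source): with kSmiTag == 0 and kSmiTagMask == 1, a msg
// pointer p1 = 0x...4007 yields p0 = 0x...4006, i.e. p1 with the tag bit
// cleared, which the GC will treat as a harmless smi-looking word. The
// runtime's Abort handler can then recover the real C string as
// p0 + the smi-decoded value of (p1 - p0), so both pushed arguments survive a
// GC that may happen while the abort message is being reported.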
539 
540 
541 void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
542  ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
543  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
544 }
545 
546 
547 void MacroAssembler::TailCallStub(CodeStub* stub) {
548  ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
549  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
550 }
551 
552 
553 void MacroAssembler::StubReturn(int argc) {
554  ASSERT(argc >= 1 && generating_stub());
555  ret((argc - 1) * kPointerSize);
556 }
557 
558 
559 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
560  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
561  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
562 }
563 
564 
565 void MacroAssembler::IllegalOperation(int num_arguments) {
566  if (num_arguments > 0) {
567  addq(rsp, Immediate(num_arguments * kPointerSize));
568  }
569  LoadRoot(rax, Heap::kUndefinedValueRootIndex);
570 }
571 
572 
573 void MacroAssembler::IndexFromHash(Register hash, Register index) {
574  // The assert checks that the constants for the maximum number of digits
575  // for an array index cached in the hash field and the number of bits
 576 // reserved for it do not conflict.
577  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
578  (1 << String::kArrayIndexValueBits));
579  // We want the smi-tagged index in key. Even if we subsequently go to
580  // the slow case, converting the key to a smi is always valid.
581  // key: string key
582  // hash: key's hash field, including its array index value.
583  and_(hash, Immediate(String::kArrayIndexValueMask));
584  shr(hash, Immediate(String::kHashShift));
585  // Here we actually clobber the key which will be used if calling into
 586 // runtime later. However, as the new key is the numeric value of a string key,
587  // there is no difference in using either key.
588  Integer32ToSmi(index, hash);
589 }
590 
591 
592 void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
593  CallRuntime(Runtime::FunctionForId(id), num_arguments);
594 }
595 
596 
597 void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
598  const Runtime::Function* function = Runtime::FunctionForId(id);
599  Set(rax, function->nargs);
600  LoadAddress(rbx, ExternalReference(function, isolate()));
601  CEntryStub ces(1, kSaveFPRegs);
602  CallStub(&ces);
603 }
604 
605 
606 void MacroAssembler::CallRuntime(const Runtime::Function* f,
607  int num_arguments) {
608  // If the expected number of arguments of the runtime function is
609  // constant, we check that the actual number of arguments match the
610  // expectation.
611  if (f->nargs >= 0 && f->nargs != num_arguments) {
612  IllegalOperation(num_arguments);
613  return;
614  }
615 
616  // TODO(1236192): Most runtime routines don't need the number of
617  // arguments passed in because it is constant. At some point we
618  // should remove this need and make the runtime routine entry code
619  // smarter.
620  Set(rax, num_arguments);
621  LoadAddress(rbx, ExternalReference(f, isolate()));
622  CEntryStub ces(f->result_size);
623  CallStub(&ces);
624 }
625 
626 
627 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
628  int num_arguments) {
629  Set(rax, num_arguments);
630  LoadAddress(rbx, ext);
631 
632  CEntryStub stub(1);
633  CallStub(&stub);
634 }
635 
636 
637 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
638  int num_arguments,
639  int result_size) {
640  // ----------- S t a t e -------------
641  // -- rsp[0] : return address
642  // -- rsp[8] : argument num_arguments - 1
643  // ...
644  // -- rsp[8 * num_arguments] : argument 0 (receiver)
645  // -----------------------------------
646 
647  // TODO(1236192): Most runtime routines don't need the number of
648  // arguments passed in because it is constant. At some point we
649  // should remove this need and make the runtime routine entry code
650  // smarter.
651  Set(rax, num_arguments);
652  JumpToExternalReference(ext, result_size);
653 }
654 
655 
656 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
657  int num_arguments,
658  int result_size) {
659  TailCallExternalReference(ExternalReference(fid, isolate()),
660  num_arguments,
661  result_size);
662 }
663 
664 
665 static int Offset(ExternalReference ref0, ExternalReference ref1) {
666  int64_t offset = (ref0.address() - ref1.address());
 667 // Check that the offset fits into an int.
668  ASSERT(static_cast<int>(offset) == offset);
669  return static_cast<int>(offset);
670 }
671 
672 
673 void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
674 #if defined(_WIN64) && !defined(__MINGW64__)
 675 // We need to prepare a slot for the result handle on the stack and put
 676 // a pointer to it into the 1st arg register.
677  EnterApiExitFrame(arg_stack_space + 1);
678 
679  // rcx must be used to pass the pointer to the return value slot.
680  lea(rcx, StackSpaceOperand(arg_stack_space));
681 #else
682  EnterApiExitFrame(arg_stack_space);
683 #endif
684 }
685 
686 
687 void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
688  int stack_space) {
689  Label empty_result;
690  Label prologue;
691  Label promote_scheduled_exception;
692  Label delete_allocated_handles;
693  Label leave_exit_frame;
694  Label write_back;
695 
696  Factory* factory = isolate()->factory();
697  ExternalReference next_address =
698  ExternalReference::handle_scope_next_address();
699  const int kNextOffset = 0;
700  const int kLimitOffset = Offset(
701  ExternalReference::handle_scope_limit_address(),
702  next_address);
703  const int kLevelOffset = Offset(
704  ExternalReference::handle_scope_level_address(),
705  next_address);
706  ExternalReference scheduled_exception_address =
707  ExternalReference::scheduled_exception_address(isolate());
708 
709  // Allocate HandleScope in callee-save registers.
710  Register prev_next_address_reg = r14;
711  Register prev_limit_reg = rbx;
712  Register base_reg = r15;
713  movq(base_reg, next_address);
714  movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
715  movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
716  addl(Operand(base_reg, kLevelOffset), Immediate(1));
717  // Call the api function!
 718  movq(rax, reinterpret_cast<int64_t>(function_address),
 719  RelocInfo::RUNTIME_ENTRY);
 720  call(rax);
721 
722 #if defined(_WIN64) && !defined(__MINGW64__)
723  // rax keeps a pointer to v8::Handle, unpack it.
724  movq(rax, Operand(rax, 0));
725 #endif
726  // Check if the result handle holds 0.
727  testq(rax, rax);
728  j(zero, &empty_result);
729  // It was non-zero. Dereference to get the result value.
730  movq(rax, Operand(rax, 0));
731  bind(&prologue);
732 
733  // No more valid handles (the result handle was the last one). Restore
734  // previous handle scope.
735  subl(Operand(base_reg, kLevelOffset), Immediate(1));
736  movq(Operand(base_reg, kNextOffset), prev_next_address_reg);
737  cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset));
738  j(not_equal, &delete_allocated_handles);
739  bind(&leave_exit_frame);
740 
741  // Check if the function scheduled an exception.
742  movq(rsi, scheduled_exception_address);
743  Cmp(Operand(rsi, 0), factory->the_hole_value());
744  j(not_equal, &promote_scheduled_exception);
745 
746  LeaveApiExitFrame();
747  ret(stack_space * kPointerSize);
748 
749  bind(&promote_scheduled_exception);
750  TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
751 
752  bind(&empty_result);
753  // It was zero; the result is undefined.
754  Move(rax, factory->undefined_value());
755  jmp(&prologue);
756 
757  // HandleScope limit has changed. Delete allocated extensions.
758  bind(&delete_allocated_handles);
759  movq(Operand(base_reg, kLimitOffset), prev_limit_reg);
760  movq(prev_limit_reg, rax);
761 #ifdef _WIN64
762  LoadAddress(rcx, ExternalReference::isolate_address());
763 #else
764  LoadAddress(rdi, ExternalReference::isolate_address());
765 #endif
766  LoadAddress(rax,
767  ExternalReference::delete_handle_scope_extensions(isolate()));
768  call(rax);
769  movq(rax, prev_limit_reg);
770  jmp(&leave_exit_frame);
771 }
772 
773 
774 void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
775  int result_size) {
776  // Set the entry point and jump to the C entry runtime stub.
777  LoadAddress(rbx, ext);
778  CEntryStub ces(result_size);
779  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
780 }
781 
782 
 783 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
 784  InvokeFlag flag,
 785  const CallWrapper& call_wrapper) {
786  // You can't call a builtin without a valid frame.
787  ASSERT(flag == JUMP_FUNCTION || has_frame());
788 
789  // Rely on the assertion to check that the number of provided
790  // arguments match the expected number of arguments. Fake a
791  // parameter count to avoid emitting code to do the check.
792  ParameterCount expected(0);
793  GetBuiltinEntry(rdx, id);
794  InvokeCode(rdx, expected, expected, flag, call_wrapper, CALL_AS_METHOD);
795 }
796 
797 
798 void MacroAssembler::GetBuiltinFunction(Register target,
799  Builtins::JavaScript id) {
800  // Load the builtins object into target register.
801  movq(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
802  movq(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
803  movq(target, FieldOperand(target,
804  JSBuiltinsObject::OffsetOfFunctionWithId(id)));
805 }
806 
807 
808 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
809  ASSERT(!target.is(rdi));
810  // Load the JavaScript builtin function from the builtins object.
811  GetBuiltinFunction(rdi, id);
812  movq(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
813 }
814 
815 
816 #define REG(Name) { kRegister_ ## Name ## _Code }
817 
818 static const Register saved_regs[] = {
819  REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
820  REG(r9), REG(r10), REG(r11)
821 };
822 
823 #undef REG
824 
825 static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
826 
827 
828 void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
829  Register exclusion1,
830  Register exclusion2,
831  Register exclusion3) {
832  // We don't allow a GC during a store buffer overflow so there is no need to
833  // store the registers in any particular way, but we do have to store and
834  // restore them.
835  for (int i = 0; i < kNumberOfSavedRegs; i++) {
836  Register reg = saved_regs[i];
837  if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
838  push(reg);
839  }
840  }
 841 // r12 to r15 are callee-save on all platforms.
842  if (fp_mode == kSaveFPRegs) {
843  CpuFeatures::Scope scope(SSE2);
844  subq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
845  for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
846  XMMRegister reg = XMMRegister::from_code(i);
847  movsd(Operand(rsp, i * kDoubleSize), reg);
848  }
849  }
850 }
851 
852 
853 void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
854  Register exclusion1,
855  Register exclusion2,
856  Register exclusion3) {
857  if (fp_mode == kSaveFPRegs) {
858  CpuFeatures::Scope scope(SSE2);
859  for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
860  XMMRegister reg = XMMRegister::from_code(i);
861  movsd(reg, Operand(rsp, i * kDoubleSize));
862  }
863  addq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
864  }
865  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
866  Register reg = saved_regs[i];
867  if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
868  pop(reg);
869  }
870  }
871 }
872 
873 
874 void MacroAssembler::Set(Register dst, int64_t x) {
875  if (x == 0) {
876  xorl(dst, dst);
877  } else if (is_uint32(x)) {
878  movl(dst, Immediate(static_cast<uint32_t>(x)));
879  } else if (is_int32(x)) {
880  movq(dst, Immediate(static_cast<int32_t>(x)));
881  } else {
882  movq(dst, x, RelocInfo::NONE);
883  }
884 }
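// Encoding rationale for Set() above (explanatory sketch): each branch picks
// the shortest x64 encoding that still produces the right 64-bit value:
//   x == 0        -> xorl(dst, dst)    (2-3 bytes; writing the 32-bit register
//                                       clears the upper half as well)
//   fits uint32   -> movl(dst, imm32)  (zero-extends to 64 bits)
//   fits int32    -> movq(dst, imm32)  (sign-extends to 64 bits)
//   otherwise     -> movq(dst, imm64)  (full 10-byte immediate form)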
885 
886 void MacroAssembler::Set(const Operand& dst, int64_t x) {
887  if (is_int32(x)) {
888  movq(dst, Immediate(static_cast<int32_t>(x)));
889  } else {
890  Set(kScratchRegister, x);
891  movq(dst, kScratchRegister);
892  }
893 }
894 
895 // ----------------------------------------------------------------------------
896 // Smi tagging, untagging and tag detection.
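// Background for the smi helpers below (explanatory note, assuming the
// standard x64 smi layout of this V8 version): a smi keeps its 32-bit payload
// in the upper half of the 64-bit word, with kSmiShift == 32 and kSmiTag == 0
// in the low bits. Schematically:
//   Integer32ToSmi:  value 5 becomes 0x0000000500000000
//   SmiToInteger64:  an arithmetic shift right by kSmiShift restores the value
//   field access:    the 32-bit payload can be read or written directly at
//                    offset kSmiShift / kBitsPerByte (i.e. +4) of the word.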
897 
898 Register MacroAssembler::GetSmiConstant(Smi* source) {
899  int value = source->value();
 900  if (value == 0) {
 901  xorl(kScratchRegister, kScratchRegister);
 902  return kScratchRegister;
 903  }
904  if (value == 1) {
905  return kSmiConstantRegister;
906  }
907  LoadSmiConstant(kScratchRegister, source);
908  return kScratchRegister;
909 }
910 
911 void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
912  if (emit_debug_code()) {
 913  movq(dst,
 914  reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
 915  RelocInfo::NONE);
 916  cmpq(dst, kSmiConstantRegister);
917  if (allow_stub_calls()) {
918  Assert(equal, "Uninitialized kSmiConstantRegister");
919  } else {
920  Label ok;
921  j(equal, &ok, Label::kNear);
922  int3();
923  bind(&ok);
924  }
925  }
926  int value = source->value();
927  if (value == 0) {
928  xorl(dst, dst);
929  return;
930  }
931  bool negative = value < 0;
932  unsigned int uvalue = negative ? -value : value;
933 
934  switch (uvalue) {
935  case 9:
936  lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
937  break;
938  case 8:
939  xorl(dst, dst);
940  lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
941  break;
942  case 4:
943  xorl(dst, dst);
944  lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
945  break;
946  case 5:
947  lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
948  break;
949  case 3:
950  lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
951  break;
952  case 2:
953  lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
954  break;
955  case 1:
956  movq(dst, kSmiConstantRegister);
957  break;
958  case 0:
959  UNREACHABLE();
960  return;
961  default:
962  movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE);
963  return;
964  }
965  if (negative) {
966  neg(dst);
967  }
968 }
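// How the switch above synthesizes small constants (illustrative note,
// assuming kSmiConstantRegister permanently holds Smi::FromInt(1)): since a
// smi's payload sits in the upper 32 bits, Smi::FromInt(n) is just n times
// Smi::FromInt(1), so small n can be produced with a single lea using the
// hardware scale factors, e.g.
//   Smi::FromInt(5): lea(dst, Operand(kSmiConstantRegister,
//                                     kSmiConstantRegister, times_4, 0));
//   Smi::FromInt(8): xorl(dst, dst);
//                    lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
// Negative values reuse the same patterns and are fixed up with neg(dst).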
969 
970 
971 void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
972  STATIC_ASSERT(kSmiTag == 0);
973  if (!dst.is(src)) {
974  movl(dst, src);
975  }
976  shl(dst, Immediate(kSmiShift));
977 }
978 
979 
980 void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
981  if (emit_debug_code()) {
982  testb(dst, Immediate(0x01));
983  Label ok;
984  j(zero, &ok, Label::kNear);
985  if (allow_stub_calls()) {
986  Abort("Integer32ToSmiField writing to non-smi location");
987  } else {
988  int3();
989  }
990  bind(&ok);
991  }
992  ASSERT(kSmiShift % kBitsPerByte == 0);
993  movl(Operand(dst, kSmiShift / kBitsPerByte), src);
994 }
995 
996 
997 void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
998  Register src,
999  int constant) {
1000  if (dst.is(src)) {
1001  addl(dst, Immediate(constant));
1002  } else {
1003  leal(dst, Operand(src, constant));
1004  }
1005  shl(dst, Immediate(kSmiShift));
1006 }
1007 
1008 
1009 void MacroAssembler::SmiToInteger32(Register dst, Register src) {
1010  STATIC_ASSERT(kSmiTag == 0);
1011  if (!dst.is(src)) {
1012  movq(dst, src);
1013  }
1014  shr(dst, Immediate(kSmiShift));
1015 }
1016 
1017 
1018 void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
1019  movl(dst, Operand(src, kSmiShift / kBitsPerByte));
1020 }
1021 
1022 
1023 void MacroAssembler::SmiToInteger64(Register dst, Register src) {
1024  STATIC_ASSERT(kSmiTag == 0);
1025  if (!dst.is(src)) {
1026  movq(dst, src);
1027  }
1028  sar(dst, Immediate(kSmiShift));
1029 }
1030 
1031 
1032 void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
1033  movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
1034 }
1035 
1036 
1037 void MacroAssembler::SmiTest(Register src) {
1038  testq(src, src);
1039 }
1040 
1041 
1042 void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
1043  if (emit_debug_code()) {
1044  AbortIfNotSmi(smi1);
1045  AbortIfNotSmi(smi2);
1046  }
1047  cmpq(smi1, smi2);
1048 }
1049 
1050 
1051 void MacroAssembler::SmiCompare(Register dst, Smi* src) {
1052  if (emit_debug_code()) {
1053  AbortIfNotSmi(dst);
1054  }
1055  Cmp(dst, src);
1056 }
1057 
1058 
1059 void MacroAssembler::Cmp(Register dst, Smi* src) {
1060  ASSERT(!dst.is(kScratchRegister));
1061  if (src->value() == 0) {
1062  testq(dst, dst);
1063  } else {
1064  Register constant_reg = GetSmiConstant(src);
1065  cmpq(dst, constant_reg);
1066  }
1067 }
1068 
1069 
1070 void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
1071  if (emit_debug_code()) {
1072  AbortIfNotSmi(dst);
1073  AbortIfNotSmi(src);
1074  }
1075  cmpq(dst, src);
1076 }
1077 
1078 
1079 void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
1080  if (emit_debug_code()) {
1081  AbortIfNotSmi(dst);
1082  AbortIfNotSmi(src);
1083  }
1084  cmpq(dst, src);
1085 }
1086 
1087 
1088 void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
1089  if (emit_debug_code()) {
1090  AbortIfNotSmi(dst);
1091  }
1092  cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
1093 }
1094 
1095 
1096 void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
1097  // The Operand cannot use the smi register.
1098  Register smi_reg = GetSmiConstant(src);
1099  ASSERT(!dst.AddressUsesRegister(smi_reg));
1100  cmpq(dst, smi_reg);
1101 }
1102 
1103 
1104 void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
1105  cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
1106 }
1107 
1108 
1109 void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
1110  Register src,
1111  int power) {
1112  ASSERT(power >= 0);
1113  ASSERT(power < 64);
1114  if (power == 0) {
1115  SmiToInteger64(dst, src);
1116  return;
1117  }
1118  if (!dst.is(src)) {
1119  movq(dst, src);
1120  }
1121  if (power < kSmiShift) {
1122  sar(dst, Immediate(kSmiShift - power));
1123  } else if (power > kSmiShift) {
1124  shl(dst, Immediate(power - kSmiShift));
1125  }
1126 }
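// The shift arithmetic above, written out (explanatory note, not original
// commentary): the desired result is (smi >> kSmiShift) << power, i.e. the
// untagged value times 2^power. Because both operations are shifts of the
// same register, they collapse into one:
//   power < kSmiShift:   sar(dst, kSmiShift - power)
//   power == kSmiShift:  no shift needed
//   power > kSmiShift:   shl(dst, power - kSmiShift)
// which is exactly the case split handled by the function.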
1127 
1128 
1129 void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
1130  Register src,
1131  int power) {
1132  ASSERT((0 <= power) && (power < 32));
1133  if (dst.is(src)) {
1134  shr(dst, Immediate(power + kSmiShift));
1135  } else {
1136  UNIMPLEMENTED(); // Not used.
1137  }
1138 }
1139 
1140 
1141 void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
1142  Label* on_not_smis,
1143  Label::Distance near_jump) {
1144  if (dst.is(src1) || dst.is(src2)) {
1145  ASSERT(!src1.is(kScratchRegister));
1146  ASSERT(!src2.is(kScratchRegister));
1147  movq(kScratchRegister, src1);
1148  or_(kScratchRegister, src2);
1149  JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
1150  movq(dst, kScratchRegister);
1151  } else {
1152  movq(dst, src1);
1153  or_(dst, src2);
1154  JumpIfNotSmi(dst, on_not_smis, near_jump);
1155  }
1156 }
1157 
1158 
1159 Condition MacroAssembler::CheckSmi(Register src) {
1160  STATIC_ASSERT(kSmiTag == 0);
1161  testb(src, Immediate(kSmiTagMask));
1162  return zero;
1163 }
1164 
1165 
1166 Condition MacroAssembler::CheckSmi(const Operand& src) {
1167  STATIC_ASSERT(kSmiTag == 0);
1168  testb(src, Immediate(kSmiTagMask));
1169  return zero;
1170 }
1171 
1172 
1173 Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
1174  STATIC_ASSERT(kSmiTag == 0);
1175  // Test that both bits of the mask 0x8000000000000001 are zero.
1176  movq(kScratchRegister, src);
1177  rol(kScratchRegister, Immediate(1));
1178  testb(kScratchRegister, Immediate(3));
1179  return zero;
1180 }
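// Why the rol/testb pair above works (illustrative note): a non-negative smi
// must have both the tag bit (bit 0) and the sign bit (bit 63) clear, i.e.
// (value & 0x8000000000000001) == 0. Rotating left by one moves the sign bit
// into bit 0 and the tag bit into bit 1, so the same condition becomes a
// cheap testb against the 8-bit immediate 3.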
1181 
1182 
1183 Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
1184  if (first.is(second)) {
1185  return CheckSmi(first);
1186  }
1188  leal(kScratchRegister, Operand(first, second, times_1, 0));
1189  testb(kScratchRegister, Immediate(0x03));
1190  return zero;
1191 }
1192 
1193 
1194 Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
1195  Register second) {
1196  if (first.is(second)) {
1197  return CheckNonNegativeSmi(first);
1198  }
1199  movq(kScratchRegister, first);
1200  or_(kScratchRegister, second);
1201  rol(kScratchRegister, Immediate(1));
1202  testl(kScratchRegister, Immediate(3));
1203  return zero;
1204 }
1205 
1206 
1207 Condition MacroAssembler::CheckEitherSmi(Register first,
1208  Register second,
1209  Register scratch) {
1210  if (first.is(second)) {
1211  return CheckSmi(first);
1212  }
1213  if (scratch.is(second)) {
1214  andl(scratch, first);
1215  } else {
1216  if (!scratch.is(first)) {
1217  movl(scratch, first);
1218  }
1219  andl(scratch, second);
1220  }
1221  testb(scratch, Immediate(kSmiTagMask));
1222  return zero;
1223 }
1224 
1225 
1226 Condition MacroAssembler::CheckIsMinSmi(Register src) {
1227  ASSERT(!src.is(kScratchRegister));
1228  // If we overflow by subtracting one, it's the minimal smi value.
1229  cmpq(src, kSmiConstantRegister);
1230  return overflow;
1231 }
1232 
1233 
1234 Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
1235  // A 32-bit integer value can always be converted to a smi.
1236  return always;
1237 }
1238 
1239 
1240 Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
1241  // An unsigned 32-bit integer value is valid as long as the high bit
1242  // is not set.
1243  testl(src, src);
1244  return positive;
1245 }
1246 
1247 
1248 void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
1249  if (dst.is(src)) {
1250  andl(dst, Immediate(kSmiTagMask));
1251  } else {
1252  movl(dst, Immediate(kSmiTagMask));
1253  andl(dst, src);
1254  }
1255 }
1256 
1257 
1258 void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
1259  if (!(src.AddressUsesRegister(dst))) {
1260  movl(dst, Immediate(kSmiTagMask));
1261  andl(dst, src);
1262  } else {
1263  movl(dst, src);
1264  andl(dst, Immediate(kSmiTagMask));
1265  }
1266 }
1267 
1268 
1269 void MacroAssembler::JumpIfNotValidSmiValue(Register src,
1270  Label* on_invalid,
1271  Label::Distance near_jump) {
1272  Condition is_valid = CheckInteger32ValidSmiValue(src);
1273  j(NegateCondition(is_valid), on_invalid, near_jump);
1274 }
1275 
1276 
1277 void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
1278  Label* on_invalid,
1279  Label::Distance near_jump) {
1280  Condition is_valid = CheckUInteger32ValidSmiValue(src);
1281  j(NegateCondition(is_valid), on_invalid, near_jump);
1282 }
1283 
1284 
1285 void MacroAssembler::JumpIfSmi(Register src,
1286  Label* on_smi,
1287  Label::Distance near_jump) {
1288  Condition smi = CheckSmi(src);
1289  j(smi, on_smi, near_jump);
1290 }
1291 
1292 
1293 void MacroAssembler::JumpIfNotSmi(Register src,
1294  Label* on_not_smi,
1295  Label::Distance near_jump) {
1296  Condition smi = CheckSmi(src);
1297  j(NegateCondition(smi), on_not_smi, near_jump);
1298 }
1299 
1300 
1301 void MacroAssembler::JumpUnlessNonNegativeSmi(
1302  Register src, Label* on_not_smi_or_negative,
1303  Label::Distance near_jump) {
1304  Condition non_negative_smi = CheckNonNegativeSmi(src);
1305  j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
1306 }
1307 
1308 
1309 void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
1310  Smi* constant,
1311  Label* on_equals,
1312  Label::Distance near_jump) {
1313  SmiCompare(src, constant);
1314  j(equal, on_equals, near_jump);
1315 }
1316 
1317 
1318 void MacroAssembler::JumpIfNotBothSmi(Register src1,
1319  Register src2,
1320  Label* on_not_both_smi,
1321  Label::Distance near_jump) {
1322  Condition both_smi = CheckBothSmi(src1, src2);
1323  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
1324 }
1325 
1326 
1327 void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
1328  Register src2,
1329  Label* on_not_both_smi,
1330  Label::Distance near_jump) {
1331  Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
1332  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
1333 }
1334 
1335 
1336 void MacroAssembler::SmiTryAddConstant(Register dst,
1337  Register src,
1338  Smi* constant,
1339  Label* on_not_smi_result,
1340  Label::Distance near_jump) {
1341  // Does not assume that src is a smi.
1342  ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
1343  STATIC_ASSERT(kSmiTag == 0);
1344  ASSERT(!dst.is(kScratchRegister));
1345  ASSERT(!src.is(kScratchRegister));
1346 
1347  JumpIfNotSmi(src, on_not_smi_result, near_jump);
1348  Register tmp = (dst.is(src) ? kScratchRegister : dst);
1349  LoadSmiConstant(tmp, constant);
1350  addq(tmp, src);
1351  j(overflow, on_not_smi_result, near_jump);
1352  if (dst.is(src)) {
1353  movq(dst, tmp);
1354  }
1355 }
1356 
1357 
1358 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
1359  if (constant->value() == 0) {
1360  if (!dst.is(src)) {
1361  movq(dst, src);
1362  }
1363  return;
1364  } else if (dst.is(src)) {
1365  ASSERT(!dst.is(kScratchRegister));
1366  switch (constant->value()) {
1367  case 1:
1368  addq(dst, kSmiConstantRegister);
1369  return;
1370  case 2:
1371  lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
1372  return;
1373  case 4:
1374  lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
1375  return;
1376  case 8:
1377  lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
1378  return;
1379  default:
1380  Register constant_reg = GetSmiConstant(constant);
1381  addq(dst, constant_reg);
1382  return;
1383  }
1384  } else {
1385  switch (constant->value()) {
1386  case 1:
1387  lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
1388  return;
1389  case 2:
1390  lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
1391  return;
1392  case 4:
1393  lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
1394  return;
1395  case 8:
1396  lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
1397  return;
1398  default:
1399  LoadSmiConstant(dst, constant);
1400  addq(dst, src);
1401  return;
1402  }
1403  }
1404 }
1405 
1406 
1407 void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
1408  if (constant->value() != 0) {
1409  addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
1410  }
1411 }
1412 
1413 
1414 void MacroAssembler::SmiAddConstant(Register dst,
1415  Register src,
1416  Smi* constant,
1417  Label* on_not_smi_result,
1418  Label::Distance near_jump) {
1419  if (constant->value() == 0) {
1420  if (!dst.is(src)) {
1421  movq(dst, src);
1422  }
1423  } else if (dst.is(src)) {
1424  ASSERT(!dst.is(kScratchRegister));
1425 
1426  LoadSmiConstant(kScratchRegister, constant);
1427  addq(kScratchRegister, src);
1428  j(overflow, on_not_smi_result, near_jump);
1429  movq(dst, kScratchRegister);
1430  } else {
1431  LoadSmiConstant(dst, constant);
1432  addq(dst, src);
1433  j(overflow, on_not_smi_result, near_jump);
1434  }
1435 }
1436 
1437 
1438 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
1439  if (constant->value() == 0) {
1440  if (!dst.is(src)) {
1441  movq(dst, src);
1442  }
1443  } else if (dst.is(src)) {
1444  ASSERT(!dst.is(kScratchRegister));
1445  Register constant_reg = GetSmiConstant(constant);
1446  subq(dst, constant_reg);
1447  } else {
1448  if (constant->value() == Smi::kMinValue) {
1449  LoadSmiConstant(dst, constant);
 1450 // Adding and subtracting the min-value gives the same result; it only
1451  // differs on the overflow bit, which we don't check here.
1452  addq(dst, src);
1453  } else {
1454  // Subtract by adding the negation.
1455  LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
1456  addq(dst, src);
1457  }
1458  }
1459 }
1460 
1461 
1462 void MacroAssembler::SmiSubConstant(Register dst,
1463  Register src,
1464  Smi* constant,
1465  Label* on_not_smi_result,
1466  Label::Distance near_jump) {
1467  if (constant->value() == 0) {
1468  if (!dst.is(src)) {
1469  movq(dst, src);
1470  }
1471  } else if (dst.is(src)) {
1472  ASSERT(!dst.is(kScratchRegister));
1473  if (constant->value() == Smi::kMinValue) {
1474  // Subtracting min-value from any non-negative value will overflow.
1475  // We test the non-negativeness before doing the subtraction.
1476  testq(src, src);
1477  j(not_sign, on_not_smi_result, near_jump);
1478  LoadSmiConstant(kScratchRegister, constant);
1479  subq(dst, kScratchRegister);
1480  } else {
1481  // Subtract by adding the negation.
1482  LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
1483  addq(kScratchRegister, dst);
1484  j(overflow, on_not_smi_result, near_jump);
1485  movq(dst, kScratchRegister);
1486  }
1487  } else {
1488  if (constant->value() == Smi::kMinValue) {
1489  // Subtracting min-value from any non-negative value will overflow.
1490  // We test the non-negativeness before doing the subtraction.
1491  testq(src, src);
1492  j(not_sign, on_not_smi_result, near_jump);
1493  LoadSmiConstant(dst, constant);
 1494 // Adding and subtracting the min-value gives the same result; it only
1495  // differs on the overflow bit, which we don't check here.
1496  addq(dst, src);
1497  } else {
1498  // Subtract by adding the negation.
1499  LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
1500  addq(dst, src);
1501  j(overflow, on_not_smi_result, near_jump);
1502  }
1503  }
1504 }
1505 
1506 
1507 void MacroAssembler::SmiNeg(Register dst,
1508  Register src,
1509  Label* on_smi_result,
1510  Label::Distance near_jump) {
1511  if (dst.is(src)) {
1512  ASSERT(!dst.is(kScratchRegister));
1513  movq(kScratchRegister, src);
1514  neg(dst); // Low 32 bits are retained as zero by negation.
1515  // Test if result is zero or Smi::kMinValue.
1516  cmpq(dst, kScratchRegister);
1517  j(not_equal, on_smi_result, near_jump);
1518  movq(src, kScratchRegister);
1519  } else {
1520  movq(dst, src);
1521  neg(dst);
1522  cmpq(dst, src);
1523  // If the result is zero or Smi::kMinValue, negation failed to create a smi.
1524  j(not_equal, on_smi_result, near_jump);
1525  }
1526 }
1527 
1528 
1529 void MacroAssembler::SmiAdd(Register dst,
1530  Register src1,
1531  Register src2,
1532  Label* on_not_smi_result,
1533  Label::Distance near_jump) {
1534  ASSERT_NOT_NULL(on_not_smi_result);
1535  ASSERT(!dst.is(src2));
1536  if (dst.is(src1)) {
1537  movq(kScratchRegister, src1);
1538  addq(kScratchRegister, src2);
1539  j(overflow, on_not_smi_result, near_jump);
1540  movq(dst, kScratchRegister);
1541  } else {
1542  movq(dst, src1);
1543  addq(dst, src2);
1544  j(overflow, on_not_smi_result, near_jump);
1545  }
1546 }
1547 
1548 
1549 void MacroAssembler::SmiAdd(Register dst,
1550  Register src1,
1551  const Operand& src2,
1552  Label* on_not_smi_result,
1553  Label::Distance near_jump) {
1554  ASSERT_NOT_NULL(on_not_smi_result);
1555  if (dst.is(src1)) {
1556  movq(kScratchRegister, src1);
1557  addq(kScratchRegister, src2);
1558  j(overflow, on_not_smi_result, near_jump);
1559  movq(dst, kScratchRegister);
1560  } else {
1561  ASSERT(!src2.AddressUsesRegister(dst));
1562  movq(dst, src1);
1563  addq(dst, src2);
1564  j(overflow, on_not_smi_result, near_jump);
1565  }
1566 }
1567 
1568 
1569 void MacroAssembler::SmiAdd(Register dst,
1570  Register src1,
1571  Register src2) {
1572  // No overflow checking. Use only when it's known that
1573  // overflowing is impossible.
1574  if (!dst.is(src1)) {
1575  if (emit_debug_code()) {
1576  movq(kScratchRegister, src1);
1577  addq(kScratchRegister, src2);
1578  Check(no_overflow, "Smi addition overflow");
1579  }
1580  lea(dst, Operand(src1, src2, times_1, 0));
1581  } else {
1582  addq(dst, src2);
1583  Assert(no_overflow, "Smi addition overflow");
1584  }
1585 }
1586 
1587 
1588 void MacroAssembler::SmiSub(Register dst,
1589  Register src1,
1590  Register src2,
1591  Label* on_not_smi_result,
1592  Label::Distance near_jump) {
1593  ASSERT_NOT_NULL(on_not_smi_result);
1594  ASSERT(!dst.is(src2));
1595  if (dst.is(src1)) {
1596  cmpq(dst, src2);
1597  j(overflow, on_not_smi_result, near_jump);
1598  subq(dst, src2);
1599  } else {
1600  movq(dst, src1);
1601  subq(dst, src2);
1602  j(overflow, on_not_smi_result, near_jump);
1603  }
1604 }
1605 
1606 
1607 void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
1608  // No overflow checking. Use only when it's known that
1609  // overflowing is impossible (e.g., subtracting two positive smis).
1610  ASSERT(!dst.is(src2));
1611  if (!dst.is(src1)) {
1612  movq(dst, src1);
1613  }
1614  subq(dst, src2);
1615  Assert(no_overflow, "Smi subtraction overflow");
1616 }
1617 
1618 
1619 void MacroAssembler::SmiSub(Register dst,
1620  Register src1,
1621  const Operand& src2,
1622  Label* on_not_smi_result,
1623  Label::Distance near_jump) {
1624  ASSERT_NOT_NULL(on_not_smi_result);
1625  if (dst.is(src1)) {
1626  movq(kScratchRegister, src2);
1627  cmpq(src1, kScratchRegister);
1628  j(overflow, on_not_smi_result, near_jump);
1629  subq(src1, kScratchRegister);
1630  } else {
1631  movq(dst, src1);
1632  subq(dst, src2);
1633  j(overflow, on_not_smi_result, near_jump);
1634  }
1635 }
1636 
1637 
1638 void MacroAssembler::SmiSub(Register dst,
1639  Register src1,
1640  const Operand& src2) {
1641  // No overflow checking. Use only when it's known that
1642  // overflowing is impossible (e.g., subtracting two positive smis).
1643  if (!dst.is(src1)) {
1644  movq(dst, src1);
1645  }
1646  subq(dst, src2);
1647  Assert(no_overflow, "Smi subtraction overflow");
1648 }
1649 
1650 
1651 void MacroAssembler::SmiMul(Register dst,
1652  Register src1,
1653  Register src2,
1654  Label* on_not_smi_result,
1655  Label::Distance near_jump) {
1656  ASSERT(!dst.is(src2));
1657  ASSERT(!dst.is(kScratchRegister));
1658  ASSERT(!src1.is(kScratchRegister));
1659  ASSERT(!src2.is(kScratchRegister));
1660 
1661  if (dst.is(src1)) {
1662  Label failure, zero_correct_result;
1663  movq(kScratchRegister, src1); // Create backup for later testing.
1664  SmiToInteger64(dst, src1);
1665  imul(dst, src2);
1666  j(overflow, &failure, Label::kNear);
1667 
1668  // Check for negative zero result. If product is zero, and one
1669  // argument is negative, go to slow case.
1670  Label correct_result;
1671  testq(dst, dst);
1672  j(not_zero, &correct_result, Label::kNear);
1673 
1674  movq(dst, kScratchRegister);
1675  xor_(dst, src2);
1676  // Result was positive zero.
1677  j(positive, &zero_correct_result, Label::kNear);
1678 
1679  bind(&failure); // Reused failure exit, restores src1.
1680  movq(src1, kScratchRegister);
1681  jmp(on_not_smi_result, near_jump);
1682 
1683  bind(&zero_correct_result);
1684  Set(dst, 0);
1685 
1686  bind(&correct_result);
1687  } else {
1688  SmiToInteger64(dst, src1);
1689  imul(dst, src2);
1690  j(overflow, on_not_smi_result, near_jump);
1691  // Check for negative zero result. If product is zero, and one
1692  // argument is negative, go to slow case.
1693  Label correct_result;
1694  testq(dst, dst);
1695  j(not_zero, &correct_result, Label::kNear);
 1696 // One of src1 and src2 is zero; check whether the other is
1697  // negative.
1698  movq(kScratchRegister, src1);
1699  xor_(kScratchRegister, src2);
1700  j(negative, on_not_smi_result, near_jump);
1701  bind(&correct_result);
1702  }
1703 }
1704 
1705 
1706 void MacroAssembler::SmiDiv(Register dst,
1707  Register src1,
1708  Register src2,
1709  Label* on_not_smi_result,
1710  Label::Distance near_jump) {
1711  ASSERT(!src1.is(kScratchRegister));
1712  ASSERT(!src2.is(kScratchRegister));
1713  ASSERT(!dst.is(kScratchRegister));
1714  ASSERT(!src2.is(rax));
1715  ASSERT(!src2.is(rdx));
1716  ASSERT(!src1.is(rdx));
1717 
1718  // Check for 0 divisor (result is +/-Infinity).
1719  testq(src2, src2);
1720  j(zero, on_not_smi_result, near_jump);
1721 
1722  if (src1.is(rax)) {
1723  movq(kScratchRegister, src1);
1724  }
1725  SmiToInteger32(rax, src1);
1726  // We need to rule out dividing Smi::kMinValue by -1, since that would
1727  // overflow in idiv and raise an exception.
 1728 // We combine this with the negative zero test (negative zero only happens
1729  // when dividing zero by a negative number).
1730 
1731  // We overshoot a little and go to slow case if we divide min-value
1732  // by any negative value, not just -1.
1733  Label safe_div;
1734  testl(rax, Immediate(0x7fffffff));
1735  j(not_zero, &safe_div, Label::kNear);
1736  testq(src2, src2);
1737  if (src1.is(rax)) {
1738  j(positive, &safe_div, Label::kNear);
1739  movq(src1, kScratchRegister);
1740  jmp(on_not_smi_result, near_jump);
1741  } else {
1742  j(negative, on_not_smi_result, near_jump);
1743  }
1744  bind(&safe_div);
1745 
1746  SmiToInteger32(src2, src2);
1747  // Sign extend src1 into edx:eax.
1748  cdq();
1749  idivl(src2);
1750  Integer32ToSmi(src2, src2);
1751  // Check that the remainder is zero.
1752  testl(rdx, rdx);
1753  if (src1.is(rax)) {
1754  Label smi_result;
1755  j(zero, &smi_result, Label::kNear);
1756  movq(src1, kScratchRegister);
1757  jmp(on_not_smi_result, near_jump);
1758  bind(&smi_result);
1759  } else {
1760  j(not_zero, on_not_smi_result, near_jump);
1761  }
1762  if (!dst.is(src1) && src1.is(rax)) {
1763  movq(src1, kScratchRegister);
1764  }
1765  Integer32ToSmi(dst, rax);
1766 }
1767 
1768 
1769 void MacroAssembler::SmiMod(Register dst,
1770  Register src1,
1771  Register src2,
1772  Label* on_not_smi_result,
1773  Label::Distance near_jump) {
1774  ASSERT(!dst.is(kScratchRegister));
1775  ASSERT(!src1.is(kScratchRegister));
1776  ASSERT(!src2.is(kScratchRegister));
1777  ASSERT(!src2.is(rax));
1778  ASSERT(!src2.is(rdx));
1779  ASSERT(!src1.is(rdx));
1780  ASSERT(!src1.is(src2));
1781 
1782  testq(src2, src2);
1783  j(zero, on_not_smi_result, near_jump);
1784 
1785  if (src1.is(rax)) {
1786  movq(kScratchRegister, src1);
1787  }
1788  SmiToInteger32(rax, src1);
1789  SmiToInteger32(src2, src2);
1790 
1791  // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
1792  Label safe_div;
1793  cmpl(rax, Immediate(Smi::kMinValue));
1794  j(not_equal, &safe_div, Label::kNear);
1795  cmpl(src2, Immediate(-1));
1796  j(not_equal, &safe_div, Label::kNear);
1797  // Retag inputs and go slow case.
1798  Integer32ToSmi(src2, src2);
1799  if (src1.is(rax)) {
1800  movq(src1, kScratchRegister);
1801  }
1802  jmp(on_not_smi_result, near_jump);
1803  bind(&safe_div);
1804 
1805  // Sign extend eax into edx:eax.
1806  cdq();
1807  idivl(src2);
1808  // Restore smi tags on inputs.
1809  Integer32ToSmi(src2, src2);
1810  if (src1.is(rax)) {
1811  movq(src1, kScratchRegister);
1812  }
1813  // Check for a negative zero result. If the result is zero, and the
1814  // dividend is negative, go slow to return a floating point negative zero.
1815  Label smi_result;
1816  testl(rdx, rdx);
1817  j(not_zero, &smi_result, Label::kNear);
1818  testq(src1, src1);
1819  j(negative, on_not_smi_result, near_jump);
1820  bind(&smi_result);
1821  Integer32ToSmi(dst, rdx);
1822 }
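// Illustrative sketch (not part of macro-assembler-x64.cc): the fast-path
// guard of SmiDiv above, written as plain C++ over untagged 32-bit values.
// The guard deliberately overshoots: a zero or kMinValue dividend with any
// negative divisor goes slow, because kMinValue / -1 would overflow idivl
// and 0 / negative must produce a heap -0.0. SmiMod only goes slow for the
// exact kMinValue % -1 pair, plus the negative-zero check after the divide.
#include <climits>
#include <cstdint>

bool SmiDivStaysFast(int32_t dividend, int32_t divisor) {
  if (divisor == 0) return false;                       // +/-Infinity result.
  if ((dividend == 0 || dividend == INT32_MIN) && divisor < 0) {
    return false;                                       // overflow or -0.0.
  }
  return true;  // idivl is safe; a non-zero remainder still forces slow case.
}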
1823 
1824 
1825 void MacroAssembler::SmiNot(Register dst, Register src) {
1826  ASSERT(!dst.is(kScratchRegister));
1827  ASSERT(!src.is(kScratchRegister));
1828  // Set tag and padding bits before negating, so that they are zero afterwards.
1829  movl(kScratchRegister, Immediate(~0));
1830  if (dst.is(src)) {
1831  xor_(dst, kScratchRegister);
1832  } else {
1833  lea(dst, Operand(src, kScratchRegister, times_1, 0));
1834  }
1835  not_(dst);
1836 }
1837 
1838 
1839 void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
1840  ASSERT(!dst.is(src2));
1841  if (!dst.is(src1)) {
1842  movq(dst, src1);
1843  }
1844  and_(dst, src2);
1845 }
1846 
1847 
1848 void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
1849  if (constant->value() == 0) {
1850  Set(dst, 0);
1851  } else if (dst.is(src)) {
1852  ASSERT(!dst.is(kScratchRegister));
1853  Register constant_reg = GetSmiConstant(constant);
1854  and_(dst, constant_reg);
1855  } else {
1856  LoadSmiConstant(dst, constant);
1857  and_(dst, src);
1858  }
1859 }
1860 
1861 
1862 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
1863  if (!dst.is(src1)) {
1864  ASSERT(!src1.is(src2));
1865  movq(dst, src1);
1866  }
1867  or_(dst, src2);
1868 }
1869 
1870 
1871 void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
1872  if (dst.is(src)) {
1873  ASSERT(!dst.is(kScratchRegister));
1874  Register constant_reg = GetSmiConstant(constant);
1875  or_(dst, constant_reg);
1876  } else {
1877  LoadSmiConstant(dst, constant);
1878  or_(dst, src);
1879  }
1880 }
1881 
1882 
1883 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
1884  if (!dst.is(src1)) {
1885  ASSERT(!src1.is(src2));
1886  movq(dst, src1);
1887  }
1888  xor_(dst, src2);
1889 }
1890 
1891 
1892 void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
1893  if (dst.is(src)) {
1894  ASSERT(!dst.is(kScratchRegister));
1895  Register constant_reg = GetSmiConstant(constant);
1896  xor_(dst, constant_reg);
1897  } else {
1898  LoadSmiConstant(dst, constant);
1899  xor_(dst, src);
1900  }
1901 }
1902 
1903 
1904 void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
1905  Register src,
1906  int shift_value) {
1907  ASSERT(is_uint5(shift_value));
1908  if (shift_value > 0) {
1909  if (dst.is(src)) {
1910  sar(dst, Immediate(shift_value + kSmiShift));
1911  shl(dst, Immediate(kSmiShift));
1912  } else {
1913  UNIMPLEMENTED(); // Not used.
1914  }
1915  }
1916 }
1917 
1918 
1919 void MacroAssembler::SmiShiftLeftConstant(Register dst,
1920  Register src,
1921  int shift_value) {
1922  if (!dst.is(src)) {
1923  movq(dst, src);
1924  }
1925  if (shift_value > 0) {
1926  shl(dst, Immediate(shift_value));
1927  }
1928 }
1929 
1930 
1931 void MacroAssembler::SmiShiftLogicalRightConstant(
1932  Register dst, Register src, int shift_value,
1933  Label* on_not_smi_result, Label::Distance near_jump) {
1934  // Logical right shift interprets its result as an *unsigned* number.
1935  if (dst.is(src)) {
1936  UNIMPLEMENTED(); // Not used.
1937  } else {
1938  movq(dst, src);
1939  if (shift_value == 0) {
1940  testq(dst, dst);
1941  j(negative, on_not_smi_result, near_jump);
1942  }
1943  shr(dst, Immediate(shift_value + kSmiShift));
1944  shl(dst, Immediate(kSmiShift));
1945  }
1946 }
1947 
1948 
1949 void MacroAssembler::SmiShiftLeft(Register dst,
1950  Register src1,
1951  Register src2) {
1952  ASSERT(!dst.is(rcx));
1953  // Untag shift amount.
1954  if (!dst.is(src1)) {
1955  movq(dst, src1);
1956  }
1957  SmiToInteger32(rcx, src2);
1958  // The shift amount is taken from the lower 5 bits only, not six as the 64-bit shl opcode would use.
1959  and_(rcx, Immediate(0x1f));
1960  shl_cl(dst);
1961 }
1962 
1963 
1964 void MacroAssembler::SmiShiftLogicalRight(Register dst,
1965  Register src1,
1966  Register src2,
1967  Label* on_not_smi_result,
1968  Label::Distance near_jump) {
1969  ASSERT(!dst.is(kScratchRegister));
1970  ASSERT(!src1.is(kScratchRegister));
1971  ASSERT(!src2.is(kScratchRegister));
1972  ASSERT(!dst.is(rcx));
1973  // dst and src1 can be the same, because the one case that bails out
1974  // is a shift by 0, which leaves dst, and therefore src1, unchanged.
1975  if (src1.is(rcx) || src2.is(rcx)) {
1976  movq(kScratchRegister, rcx);
1977  }
1978  if (!dst.is(src1)) {
1979  movq(dst, src1);
1980  }
1981  SmiToInteger32(rcx, src2);
1982  orl(rcx, Immediate(kSmiShift));
1983  shr_cl(dst); // Shift amount is (original rcx & 0x1f) + 32.
1984  shl(dst, Immediate(kSmiShift));
1985  testq(dst, dst);
1986  if (src1.is(rcx) || src2.is(rcx)) {
1987  Label positive_result;
1988  j(positive, &positive_result, Label::kNear);
1989  if (src1.is(rcx)) {
1990  movq(src1, kScratchRegister);
1991  } else {
1992  movq(src2, kScratchRegister);
1993  }
1994  jmp(on_not_smi_result, near_jump);
1995  bind(&positive_result);
1996  } else {
1997  // src2 was zero and src1 negative.
1998  j(negative, on_not_smi_result, near_jump);
1999  }
2000 }
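// Illustrative sketch (not part of macro-assembler-x64.cc): why
// orl(rcx, Immediate(kSmiShift)) works above. On x64 a smi keeps its value
// in the upper 32 bits (kSmiShift == 32) and a 64-bit shift uses only
// cl % 64, so OR-ing the untagged count (0..31) with 32 untags and shifts
// in a single shr; re-tagging is one shl by 32.
#include <cstdint>

uint64_t SmiShiftLogicalRightSketch(uint64_t tagged_smi, int count) {
  int combined = (count & 0x1f) | 32;          // orl(rcx, Immediate(kSmiShift))
  uint64_t untagged = tagged_smi >> combined;  // shr_cl(dst)
  return untagged << 32;                       // shl(dst, Immediate(kSmiShift))
}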
2001 
2002 
2003 void MacroAssembler::SmiShiftArithmeticRight(Register dst,
2004  Register src1,
2005  Register src2) {
2006  ASSERT(!dst.is(kScratchRegister));
2007  ASSERT(!src1.is(kScratchRegister));
2008  ASSERT(!src2.is(kScratchRegister));
2009  ASSERT(!dst.is(rcx));
2010  if (src1.is(rcx)) {
2011  movq(kScratchRegister, src1);
2012  } else if (src2.is(rcx)) {
2013  movq(kScratchRegister, src2);
2014  }
2015  if (!dst.is(src1)) {
2016  movq(dst, src1);
2017  }
2018  SmiToInteger32(rcx, src2);
2019  orl(rcx, Immediate(kSmiShift));
2020  sar_cl(dst); // Shift amount is (original rcx & 0x1f) + 32.
2021  shl(dst, Immediate(kSmiShift));
2022  if (src1.is(rcx)) {
2023  movq(src1, kScratchRegister);
2024  } else if (src2.is(rcx)) {
2025  movq(src2, kScratchRegister);
2026  }
2027 }
2028 
2029 
2030 void MacroAssembler::SelectNonSmi(Register dst,
2031  Register src1,
2032  Register src2,
2033  Label* on_not_smis,
2034  Label::Distance near_jump) {
2035  ASSERT(!dst.is(kScratchRegister));
2036  ASSERT(!src1.is(kScratchRegister));
2037  ASSERT(!src2.is(kScratchRegister));
2038  ASSERT(!dst.is(src1));
2039  ASSERT(!dst.is(src2));
2040  // Both operands must not be smis.
2041 #ifdef DEBUG
2042  if (allow_stub_calls()) { // Check contains a stub call.
2043  Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
2044  Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
2045  }
2046 #endif
2047  STATIC_ASSERT(kSmiTag == 0);
2048  ASSERT_EQ(0, Smi::FromInt(0));
2049  movl(kScratchRegister, Immediate(kSmiTagMask));
2050  and_(kScratchRegister, src1);
2051  testl(kScratchRegister, src2);
2052  // If non-zero then both are smis.
2053  j(not_zero, on_not_smis, near_jump);
2054 
2055  // Exactly one operand is a smi.
2056  ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
2057  // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
2058  subq(kScratchRegister, Immediate(1));
2059  // If src1 is a smi, the scratch register is now all 1s, else it is all 0s.
2060  movq(dst, src1);
2061  xor_(dst, src2);
2062  and_(dst, kScratchRegister);
2063  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
2064  xor_(dst, src1);
2065  // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
2066 }
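// Illustrative sketch (not part of macro-assembler-x64.cc): the branch-free
// selection above in plain C++. With kSmiTag == 0 and kSmiTagMask == 1, the
// low bit of a smi is 0, so (src1 & 1) - 1 is all ones exactly when src1 is
// a smi; masking the XOR difference with it then picks out the non-smi.
#include <cstdint>

uint64_t SelectNonSmiSketch(uint64_t src1, uint64_t src2) {
  uint64_t scratch = (src1 & 1) - 1;       // all 1s if src1 is a smi, else 0.
  uint64_t dst = (src1 ^ src2) & scratch;  // src1 ^ src2 if src1 is a smi.
  return dst ^ src1;                       // src2 if src1 is a smi, else src1.
}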
2067 
2068 
2069 SmiIndex MacroAssembler::SmiToIndex(Register dst,
2070  Register src,
2071  int shift) {
2072  ASSERT(is_uint6(shift));
2073  // There is a possible optimization if shift is in the range 60-63, but that
2074  // will (and must) never happen.
2075  if (!dst.is(src)) {
2076  movq(dst, src);
2077  }
2078  if (shift < kSmiShift) {
2079  sar(dst, Immediate(kSmiShift - shift));
2080  } else {
2081  shl(dst, Immediate(shift - kSmiShift));
2082  }
2083  return SmiIndex(dst, times_1);
2084 }
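// Illustrative sketch (not part of macro-assembler-x64.cc): a 64-bit smi
// already stores value << 32, so "value << shift" needs only one shift
// relative to kSmiShift: right by (32 - shift) when shift < 32, left by
// (shift - 32) otherwise. SmiToNegativeIndex below negates first.
#include <cstdint>

int64_t SmiToIndexSketch(int64_t tagged_smi, int shift) {
  const int kSmiShiftSketch = 32;  // assumed value of kSmiShift on x64.
  return shift < kSmiShiftSketch ? tagged_smi >> (kSmiShiftSketch - shift)
                                 : tagged_smi << (shift - kSmiShiftSketch);
}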
2085 
2086 SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
2087  Register src,
2088  int shift) {
2089  // Register src holds a positive smi.
2090  ASSERT(is_uint6(shift));
2091  if (!dst.is(src)) {
2092  movq(dst, src);
2093  }
2094  neg(dst);
2095  if (shift < kSmiShift) {
2096  sar(dst, Immediate(kSmiShift - shift));
2097  } else {
2098  shl(dst, Immediate(shift - kSmiShift));
2099  }
2100  return SmiIndex(dst, times_1);
2101 }
2102 
2103 
2104 void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
2105  ASSERT_EQ(0, kSmiShift % kBitsPerByte);
2106  addl(dst, Operand(src, kSmiShift / kBitsPerByte));
2107 }
2108 
2109 
2110 void MacroAssembler::JumpIfNotString(Register object,
2111  Register object_map,
2112  Label* not_string,
2113  Label::Distance near_jump) {
2114  Condition is_smi = CheckSmi(object);
2115  j(is_smi, not_string, near_jump);
2116  CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
2117  j(above_equal, not_string, near_jump);
2118 }
2119 
2120 
2121 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
2122  Register first_object,
2123  Register second_object,
2124  Register scratch1,
2125  Register scratch2,
2126  Label* on_fail,
2127  Label::Distance near_jump) {
2128  // Check that both objects are not smis.
2129  Condition either_smi = CheckEitherSmi(first_object, second_object);
2130  j(either_smi, on_fail, near_jump);
2131 
2132  // Load instance type for both strings.
2133  movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
2134  movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
2135  movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
2136  movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
2137 
2138  // Check that both are flat ASCII strings.
2139  ASSERT(kNotStringTag != 0);
 2140  const int kFlatAsciiStringMask =
 2141  kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
 2142  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
2143 
2144  andl(scratch1, Immediate(kFlatAsciiStringMask));
2145  andl(scratch2, Immediate(kFlatAsciiStringMask));
2146  // Interleave the bits to check both scratch1 and scratch2 in one test.
2147  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
2148  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
2149  cmpl(scratch1,
2150  Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
2151  j(not_equal, on_fail, near_jump);
2152 }
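// Illustrative sketch (not part of macro-assembler-x64.cc): the combined
// compare above relies on kFlatAsciiStringMask & (kFlatAsciiStringMask << 3)
// being zero, so (a & mask) + (b & mask) * 8 produces no carries and both
// masked instance types can be checked with a single cmpl.
#include <cstdint>

bool BothFlatAsciiSketch(uint32_t type1, uint32_t type2,
                         uint32_t mask, uint32_t tag) {
  uint32_t combined = (type1 & mask) + (type2 & mask) * 8;  // the lea above.
  return combined == tag + (tag << 3);
}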
2153 
2154 
2155 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
2156  Register instance_type,
2157  Register scratch,
2158  Label* failure,
2159  Label::Distance near_jump) {
2160  if (!scratch.is(instance_type)) {
2161  movl(scratch, instance_type);
2162  }
2163 
 2164  const int kFlatAsciiStringMask =
 2165  kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
 2166 
2167  andl(scratch, Immediate(kFlatAsciiStringMask));
2168  cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
2169  j(not_equal, failure, near_jump);
2170 }
2171 
2172 
2173 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
2174  Register first_object_instance_type,
2175  Register second_object_instance_type,
2176  Register scratch1,
2177  Register scratch2,
2178  Label* on_fail,
2179  Label::Distance near_jump) {
2180  // Load instance type for both strings.
2181  movq(scratch1, first_object_instance_type);
2182  movq(scratch2, second_object_instance_type);
2183 
2184  // Check that both are flat ASCII strings.
2185  ASSERT(kNotStringTag != 0);
 2186  const int kFlatAsciiStringMask =
 2187  kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
 2188  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
2189 
2190  andl(scratch1, Immediate(kFlatAsciiStringMask));
2191  andl(scratch2, Immediate(kFlatAsciiStringMask));
2192  // Interleave the bits to check both scratch1 and scratch2 in one test.
2193  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
2194  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
2195  cmpl(scratch1,
2196  Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
2197  j(not_equal, on_fail, near_jump);
2198 }
2199 
2200 
2201 
2202 void MacroAssembler::Move(Register dst, Register src) {
2203  if (!dst.is(src)) {
2204  movq(dst, src);
2205  }
2206 }
2207 
2208 
2209 void MacroAssembler::Move(Register dst, Handle<Object> source) {
2210  ASSERT(!source->IsFailure());
2211  if (source->IsSmi()) {
2212  Move(dst, Smi::cast(*source));
2213  } else {
2214  movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
2215  }
2216 }
2217 
2218 
2219 void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
2220  ASSERT(!source->IsFailure());
2221  if (source->IsSmi()) {
2222  Move(dst, Smi::cast(*source));
2223  } else {
2224  movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
2225  movq(dst, kScratchRegister);
2226  }
2227 }
2228 
2229 
2230 void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
2231  if (source->IsSmi()) {
2232  Cmp(dst, Smi::cast(*source));
2233  } else {
2234  Move(kScratchRegister, source);
2235  cmpq(dst, kScratchRegister);
2236  }
2237 }
2238 
2239 
2240 void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
2241  if (source->IsSmi()) {
2242  Cmp(dst, Smi::cast(*source));
2243  } else {
2244  ASSERT(source->IsHeapObject());
2245  movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
2246  cmpq(dst, kScratchRegister);
2247  }
2248 }
2249 
2250 
2251 void MacroAssembler::Push(Handle<Object> source) {
2252  if (source->IsSmi()) {
2253  Push(Smi::cast(*source));
2254  } else {
2255  ASSERT(source->IsHeapObject());
2256  movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
2257  push(kScratchRegister);
2258  }
2259 }
2260 
2261 
2262 void MacroAssembler::LoadHeapObject(Register result,
2263  Handle<HeapObject> object) {
2264  if (isolate()->heap()->InNewSpace(*object)) {
2265  Handle<JSGlobalPropertyCell> cell =
2266  isolate()->factory()->NewJSGlobalPropertyCell(object);
2267  movq(result, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
2268  movq(result, Operand(result, 0));
2269  } else {
2270  Move(result, object);
2271  }
2272 }
2273 
2274 
2275 void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
2276  if (isolate()->heap()->InNewSpace(*object)) {
2277  Handle<JSGlobalPropertyCell> cell =
2278  isolate()->factory()->NewJSGlobalPropertyCell(object);
2279  movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
2280  movq(kScratchRegister, Operand(kScratchRegister, 0));
2281  push(kScratchRegister);
2282  } else {
2283  Push(object);
2284  }
2285 }
2286 
2287 
2288 void MacroAssembler::LoadGlobalCell(Register dst,
2289  Handle<JSGlobalPropertyCell> cell) {
2290  if (dst.is(rax)) {
2291  load_rax(cell.location(), RelocInfo::GLOBAL_PROPERTY_CELL);
2292  } else {
2293  movq(dst, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
2294  movq(dst, Operand(dst, 0));
2295  }
2296 }
2297 
2298 
2299 void MacroAssembler::Push(Smi* source) {
2300  intptr_t smi = reinterpret_cast<intptr_t>(source);
2301  if (is_int32(smi)) {
2302  push(Immediate(static_cast<int32_t>(smi)));
2303  } else {
2304  Register constant = GetSmiConstant(source);
2305  push(constant);
2306  }
2307 }
2308 
2309 
2310 void MacroAssembler::Drop(int stack_elements) {
2311  if (stack_elements > 0) {
2312  addq(rsp, Immediate(stack_elements * kPointerSize));
2313  }
2314 }
2315 
2316 
2317 void MacroAssembler::Test(const Operand& src, Smi* source) {
2318  testl(Operand(src, kIntSize), Immediate(source->value()));
2319 }
2320 
2321 
2322 void MacroAssembler::TestBit(const Operand& src, int bits) {
2323  int byte_offset = bits / kBitsPerByte;
2324  int bit_in_byte = bits & (kBitsPerByte - 1);
2325  testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte));
2326 }
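// Illustrative sketch (not part of macro-assembler-x64.cc): TestBit addresses
// the single byte that contains the requested bit, so the testb immediate is
// always an 8-bit mask no matter how far into the field the bit lies.
#include <cstdint>

bool TestBitSketch(const uint8_t* field, int bits) {
  int byte_offset = bits / 8;        // kBitsPerByte == 8
  int bit_in_byte = bits & 7;
  return (field[byte_offset] & (1 << bit_in_byte)) != 0;
}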
2327 
2328 
2329 void MacroAssembler::Jump(ExternalReference ext) {
2330  LoadAddress(kScratchRegister, ext);
2331  jmp(kScratchRegister);
2332 }
2333 
2334 
2335 void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
2336  movq(kScratchRegister, destination, rmode);
2337  jmp(kScratchRegister);
2338 }
2339 
2340 
2341 void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
2342  // TODO(X64): Inline this
2343  jmp(code_object, rmode);
2344 }
2345 
2346 
2347 int MacroAssembler::CallSize(ExternalReference ext) {
2348  // Opcode for call kScratchRegister is: Rex.B FF D4 (three bytes).
2349  const int kCallInstructionSize = 3;
2350  return LoadAddressSize(ext) + kCallInstructionSize;
2351 }
2352 
2353 
2354 void MacroAssembler::Call(ExternalReference ext) {
2355 #ifdef DEBUG
2356  int end_position = pc_offset() + CallSize(ext);
2357 #endif
2358  LoadAddress(kScratchRegister, ext);
2359  call(kScratchRegister);
2360 #ifdef DEBUG
2361  CHECK_EQ(end_position, pc_offset());
2362 #endif
2363 }
2364 
2365 
2366 void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
2367 #ifdef DEBUG
2368  int end_position = pc_offset() + CallSize(destination, rmode);
2369 #endif
2370  movq(kScratchRegister, destination, rmode);
2371  call(kScratchRegister);
2372 #ifdef DEBUG
2373  CHECK_EQ(pc_offset(), end_position);
2374 #endif
2375 }
2376 
2377 
2378 void MacroAssembler::Call(Handle<Code> code_object,
2379  RelocInfo::Mode rmode,
2380  unsigned ast_id) {
2381 #ifdef DEBUG
2382  int end_position = pc_offset() + CallSize(code_object);
2383 #endif
2384  ASSERT(RelocInfo::IsCodeTarget(rmode));
2385  call(code_object, rmode, ast_id);
2386 #ifdef DEBUG
2387  CHECK_EQ(end_position, pc_offset());
2388 #endif
2389 }
2390 
2391 
2392 void MacroAssembler::Pushad() {
2393  push(rax);
2394  push(rcx);
2395  push(rdx);
2396  push(rbx);
2397  // Not pushing rsp or rbp.
2398  push(rsi);
2399  push(rdi);
2400  push(r8);
2401  push(r9);
2402  // r10 is kScratchRegister.
2403  push(r11);
2404  // r12 is kSmiConstantRegister.
2405  // r13 is kRootRegister.
2406  push(r14);
2407  push(r15);
2409  // Use lea for symmetry with Popad.
 2410  int sp_delta =
 2411  (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
 2412  lea(rsp, Operand(rsp, -sp_delta));
2413 }
2414 
2415 
2416 void MacroAssembler::Popad() {
2417  // Popad must not change the flags, so use lea instead of addq.
 2418  int sp_delta =
 2419  (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
 2420  lea(rsp, Operand(rsp, sp_delta));
2421  pop(r15);
2422  pop(r14);
2423  pop(r11);
2424  pop(r9);
2425  pop(r8);
2426  pop(rdi);
2427  pop(rsi);
2428  pop(rbx);
2429  pop(rdx);
2430  pop(rcx);
2431  pop(rax);
2432 }
2433 
2434 
2435 void MacroAssembler::Dropad() {
2436  addq(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
2437 }
2438 
2439 
2440 // Order in which general registers are pushed by Pushad:
2441 // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
2442 const int
2443 MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
2444  0,
2445  1,
2446  2,
2447  3,
2448  -1,
2449  -1,
2450  4,
2451  5,
2452  6,
2453  7,
2454  -1,
2455  8,
2456  -1,
2457  -1,
2458  9,
2459  10
2460 };
2461 
2462 
2463 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
2464  movq(SafepointRegisterSlot(dst), src);
2465 }
2466 
2467 
2468 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
2469  movq(dst, SafepointRegisterSlot(src));
2470 }
2471 
2472 
2473 Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
2474  return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
2475 }
2476 
2477 
2478 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
2479  int handler_index) {
2480  // Adjust this code if not the case.
2481  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2482  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2483  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2484  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2485  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2486  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2487 
2488  // We will build up the handler from the bottom by pushing on the stack.
2489  // First push the frame pointer and context.
2490  if (kind == StackHandler::JS_ENTRY) {
2491  // The frame pointer does not point to a JS frame so we save NULL for
2492  // rbp. We expect the code throwing an exception to check rbp before
2493  // dereferencing it to restore the context.
2494  push(Immediate(0)); // NULL frame pointer.
2495  Push(Smi::FromInt(0)); // No context.
2496  } else {
2497  push(rbp);
2498  push(rsi);
2499  }
2500 
2501  // Push the state and the code object.
2502  unsigned state =
2503  StackHandler::IndexField::encode(handler_index) |
2504  StackHandler::KindField::encode(kind);
2505  push(Immediate(state));
2506  Push(CodeObject());
2507 
2508  // Link the current handler as the next handler.
2509  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
2510  push(ExternalOperand(handler_address));
2511  // Set this new handler as the current one.
2512  movq(ExternalOperand(handler_address), rsp);
2513 }
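// Illustrative sketch (not part of macro-assembler-x64.cc): the "state" word
// pushed above packs the handler-table index and the handler kind via the
// IndexField/KindField bit fields. Assuming the kind occupies the low
// kKindWidth bits (as the shr in JumpToHandlerEntry implies):
#include <cstdint>

uint32_t EncodeHandlerStateSketch(unsigned handler_index, unsigned kind,
                                  int kind_width) {
  return (handler_index << kind_width) | kind;
}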
2514 
2515 
2516 void MacroAssembler::PopTryHandler() {
2517  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2518  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
2519  pop(ExternalOperand(handler_address));
2520  addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
2521 }
2522 
2523 
2524 void MacroAssembler::JumpToHandlerEntry() {
2525  // Compute the handler entry address and jump to it. The handler table is
2526  // a fixed array of (smi-tagged) code offsets.
2527  // rax = exception, rdi = code object, rdx = state.
2528  movq(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
2529  shr(rdx, Immediate(StackHandler::kKindWidth));
2530  movq(rdx, FieldOperand(rbx, rdx, times_8, FixedArray::kHeaderSize));
2531  SmiToInteger64(rdx, rdx);
2532  lea(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
2533  jmp(rdi);
2534 }
2535 
2536 
2537 void MacroAssembler::Throw(Register value) {
2538  // Adjust this code if not the case.
2539  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2540  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2541  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2542  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2543  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2544  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2545 
2546  // The exception is expected in rax.
2547  if (!value.is(rax)) {
2548  movq(rax, value);
2549  }
2550  // Drop the stack pointer to the top of the top handler.
2551  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
2552  movq(rsp, ExternalOperand(handler_address));
2553  // Restore the next handler.
2554  pop(ExternalOperand(handler_address));
2555 
2556  // Remove the code object and state, compute the handler address in rdi.
2557  pop(rdi); // Code object.
2558  pop(rdx); // Offset and state.
2559 
2560  // Restore the context and frame pointer.
2561  pop(rsi); // Context.
2562  pop(rbp); // Frame pointer.
2563 
2564  // If the handler is a JS frame, restore the context to the frame.
2565  // (kind == ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
2566  // rbp or rsi.
2567  Label skip;
2568  testq(rsi, rsi);
2569  j(zero, &skip, Label::kNear);
2570  movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
2571  bind(&skip);
2572 
2573  JumpToHandlerEntry();
2574 }
2575 
2576 
2577 void MacroAssembler::ThrowUncatchable(Register value) {
2578  // Adjust this code if not the case.
2579  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2580  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2581  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2582  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2583  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2584  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2585 
2586  // The exception is expected in rax.
2587  if (!value.is(rax)) {
2588  movq(rax, value);
2589  }
2590  // Drop the stack pointer to the top of the top stack handler.
2591  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
2592  Load(rsp, handler_address);
2593 
2594  // Unwind the handlers until the top ENTRY handler is found.
2595  Label fetch_next, check_kind;
2596  jmp(&check_kind, Label::kNear);
2597  bind(&fetch_next);
2598  movq(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
2599 
2600  bind(&check_kind);
2601  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
2602  testl(Operand(rsp, StackHandlerConstants::kStateOffset),
2603  Immediate(StackHandler::KindField::kMask));
2604  j(not_zero, &fetch_next);
2605 
2606  // Set the top handler address to next handler past the top ENTRY handler.
2607  pop(ExternalOperand(handler_address));
2608 
2609  // Remove the code object and state, compute the handler address in rdi.
2610  pop(rdi); // Code object.
2611  pop(rdx); // Offset and state.
2612 
2613  // Clear the context pointer and frame pointer (0 was saved in the handler).
2614  pop(rsi);
2615  pop(rbp);
2616 
2617  JumpToHandlerEntry();
2618 }
2619 
2620 
2621 void MacroAssembler::Ret() {
2622  ret(0);
2623 }
2624 
2625 
2626 void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
2627  if (is_uint16(bytes_dropped)) {
2628  ret(bytes_dropped);
2629  } else {
2630  pop(scratch);
2631  addq(rsp, Immediate(bytes_dropped));
2632  push(scratch);
2633  ret(0);
2634  }
2635 }
2636 
2637 
2638 void MacroAssembler::FCmp() {
2639  fucomip();
2640  fstp(0);
2641 }
2642 
2643 
2644 void MacroAssembler::CmpObjectType(Register heap_object,
 2645  InstanceType type,
 2646  Register map) {
2647  movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
2648  CmpInstanceType(map, type);
2649 }
2650 
2651 
2652 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
2653  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
2654  Immediate(static_cast<int8_t>(type)));
2655 }
2656 
2657 
2658 void MacroAssembler::CheckFastElements(Register map,
2659  Label* fail,
 2660  Label::Distance distance) {
 2661  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
 2662  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
 2663  STATIC_ASSERT(FAST_ELEMENTS == 2);
 2664  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
 2665  cmpb(FieldOperand(map, Map::kBitField2Offset),
2666  Immediate(Map::kMaximumBitField2FastHoleyElementValue));
2667  j(above, fail, distance);
2668 }
2669 
2670 
2671 void MacroAssembler::CheckFastObjectElements(Register map,
2672  Label* fail,
 2673  Label::Distance distance) {
 2674  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
 2675  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
 2676  STATIC_ASSERT(FAST_ELEMENTS == 2);
 2677  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
 2678  cmpb(FieldOperand(map, Map::kBitField2Offset),
2679  Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
2680  j(below_equal, fail, distance);
2681  cmpb(FieldOperand(map, Map::kBitField2Offset),
2682  Immediate(Map::kMaximumBitField2FastHoleyElementValue));
2683  j(above, fail, distance);
2684 }
2685 
2686 
2687 void MacroAssembler::CheckFastSmiElements(Register map,
2688  Label* fail,
 2689  Label::Distance distance) {
 2690  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
 2691  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
 2692  cmpb(FieldOperand(map, Map::kBitField2Offset),
2693  Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
2694  j(above, fail, distance);
2695 }
2696 
2697 
2698 void MacroAssembler::StoreNumberToDoubleElements(
2699  Register maybe_number,
2700  Register elements,
2701  Register index,
2702  XMMRegister xmm_scratch,
2703  Label* fail) {
2704  Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
2705 
2706  JumpIfSmi(maybe_number, &smi_value, Label::kNear);
2707 
2708  CheckMap(maybe_number,
2709  isolate()->factory()->heap_number_map(),
 2710  fail,
 2711  DONT_DO_SMI_CHECK);
 2712 
2713  // Double value, canonicalize NaN.
2714  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
2715  cmpl(FieldOperand(maybe_number, offset),
2716  Immediate(kNaNOrInfinityLowerBoundUpper32));
2717  j(greater_equal, &maybe_nan, Label::kNear);
2718 
2719  bind(&not_nan);
2720  movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
2721  bind(&have_double_value);
2722  movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
2723  xmm_scratch);
2724  jmp(&done);
2725 
2726  bind(&maybe_nan);
2727  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
2728  // it's an Infinity, and the non-NaN code path applies.
2729  j(greater, &is_nan, Label::kNear);
2730  cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
2731  j(zero, &not_nan);
2732  bind(&is_nan);
2733  // Convert all NaNs to the same canonical NaN value when they are stored in
2734  // the double array.
2735  Set(kScratchRegister, BitCast<uint64_t>(
2736  FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
2737  movq(xmm_scratch, kScratchRegister);
2738  jmp(&have_double_value, Label::kNear);
2739 
2740  bind(&smi_value);
2741  // Value is a smi. Convert to a double and store.
2742  // Preserve original value.
2743  SmiToInteger32(kScratchRegister, maybe_number);
2744  cvtlsi2sd(xmm_scratch, kScratchRegister);
2745  movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
2746  xmm_scratch);
2747  bind(&done);
2748 }
2749 
2750 
2751 void MacroAssembler::CompareMap(Register obj,
2752  Handle<Map> map,
2753  Label* early_success,
2754  CompareMapMode mode) {
2755  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
2756  if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
2757  ElementsKind kind = map->elements_kind();
2758  if (IsFastElementsKind(kind)) {
2759  bool packed = IsFastPackedElementsKind(kind);
2760  Map* current_map = *map;
2761  while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
2762  kind = GetNextMoreGeneralFastElementsKind(kind, packed);
2763  current_map = current_map->LookupElementsTransitionMap(kind);
2764  if (!current_map) break;
2765  j(equal, early_success, Label::kNear);
2766  Cmp(FieldOperand(obj, HeapObject::kMapOffset),
2767  Handle<Map>(current_map));
2768  }
2769  }
2770  }
2771 }
2772 
2773 
2774 void MacroAssembler::CheckMap(Register obj,
2775  Handle<Map> map,
2776  Label* fail,
2777  SmiCheckType smi_check_type,
2778  CompareMapMode mode) {
2779  if (smi_check_type == DO_SMI_CHECK) {
2780  JumpIfSmi(obj, fail);
2781  }
2782 
2783  Label success;
2784  CompareMap(obj, map, &success, mode);
2785  j(not_equal, fail);
2786  bind(&success);
2787 }
2788 
2789 
2790 void MacroAssembler::ClampUint8(Register reg) {
2791  Label done;
2792  testl(reg, Immediate(0xFFFFFF00));
2793  j(zero, &done, Label::kNear);
2794  setcc(negative, reg); // 1 if negative, 0 if positive.
2795  decb(reg); // 0 if negative, 255 if positive.
2796  bind(&done);
2797 }
2798 
2799 
2800 void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
2801  XMMRegister temp_xmm_reg,
2802  Register result_reg,
2803  Register temp_reg) {
2804  Label done;
2805  Set(result_reg, 0);
2806  xorps(temp_xmm_reg, temp_xmm_reg);
2807  ucomisd(input_reg, temp_xmm_reg);
2808  j(below, &done, Label::kNear);
2809  uint64_t one_half = BitCast<uint64_t, double>(0.5);
2810  Set(temp_reg, one_half);
2811  movq(temp_xmm_reg, temp_reg);
2812  addsd(temp_xmm_reg, input_reg);
2813  cvttsd2si(result_reg, temp_xmm_reg);
2814  testl(result_reg, Immediate(0xFFFFFF00));
2815  j(zero, &done, Label::kNear);
2816  Set(result_reg, 255);
2817  bind(&done);
2818 }
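// Illustrative sketch (not part of macro-assembler-x64.cc): the scalar logic
// of the SSE2 sequence above -- NaN and non-positive values clamp to 0, the
// value is rounded by adding 0.5 and truncating (cvttsd2si), and anything
// that no longer fits in a byte clamps to 255.
#include <cstdint>

uint8_t ClampDoubleToUint8Sketch(double value) {
  if (!(value > 0.0)) return 0;          // handles NaN, 0.0 and negatives.
  double rounded = value + 0.5;
  if (rounded >= 256.0) return 255;      // the testl(result, 0xFFFFFF00) path.
  return static_cast<uint8_t>(rounded);  // truncation, like cvttsd2si.
}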
2819 
2820 
2821 void MacroAssembler::LoadInstanceDescriptors(Register map,
2822  Register descriptors) {
2823  movq(descriptors, FieldOperand(map,
2824  Map::kInstanceDescriptorsOrBitField3Offset));
2825  Label not_smi;
2826  JumpIfNotSmi(descriptors, &not_smi, Label::kNear);
2827  Move(descriptors, isolate()->factory()->empty_descriptor_array());
2828  bind(&not_smi);
2829 }
2830 
2831 
2832 void MacroAssembler::DispatchMap(Register obj,
2833  Handle<Map> map,
2834  Handle<Code> success,
2835  SmiCheckType smi_check_type) {
2836  Label fail;
2837  if (smi_check_type == DO_SMI_CHECK) {
2838  JumpIfSmi(obj, &fail);
2839  }
2840  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
2841  j(equal, success, RelocInfo::CODE_TARGET);
2842 
2843  bind(&fail);
2844 }
2845 
2846 
2847 void MacroAssembler::AbortIfNotNumber(Register object) {
2848  Label ok;
2849  Condition is_smi = CheckSmi(object);
2850  j(is_smi, &ok, Label::kNear);
2851  Cmp(FieldOperand(object, HeapObject::kMapOffset),
2852  isolate()->factory()->heap_number_map());
2853  Assert(equal, "Operand not a number");
2854  bind(&ok);
2855 }
2856 
2857 
2858 void MacroAssembler::AbortIfSmi(Register object) {
2859  Condition is_smi = CheckSmi(object);
2860  Assert(NegateCondition(is_smi), "Operand is a smi");
2861 }
2862 
2863 
2864 void MacroAssembler::AbortIfNotSmi(Register object) {
2865  Condition is_smi = CheckSmi(object);
2866  Assert(is_smi, "Operand is not a smi");
2867 }
2868 
2869 
2870 void MacroAssembler::AbortIfNotSmi(const Operand& object) {
2871  Condition is_smi = CheckSmi(object);
2872  Assert(is_smi, "Operand is not a smi");
2873 }
2874 
2875 
2876 void MacroAssembler::AbortIfNotZeroExtended(Register int32_register) {
2877  ASSERT(!int32_register.is(kScratchRegister));
2878  movq(kScratchRegister, 0x100000000l, RelocInfo::NONE);
2879  cmpq(kScratchRegister, int32_register);
2880  Assert(above_equal, "32 bit value in register is not zero-extended");
2881 }
2882 
2883 
2884 void MacroAssembler::AbortIfNotString(Register object) {
2885  testb(object, Immediate(kSmiTagMask));
2886  Assert(not_equal, "Operand is not a string");
2887  push(object);
2888  movq(object, FieldOperand(object, HeapObject::kMapOffset));
2889  CmpInstanceType(object, FIRST_NONSTRING_TYPE);
2890  pop(object);
2891  Assert(below, "Operand is not a string");
2892 }
2893 
2894 
2895 void MacroAssembler::AbortIfNotRootValue(Register src,
2896  Heap::RootListIndex root_value_index,
2897  const char* message) {
2898  ASSERT(!src.is(kScratchRegister));
2899  LoadRoot(kScratchRegister, root_value_index);
2900  cmpq(src, kScratchRegister);
2901  Check(equal, message);
2902 }
2903 
2904 
2905 
2906 Condition MacroAssembler::IsObjectStringType(Register heap_object,
2907  Register map,
2908  Register instance_type) {
2909  movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
 2910  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
 2911  STATIC_ASSERT(kNotStringTag != 0);
 2912  testb(instance_type, Immediate(kIsNotStringMask));
2913  return zero;
2914 }
2915 
2916 
2917 void MacroAssembler::TryGetFunctionPrototype(Register function,
2918  Register result,
2919  Label* miss,
2920  bool miss_on_bound_function) {
2921  // Check that the receiver isn't a smi.
2922  testl(function, Immediate(kSmiTagMask));
2923  j(zero, miss);
2924 
2925  // Check that the function really is a function.
2926  CmpObjectType(function, JS_FUNCTION_TYPE, result);
2927  j(not_equal, miss);
2928 
2929  if (miss_on_bound_function) {
2930  movq(kScratchRegister,
2931  FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
2932  // It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
 2933  // field).
 2934  TestBit(FieldOperand(kScratchRegister,
 2935  SharedFunctionInfo::kCompilerHintsOffset),
2936  SharedFunctionInfo::kBoundFunction);
2937  j(not_zero, miss);
2938  }
2939 
2940  // Make sure that the function has an instance prototype.
2941  Label non_instance;
2942  testb(FieldOperand(result, Map::kBitFieldOffset),
2943  Immediate(1 << Map::kHasNonInstancePrototype));
2944  j(not_zero, &non_instance, Label::kNear);
2945 
2946  // Get the prototype or initial map from the function.
2947  movq(result,
2948  FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2949 
2950  // If the prototype or initial map is the hole, don't return it and
2951  // simply miss the cache instead. This will allow us to allocate a
2952  // prototype object on-demand in the runtime system.
2953  CompareRoot(result, Heap::kTheHoleValueRootIndex);
2954  j(equal, miss);
2955 
2956  // If the function does not have an initial map, we're done.
2957  Label done;
2958  CmpObjectType(result, MAP_TYPE, kScratchRegister);
2959  j(not_equal, &done, Label::kNear);
2960 
2961  // Get the prototype from the initial map.
2962  movq(result, FieldOperand(result, Map::kPrototypeOffset));
2963  jmp(&done, Label::kNear);
2964 
2965  // Non-instance prototype: Fetch prototype from constructor field
2966  // in initial map.
2967  bind(&non_instance);
2968  movq(result, FieldOperand(result, Map::kConstructorOffset));
2969 
2970  // All done.
2971  bind(&done);
2972 }
2973 
2974 
2975 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
2976  if (FLAG_native_code_counters && counter->Enabled()) {
2977  Operand counter_operand = ExternalOperand(ExternalReference(counter));
2978  movl(counter_operand, Immediate(value));
2979  }
2980 }
2981 
2982 
2983 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
2984  ASSERT(value > 0);
2985  if (FLAG_native_code_counters && counter->Enabled()) {
2986  Operand counter_operand = ExternalOperand(ExternalReference(counter));
2987  if (value == 1) {
2988  incl(counter_operand);
2989  } else {
2990  addl(counter_operand, Immediate(value));
2991  }
2992  }
2993 }
2994 
2995 
2996 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
2997  ASSERT(value > 0);
2998  if (FLAG_native_code_counters && counter->Enabled()) {
2999  Operand counter_operand = ExternalOperand(ExternalReference(counter));
3000  if (value == 1) {
3001  decl(counter_operand);
3002  } else {
3003  subl(counter_operand, Immediate(value));
3004  }
3005  }
3006 }
3007 
3008 
3009 #ifdef ENABLE_DEBUGGER_SUPPORT
3010 void MacroAssembler::DebugBreak() {
3011  Set(rax, 0); // No arguments.
3012  LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
3013  CEntryStub ces(1);
3014  ASSERT(AllowThisStubCall(&ces));
3015  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
3016 }
3017 #endif // ENABLE_DEBUGGER_SUPPORT
3018 
3019 
3020 void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
3021  // This macro takes the dst register to make the code more readable
3022  // at the call sites. However, the dst register has to be rcx to
3023  // follow the calling convention which requires the call type to be
3024  // in rcx.
3025  ASSERT(dst.is(rcx));
3026  if (call_kind == CALL_AS_FUNCTION) {
3027  LoadSmiConstant(dst, Smi::FromInt(1));
3028  } else {
3029  LoadSmiConstant(dst, Smi::FromInt(0));
3030  }
3031 }
3032 
3033 
3034 void MacroAssembler::InvokeCode(Register code,
3035  const ParameterCount& expected,
3036  const ParameterCount& actual,
3037  InvokeFlag flag,
3038  const CallWrapper& call_wrapper,
3039  CallKind call_kind) {
3040  // You can't call a function without a valid frame.
3041  ASSERT(flag == JUMP_FUNCTION || has_frame());
3042 
3043  Label done;
3044  bool definitely_mismatches = false;
3045  InvokePrologue(expected,
3046  actual,
3047  Handle<Code>::null(),
3048  code,
3049  &done,
3050  &definitely_mismatches,
3051  flag,
3052  Label::kNear,
3053  call_wrapper,
3054  call_kind);
3055  if (!definitely_mismatches) {
3056  if (flag == CALL_FUNCTION) {
3057  call_wrapper.BeforeCall(CallSize(code));
3058  SetCallKind(rcx, call_kind);
3059  call(code);
3060  call_wrapper.AfterCall();
3061  } else {
3062  ASSERT(flag == JUMP_FUNCTION);
3063  SetCallKind(rcx, call_kind);
3064  jmp(code);
3065  }
3066  bind(&done);
3067  }
3068 }
3069 
3070 
3071 void MacroAssembler::InvokeCode(Handle<Code> code,
3072  const ParameterCount& expected,
3073  const ParameterCount& actual,
3074  RelocInfo::Mode rmode,
3075  InvokeFlag flag,
3076  const CallWrapper& call_wrapper,
3077  CallKind call_kind) {
3078  // You can't call a function without a valid frame.
3079  ASSERT(flag == JUMP_FUNCTION || has_frame());
3080 
3081  Label done;
3082  bool definitely_mismatches = false;
3083  Register dummy = rax;
3084  InvokePrologue(expected,
3085  actual,
3086  code,
3087  dummy,
3088  &done,
3089  &definitely_mismatches,
3090  flag,
3091  Label::kNear,
3092  call_wrapper,
3093  call_kind);
3094  if (!definitely_mismatches) {
3095  if (flag == CALL_FUNCTION) {
3096  call_wrapper.BeforeCall(CallSize(code));
3097  SetCallKind(rcx, call_kind);
3098  Call(code, rmode);
3099  call_wrapper.AfterCall();
3100  } else {
3101  ASSERT(flag == JUMP_FUNCTION);
3102  SetCallKind(rcx, call_kind);
3103  Jump(code, rmode);
3104  }
3105  bind(&done);
3106  }
3107 }
3108 
3109 
3110 void MacroAssembler::InvokeFunction(Register function,
3111  const ParameterCount& actual,
3112  InvokeFlag flag,
3113  const CallWrapper& call_wrapper,
3114  CallKind call_kind) {
3115  // You can't call a function without a valid frame.
3116  ASSERT(flag == JUMP_FUNCTION || has_frame());
3117 
3118  ASSERT(function.is(rdi));
3119  movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3120  movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
3121  movsxlq(rbx,
3122  FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
3123  // Advances rdx to the end of the Code object header, to the start of
3124  // the executable code.
3125  movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3126 
3127  ParameterCount expected(rbx);
3128  InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
3129 }
3130 
3131 
3132 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
3133  const ParameterCount& actual,
3134  InvokeFlag flag,
3135  const CallWrapper& call_wrapper,
3136  CallKind call_kind) {
3137  // You can't call a function without a valid frame.
3138  ASSERT(flag == JUMP_FUNCTION || has_frame());
3139 
3140  // Get the function and setup the context.
3141  LoadHeapObject(rdi, function);
3142  movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
3143 
3144  // We call indirectly through the code field in the function to
3145  // allow recompilation to take effect without changing any of the
3146  // call sites.
3147  movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3148  ParameterCount expected(function->shared()->formal_parameter_count());
3149  InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
3150 }
3151 
3152 
3153 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3154  const ParameterCount& actual,
3155  Handle<Code> code_constant,
3156  Register code_register,
3157  Label* done,
3158  bool* definitely_mismatches,
3159  InvokeFlag flag,
3160  Label::Distance near_jump,
3161  const CallWrapper& call_wrapper,
3162  CallKind call_kind) {
3163  bool definitely_matches = false;
3164  *definitely_mismatches = false;
3165  Label invoke;
3166  if (expected.is_immediate()) {
3167  ASSERT(actual.is_immediate());
3168  if (expected.immediate() == actual.immediate()) {
3169  definitely_matches = true;
3170  } else {
3171  Set(rax, actual.immediate());
3172  if (expected.immediate() ==
3173  SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
3174  // Don't worry about adapting arguments for built-ins that
3175  // don't want that done. Skip adaptation code by making it look
3176  // like we have a match between expected and actual number of
3177  // arguments.
3178  definitely_matches = true;
3179  } else {
3180  *definitely_mismatches = true;
3181  Set(rbx, expected.immediate());
3182  }
3183  }
3184  } else {
3185  if (actual.is_immediate()) {
3186  // Expected is in register, actual is immediate. This is the
3187  // case when we invoke function values without going through the
3188  // IC mechanism.
3189  cmpq(expected.reg(), Immediate(actual.immediate()));
3190  j(equal, &invoke, Label::kNear);
3191  ASSERT(expected.reg().is(rbx));
3192  Set(rax, actual.immediate());
3193  } else if (!expected.reg().is(actual.reg())) {
3194  // Both expected and actual are in (different) registers. This
3195  // is the case when we invoke functions using call and apply.
3196  cmpq(expected.reg(), actual.reg());
3197  j(equal, &invoke, Label::kNear);
3198  ASSERT(actual.reg().is(rax));
3199  ASSERT(expected.reg().is(rbx));
3200  }
3201  }
3202 
3203  if (!definitely_matches) {
3204  Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
3205  if (!code_constant.is_null()) {
3206  movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
3207  addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
3208  } else if (!code_register.is(rdx)) {
3209  movq(rdx, code_register);
3210  }
3211 
3212  if (flag == CALL_FUNCTION) {
3213  call_wrapper.BeforeCall(CallSize(adaptor));
3214  SetCallKind(rcx, call_kind);
3215  Call(adaptor, RelocInfo::CODE_TARGET);
3216  call_wrapper.AfterCall();
3217  if (!*definitely_mismatches) {
3218  jmp(done, near_jump);
3219  }
3220  } else {
3221  SetCallKind(rcx, call_kind);
3222  Jump(adaptor, RelocInfo::CODE_TARGET);
3223  }
3224  bind(&invoke);
3225  }
3226 }
3227 
3228 
3229 void MacroAssembler::EnterFrame(StackFrame::Type type) {
3230  push(rbp);
3231  movq(rbp, rsp);
3232  push(rsi); // Context.
3233  Push(Smi::FromInt(type));
3234  movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
3235  push(kScratchRegister);
3236  if (emit_debug_code()) {
3237  movq(kScratchRegister,
3238  isolate()->factory()->undefined_value(),
3239  RelocInfo::EMBEDDED_OBJECT);
3240  cmpq(Operand(rsp, 0), kScratchRegister);
3241  Check(not_equal, "code object not properly patched");
3242  }
3243 }
3244 
3245 
3246 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
3247  if (emit_debug_code()) {
3248  Move(kScratchRegister, Smi::FromInt(type));
3249  cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
3250  Check(equal, "stack frame types must match");
3251  }
3252  movq(rsp, rbp);
3253  pop(rbp);
3254 }
3255 
3256 
3257 void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
3258  // Set up the frame structure on the stack.
3259  // All constants are relative to the frame pointer of the exit frame.
3260  ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
3261  ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
3262  ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
3263  push(rbp);
3264  movq(rbp, rsp);
3265 
3266  // Reserve room for entry stack pointer and push the code object.
3267  ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
3268  push(Immediate(0)); // Saved entry sp, patched before call.
3269  movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
3270  push(kScratchRegister); // Accessed from ExitFrame::code_slot.
3271 
3272  // Save the frame pointer and the context in top.
3273  if (save_rax) {
3274  movq(r14, rax); // Backup rax in callee-save register.
3275  }
3276 
3277  Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
3278  Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi);
3279 }
3280 
3281 
3282 void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
3283  bool save_doubles) {
3284 #ifdef _WIN64
3285  const int kShadowSpace = 4;
3286  arg_stack_space += kShadowSpace;
3287 #endif
3288  // Optionally save all XMM registers.
3289  if (save_doubles) {
3290  int space = XMMRegister::kNumRegisters * kDoubleSize +
3291  arg_stack_space * kPointerSize;
3292  subq(rsp, Immediate(space));
3293  int offset = -2 * kPointerSize;
3294  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
3295  XMMRegister reg = XMMRegister::FromAllocationIndex(i);
3296  movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
3297  }
3298  } else if (arg_stack_space > 0) {
3299  subq(rsp, Immediate(arg_stack_space * kPointerSize));
3300  }
3301 
3302  // Get the required frame alignment for the OS.
3303  const int kFrameAlignment = OS::ActivationFrameAlignment();
3304  if (kFrameAlignment > 0) {
3305  ASSERT(IsPowerOf2(kFrameAlignment));
3306  ASSERT(is_int8(kFrameAlignment));
3307  and_(rsp, Immediate(-kFrameAlignment));
3308  }
3309 
3310  // Patch the saved entry sp.
3311  movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
3312 }
3313 
3314 
3315 void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
3316  EnterExitFramePrologue(true);
3317 
3318  // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
3319  // so it must be retained across the C-call.
3320  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
3321  lea(r15, Operand(rbp, r14, times_pointer_size, offset));
3322 
3323  EnterExitFrameEpilogue(arg_stack_space, save_doubles);
3324 }
3325 
3326 
3327 void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
3328  EnterExitFramePrologue(false);
3329  EnterExitFrameEpilogue(arg_stack_space, false);
3330 }
3331 
3332 
3333 void MacroAssembler::LeaveExitFrame(bool save_doubles) {
3334  // Registers:
3335  // r15 : argv
3336  if (save_doubles) {
3337  int offset = -2 * kPointerSize;
3338  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
3339  XMMRegister reg = XMMRegister::FromAllocationIndex(i);
3340  movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
3341  }
3342  }
3343  // Get the return address from the stack and restore the frame pointer.
3344  movq(rcx, Operand(rbp, 1 * kPointerSize));
3345  movq(rbp, Operand(rbp, 0 * kPointerSize));
3346 
3347  // Drop everything up to and including the arguments and the receiver
3348  // from the caller stack.
3349  lea(rsp, Operand(r15, 1 * kPointerSize));
3350 
3351  // Push the return address to get ready to return.
3352  push(rcx);
3353 
3354  LeaveExitFrameEpilogue();
3355 }
3356 
3357 
3358 void MacroAssembler::LeaveApiExitFrame() {
3359  movq(rsp, rbp);
3360  pop(rbp);
3361 
3362  LeaveExitFrameEpilogue();
3363 }
3364 
3365 
3366 void MacroAssembler::LeaveExitFrameEpilogue() {
3367  // Restore current context from top and clear it in debug mode.
3368  ExternalReference context_address(Isolate::kContextAddress, isolate());
3369  Operand context_operand = ExternalOperand(context_address);
3370  movq(rsi, context_operand);
3371 #ifdef DEBUG
3372  movq(context_operand, Immediate(0));
3373 #endif
3374 
3375  // Clear the top frame.
3376  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
3377  isolate());
3378  Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
3379  movq(c_entry_fp_operand, Immediate(0));
3380 }
3381 
3382 
3383 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
3384  Register scratch,
3385  Label* miss) {
3386  Label same_contexts;
3387 
3388  ASSERT(!holder_reg.is(scratch));
3389  ASSERT(!scratch.is(kScratchRegister));
3390  // Load current lexical context from the stack frame.
3391  movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
3392 
3393  // When generating debug code, make sure the lexical context is set.
3394  if (emit_debug_code()) {
3395  cmpq(scratch, Immediate(0));
3396  Check(not_equal, "we should not have an empty lexical context");
3397  }
3398  // Load the global context of the current context.
3399  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
3400  movq(scratch, FieldOperand(scratch, offset));
3401  movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
3402 
3403  // Check the context is a global context.
3404  if (emit_debug_code()) {
3405  Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
3406  isolate()->factory()->global_context_map());
3407  Check(equal, "JSGlobalObject::global_context should be a global context.");
3408  }
3409 
3410  // Check if both contexts are the same.
3411  cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
3412  j(equal, &same_contexts);
3413 
3414  // Compare security tokens.
3415  // Check that the security token in the calling global object is
3416  // compatible with the security token in the receiving global
3417  // object.
3418 
3419  // Check the context is a global context.
3420  if (emit_debug_code()) {
3421  // Preserve original value of holder_reg.
3422  push(holder_reg);
3423  movq(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
3424  CompareRoot(holder_reg, Heap::kNullValueRootIndex);
3425  Check(not_equal, "JSGlobalProxy::context() should not be null.");
3426 
3427  // Read the first word and compare to global_context_map().
3428  movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
3429  CompareRoot(holder_reg, Heap::kGlobalContextMapRootIndex);
3430  Check(equal, "JSGlobalObject::global_context should be a global context.");
3431  pop(holder_reg);
3432  }
3433 
3434  movq(kScratchRegister,
3435  FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
3436  int token_offset =
3437  Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
3438  movq(scratch, FieldOperand(scratch, token_offset));
3439  cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
3440  j(not_equal, miss);
3441 
3442  bind(&same_contexts);
3443 }
3444 
3445 
3446 void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
3447  // First of all we assign the hash seed to scratch.
3448  LoadRoot(scratch, Heap::kHashSeedRootIndex);
3449  SmiToInteger32(scratch, scratch);
3450 
3451  // Xor original key with a seed.
3452  xorl(r0, scratch);
3453 
3454  // Compute the hash code from the untagged key. This must be kept in sync
3455  // with ComputeIntegerHash in utils.h.
3456  //
3457  // hash = ~hash + (hash << 15);
3458  movl(scratch, r0);
3459  notl(r0);
3460  shll(scratch, Immediate(15));
3461  addl(r0, scratch);
3462  // hash = hash ^ (hash >> 12);
3463  movl(scratch, r0);
3464  shrl(scratch, Immediate(12));
3465  xorl(r0, scratch);
3466  // hash = hash + (hash << 2);
3467  leal(r0, Operand(r0, r0, times_4, 0));
3468  // hash = hash ^ (hash >> 4);
3469  movl(scratch, r0);
3470  shrl(scratch, Immediate(4));
3471  xorl(r0, scratch);
3472  // hash = hash * 2057;
3473  imull(r0, r0, Immediate(2057));
3474  // hash = hash ^ (hash >> 16);
3475  movl(scratch, r0);
3476  shrl(scratch, Immediate(16));
3477  xorl(r0, scratch);
3478 }
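// Illustrative sketch (not part of macro-assembler-x64.cc): the same seeded
// integer hash in plain C++, mirroring the step-by-step comments above (and
// the ComputeIntegerHash routine this code must stay in sync with).
#include <cstdint>

uint32_t ComputeIntegerHashSketch(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;
  hash = ~hash + (hash << 15);
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;
  hash = hash ^ (hash >> 16);
  return hash;
}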
3479 
3480 
3481 
3482 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
3483  Register elements,
3484  Register key,
3485  Register r0,
3486  Register r1,
3487  Register r2,
3488  Register result) {
3489  // Register use:
3490  //
3491  // elements - holds the slow-case elements of the receiver on entry.
3492  // Unchanged unless 'result' is the same register.
3493  //
3494  // key - holds the smi key on entry.
3495  // Unchanged unless 'result' is the same register.
3496  //
3497  // Scratch registers:
3498  //
3499  // r0 - holds the untagged key on entry and holds the hash once computed.
3500  //
3501  // r1 - used to hold the capacity mask of the dictionary
3502  //
3503  // r2 - used for the index into the dictionary.
3504  //
3505  // result - holds the result on exit if the load succeeded.
3506  // Allowed to be the same as 'key' or 'result'.
3507  // Unchanged on bailout so 'key' or 'result' can be used
3508  // in further computation.
3509 
3510  Label done;
3511 
3512  GetNumberHash(r0, r1);
3513 
3514  // Compute capacity mask.
3515  SmiToInteger32(r1, FieldOperand(elements,
3516  SeededNumberDictionary::kCapacityOffset));
3517  decl(r1);
3518 
3519  // Generate an unrolled loop that performs a few probes before giving up.
3520  const int kProbes = 4;
3521  for (int i = 0; i < kProbes; i++) {
3522  // Use r2 for index calculations and keep the hash intact in r0.
3523  movq(r2, r0);
3524  // Compute the masked index: (hash + i + i * i) & mask.
3525  if (i > 0) {
3526  addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
3527  }
3528  and_(r2, r1);
3529 
3530  // Scale the index by multiplying by the entry size.
3531  ASSERT(SeededNumberDictionary::kEntrySize == 3);
3532  lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
3533 
3534  // Check if the key matches.
 3535  cmpq(key, FieldOperand(elements,
 3536  r2,
 3537  times_pointer_size,
 3538  SeededNumberDictionary::kElementsStartOffset));
3539  if (i != (kProbes - 1)) {
3540  j(equal, &done);
3541  } else {
3542  j(not_equal, miss);
3543  }
3544  }
3545 
3546  bind(&done);
3547  // Check that the value is a normal property.
3548  const int kDetailsOffset =
3549  SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
3550  ASSERT_EQ(NORMAL, 0);
3551  Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
3552  Smi::FromInt(PropertyDetails::TypeField::kMask));
3553  j(not_zero, miss);
3554 
3555  // Get the value at the masked, scaled index.
3556  const int kValueOffset =
3557  SeededNumberDictionary::kElementsStartOffset + kPointerSize;
3558  movq(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
3559 }
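// Illustrative sketch (not part of macro-assembler-x64.cc): one probe of the
// unrolled loop above. The per-probe offset comes from
// SeededNumberDictionary::GetProbeOffset(i); the index is masked by
// capacity - 1 and scaled by the 3-word entry size before the key compare.
#include <cstdint>

int ProbeEntryIndexSketch(uint32_t hash, uint32_t capacity_mask,
                          int probe_offset) {
  uint32_t masked = (hash + probe_offset) & capacity_mask;
  return static_cast<int>(masked) * 3;  // SeededNumberDictionary::kEntrySize
}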
3560 
3561 
3562 void MacroAssembler::LoadAllocationTopHelper(Register result,
 3563  Register scratch,
 3564  AllocationFlags flags) {
 3565  ExternalReference new_space_allocation_top =
3566  ExternalReference::new_space_allocation_top_address(isolate());
3567 
3568  // Just return if allocation top is already known.
3569  if ((flags & RESULT_CONTAINS_TOP) != 0) {
3570  // No use of scratch if allocation top is provided.
3571  ASSERT(!scratch.is_valid());
3572 #ifdef DEBUG
3573  // Assert that result actually contains top on entry.
3574  Operand top_operand = ExternalOperand(new_space_allocation_top);
3575  cmpq(result, top_operand);
3576  Check(equal, "Unexpected allocation top");
3577 #endif
3578  return;
3579  }
3580 
3581  // Move address of new object to result. Use scratch register if available,
3582  // and keep address in scratch until call to UpdateAllocationTopHelper.
3583  if (scratch.is_valid()) {
3584  LoadAddress(scratch, new_space_allocation_top);
3585  movq(result, Operand(scratch, 0));
3586  } else {
3587  Load(result, new_space_allocation_top);
3588  }
3589 }
3590 
3591 
3592 void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
3593  Register scratch) {
3594  if (emit_debug_code()) {
3595  testq(result_end, Immediate(kObjectAlignmentMask));
3596  Check(zero, "Unaligned allocation in new space");
3597  }
3598 
3599  ExternalReference new_space_allocation_top =
3600  ExternalReference::new_space_allocation_top_address(isolate());
3601 
3602  // Update new top.
3603  if (scratch.is_valid()) {
3604  // Scratch already contains address of allocation top.
3605  movq(Operand(scratch, 0), result_end);
3606  } else {
3607  Store(new_space_allocation_top, result_end);
3608  }
3609 }
3610 
3611 
3612 void MacroAssembler::AllocateInNewSpace(int object_size,
3613  Register result,
3614  Register result_end,
3615  Register scratch,
3616  Label* gc_required,
3617  AllocationFlags flags) {
3618  if (!FLAG_inline_new) {
3619  if (emit_debug_code()) {
3620  // Trash the registers to simulate an allocation failure.
3621  movl(result, Immediate(0x7091));
3622  if (result_end.is_valid()) {
3623  movl(result_end, Immediate(0x7191));
3624  }
3625  if (scratch.is_valid()) {
3626  movl(scratch, Immediate(0x7291));
3627  }
3628  }
3629  jmp(gc_required);
3630  return;
3631  }
3632  ASSERT(!result.is(result_end));
3633 
3634  // Load address of new object into result.
3635  LoadAllocationTopHelper(result, scratch, flags);
3636 
3637  // Calculate new top and bail out if new space is exhausted.
3638  ExternalReference new_space_allocation_limit =
3639  ExternalReference::new_space_allocation_limit_address(isolate());
3640 
3641  Register top_reg = result_end.is_valid() ? result_end : result;
3642 
3643  if (!top_reg.is(result)) {
3644  movq(top_reg, result);
3645  }
3646  addq(top_reg, Immediate(object_size));
3647  j(carry, gc_required);
3648  Operand limit_operand = ExternalOperand(new_space_allocation_limit);
3649  cmpq(top_reg, limit_operand);
3650  j(above, gc_required);
3651 
3652  // Update allocation top.
3653  UpdateAllocationTopHelper(top_reg, scratch);
3654 
3655  if (top_reg.is(result)) {
3656  if ((flags & TAG_OBJECT) != 0) {
3657  subq(result, Immediate(object_size - kHeapObjectTag));
3658  } else {
3659  subq(result, Immediate(object_size));
3660  }
3661  } else if ((flags & TAG_OBJECT) != 0) {
3662  // Tag the result if requested.
3663  addq(result, Immediate(kHeapObjectTag));
3664  }
3665 }
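
The fast path above is bump-pointer allocation: load the current new-space top, add the object size, bail out to the GC on overflow or when the limit is exceeded, then store the new top back. A hedged C++ sketch of the same control flow; allocation_top and allocation_limit are stand-ins for the new_space_allocation_top/limit external references:

#include <cstddef>
#include <cstdint>

// Illustrative bump-pointer allocation mirroring AllocateInNewSpace's fast
// path. Returns 0 where the generated code would jump to gc_required.
static const uintptr_t kHeapObjectTag = 1;  // heap-object tag, as in V8

uintptr_t AllocateOrZero(uintptr_t* allocation_top,
                         uintptr_t allocation_limit,
                         size_t object_size,
                         bool tag_object) {
  uintptr_t top = *allocation_top;           // LoadAllocationTopHelper
  uintptr_t new_top = top + object_size;     // addq(top_reg, object_size)
  if (new_top < top) return 0;               // j(carry, gc_required)
  if (new_top > allocation_limit) return 0;  // cmpq limit; j(above, gc_required)
  *allocation_top = new_top;                 // UpdateAllocationTopHelper
  return tag_object ? top + kHeapObjectTag : top;  // TAG_OBJECT
}
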
3666 
3667 
3668 void MacroAssembler::AllocateInNewSpace(int header_size,
3669  ScaleFactor element_size,
3670  Register element_count,
3671  Register result,
3672  Register result_end,
3673  Register scratch,
3674  Label* gc_required,
3675  AllocationFlags flags) {
3676  if (!FLAG_inline_new) {
3677  if (emit_debug_code()) {
3678  // Trash the registers to simulate an allocation failure.
3679  movl(result, Immediate(0x7091));
3680  movl(result_end, Immediate(0x7191));
3681  if (scratch.is_valid()) {
3682  movl(scratch, Immediate(0x7291));
3683  }
3684  // Register element_count is not modified by the function.
3685  }
3686  jmp(gc_required);
3687  return;
3688  }
3689  ASSERT(!result.is(result_end));
3690 
3691  // Load address of new object into result.
3692  LoadAllocationTopHelper(result, scratch, flags);
3693 
3694  // Calculate new top and bail out if new space is exhausted.
3695  ExternalReference new_space_allocation_limit =
3696  ExternalReference::new_space_allocation_limit_address(isolate());
3697 
3698  // We assume that element_count*element_size + header_size does not
3699  // overflow.
3700  lea(result_end, Operand(element_count, element_size, header_size));
3701  addq(result_end, result);
3702  j(carry, gc_required);
3703  Operand limit_operand = ExternalOperand(new_space_allocation_limit);
3704  cmpq(result_end, limit_operand);
3705  j(above, gc_required);
3706 
3707  // Update allocation top.
3708  UpdateAllocationTopHelper(result_end, scratch);
3709 
3710  // Tag the result if requested.
3711  if ((flags & TAG_OBJECT) != 0) {
3712  addq(result, Immediate(kHeapObjectTag));
3713  }
3714 }
3715 
3716 
3717 void MacroAssembler::AllocateInNewSpace(Register object_size,
3718  Register result,
3719  Register result_end,
3720  Register scratch,
3721  Label* gc_required,
3722  AllocationFlags flags) {
3723  if (!FLAG_inline_new) {
3724  if (emit_debug_code()) {
3725  // Trash the registers to simulate an allocation failure.
3726  movl(result, Immediate(0x7091));
3727  movl(result_end, Immediate(0x7191));
3728  if (scratch.is_valid()) {
3729  movl(scratch, Immediate(0x7291));
3730  }
3731  // object_size is left unchanged by this function.
3732  }
3733  jmp(gc_required);
3734  return;
3735  }
3736  ASSERT(!result.is(result_end));
3737 
3738  // Load address of new object into result.
3739  LoadAllocationTopHelper(result, scratch, flags);
3740 
3741  // Calculate new top and bail out if new space is exhausted.
3742  ExternalReference new_space_allocation_limit =
3743  ExternalReference::new_space_allocation_limit_address(isolate());
3744  if (!object_size.is(result_end)) {
3745  movq(result_end, object_size);
3746  }
3747  addq(result_end, result);
3748  j(carry, gc_required);
3749  Operand limit_operand = ExternalOperand(new_space_allocation_limit);
3750  cmpq(result_end, limit_operand);
3751  j(above, gc_required);
3752 
3753  // Update allocation top.
3754  UpdateAllocationTopHelper(result_end, scratch);
3755 
3756  // Tag the result if requested.
3757  if ((flags & TAG_OBJECT) != 0) {
3758  addq(result, Immediate(kHeapObjectTag));
3759  }
3760 }
3761 
3762 
3763 void MacroAssembler::UndoAllocationInNewSpace(Register object) {
3764  ExternalReference new_space_allocation_top =
3765  ExternalReference::new_space_allocation_top_address(isolate());
3766 
3767  // Make sure the object has no tag before resetting top.
3768  and_(object, Immediate(~kHeapObjectTagMask));
3769  Operand top_operand = ExternalOperand(new_space_allocation_top);
3770 #ifdef DEBUG
3771  cmpq(object, top_operand);
3772  Check(below, "Undo allocation of non allocated memory");
3773 #endif
3774  movq(top_operand, object);
3775 }
3776 
3777 
3778 void MacroAssembler::AllocateHeapNumber(Register result,
3779  Register scratch,
3780  Label* gc_required) {
3781  // Allocate heap number in new space.
3782  AllocateInNewSpace(HeapNumber::kSize,
3783  result,
3784  scratch,
3785  no_reg,
3786  gc_required,
3787  TAG_OBJECT);
3788 
3789  // Set the map.
3790  LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
3791  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
3792 }
3793 
3794 
3795 void MacroAssembler::AllocateTwoByteString(Register result,
3796  Register length,
3797  Register scratch1,
3798  Register scratch2,
3799  Register scratch3,
3800  Label* gc_required) {
3801  // Calculate the number of bytes needed for the characters in the string while
3802  // observing object alignment.
3803  const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
3804  kObjectAlignmentMask;
3805  ASSERT(kShortSize == 2);
3806  // scratch1 = length * 2 + kObjectAlignmentMask.
3807  lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
3808  kHeaderAlignment));
3809  and_(scratch1, Immediate(~kObjectAlignmentMask));
3810  if (kHeaderAlignment > 0) {
3811  subq(scratch1, Immediate(kHeaderAlignment));
3812  }
3813 
3814  // Allocate two byte string in new space.
3815  AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
3816  times_1,
3817  scratch1,
3818  result,
3819  scratch2,
3820  scratch3,
3821  gc_required,
3822  TAG_OBJECT);
3823 
3824  // Set the map, length and hash field.
3825  LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
3826  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
3827  Integer32ToSmi(scratch1, length);
3828  movq(FieldOperand(result, String::kLengthOffset), scratch1);
3829  movq(FieldOperand(result, String::kHashFieldOffset),
3830  Immediate(String::kEmptyHashField));
3831 }
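
The size computation above rounds the character payload up to object alignment while compensating for a header whose size may not itself be aligned: kHeaderAlignment is folded into the rounding and subtracted out again, so header plus body always lands on an aligned total. A sketch of that arithmetic; the 20-byte header is deliberately unaligned and purely illustrative (the real value is SeqTwoByteString::kHeaderSize):

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kObjectAlignmentMask = 7;  // kPointerSize - 1 on x64
  const uint32_t kHeaderSize = 20;          // assumed, for illustration only
  const uint32_t kHeaderAlignment = kHeaderSize & kObjectAlignmentMask;

  const uint32_t lengths[] = {0, 1, 3, 8};
  for (uint32_t length : lengths) {
    // scratch1 = length*2 + kObjectAlignmentMask + kHeaderAlignment,
    // rounded down to object alignment, minus kHeaderAlignment again.
    uint32_t body = ((length * 2 + kObjectAlignmentMask + kHeaderAlignment) &
                     ~kObjectAlignmentMask) - kHeaderAlignment;
    // AllocateInNewSpace then adds the header, giving an aligned total size.
    std::printf("length %u -> total %u bytes\n", length, kHeaderSize + body);
  }
  return 0;
}
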
3832 
3833 
3834 void MacroAssembler::AllocateAsciiString(Register result,
3835  Register length,
3836  Register scratch1,
3837  Register scratch2,
3838  Register scratch3,
3839  Label* gc_required) {
3840  // Calculate the number of bytes needed for the characters in the string while
3841  // observing object alignment.
3842  const int kHeaderAlignment = SeqAsciiString::kHeaderSize &
3843  kObjectAlignmentMask;
3844  movl(scratch1, length);
3845  ASSERT(kCharSize == 1);
3846  addq(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
3847  and_(scratch1, Immediate(~kObjectAlignmentMask));
3848  if (kHeaderAlignment > 0) {
3849  subq(scratch1, Immediate(kHeaderAlignment));
3850  }
3851 
3852  // Allocate ASCII string in new space.
3853  AllocateInNewSpace(SeqAsciiString::kHeaderSize,
3854  times_1,
3855  scratch1,
3856  result,
3857  scratch2,
3858  scratch3,
3859  gc_required,
3860  TAG_OBJECT);
3861 
3862  // Set the map, length and hash field.
3863  LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
3864  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
3865  Integer32ToSmi(scratch1, length);
3866  movq(FieldOperand(result, String::kLengthOffset), scratch1);
3867  movq(FieldOperand(result, String::kHashFieldOffset),
3868  Immediate(String::kEmptyHashField));
3869 }
3870 
3871 
3872 void MacroAssembler::AllocateTwoByteConsString(Register result,
3873  Register scratch1,
3874  Register scratch2,
3875  Label* gc_required) {
3876  // Allocate cons string in new space.
3877  AllocateInNewSpace(ConsString::kSize,
3878  result,
3879  scratch1,
3880  scratch2,
3881  gc_required,
3882  TAG_OBJECT);
3883 
3884  // Set the map. The other fields are left uninitialized.
3885  LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
3886  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
3887 }
3888 
3889 
3890 void MacroAssembler::AllocateAsciiConsString(Register result,
3891  Register scratch1,
3892  Register scratch2,
3893  Label* gc_required) {
3894  // Allocate cons string in new space.
3895  AllocateInNewSpace(ConsString::kSize,
3896  result,
3897  scratch1,
3898  scratch2,
3899  gc_required,
3900  TAG_OBJECT);
3901 
3902  // Set the map. The other fields are left uninitialized.
3903  LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
3904  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
3905 }
3906 
3907 
3908 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3909  Register scratch1,
3910  Register scratch2,
3911  Label* gc_required) {
3912  // Allocate sliced string in new space.
3913  AllocateInNewSpace(SlicedString::kSize,
3914  result,
3915  scratch1,
3916  scratch2,
3917  gc_required,
3918  TAG_OBJECT);
3919 
3920  // Set the map. The other fields are left uninitialized.
3921  LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
3922  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
3923 }
3924 
3925 
3926 void MacroAssembler::AllocateAsciiSlicedString(Register result,
3927  Register scratch1,
3928  Register scratch2,
3929  Label* gc_required) {
3930  // Allocate sliced string in new space.
3931  AllocateInNewSpace(SlicedString::kSize,
3932  result,
3933  scratch1,
3934  scratch2,
3935  gc_required,
3936  TAG_OBJECT);
3937 
3938  // Set the map. The other fields are left uninitialized.
3939  LoadRoot(kScratchRegister, Heap::kSlicedAsciiStringMapRootIndex);
3940  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
3941 }
3942 
3943 
3944 // Copy memory, byte-by-byte, from source to destination. Not optimized for
3945 // long or aligned copies. The contents of scratch and length are destroyed.
3946 // Destination is incremented by length, source, length and scratch are
3947 // clobbered.
3948 // A simpler loop is faster on small copies, but slower on large ones.
3949 // The cld() instruction must have been emitted, to clear the direction flag,
3950 // before calling this function.
3951 void MacroAssembler::CopyBytes(Register destination,
3952  Register source,
3953  Register length,
3954  int min_length,
3955  Register scratch) {
3956  ASSERT(min_length >= 0);
3957  if (FLAG_debug_code) {
3958  cmpl(length, Immediate(min_length));
3959  Assert(greater_equal, "Invalid min_length");
3960  }
3961  Label loop, done, short_string, short_loop;
3962 
3963  const int kLongStringLimit = 20;
3964  if (min_length <= kLongStringLimit) {
3965  cmpl(length, Immediate(kLongStringLimit));
3966  j(less_equal, &short_string);
3967  }
3968 
3969  ASSERT(source.is(rsi));
3970  ASSERT(destination.is(rdi));
3971  ASSERT(length.is(rcx));
3972 
3973  // Because source is 8-byte aligned in our uses of this function,
3974  // we keep source aligned for the rep movs operation by copying the odd bytes
3975  // at the end of the ranges.
3976  movq(scratch, length);
3977  shrl(length, Immediate(3));
3978  repmovsq();
3979  // Move remaining bytes of length.
3980  andl(scratch, Immediate(0x7));
3981  movq(length, Operand(source, scratch, times_1, -8));
3982  movq(Operand(destination, scratch, times_1, -8), length);
3983  addq(destination, scratch);
3984 
3985  if (min_length <= kLongStringLimit) {
3986  jmp(&done);
3987 
3988  bind(&short_string);
3989  if (min_length == 0) {
3990  testl(length, length);
3991  j(zero, &done);
3992  }
3993  lea(scratch, Operand(destination, length, times_1, 0));
3994 
3995  bind(&short_loop);
3996  movb(length, Operand(source, 0));
3997  movb(Operand(destination, 0), length);
3998  incq(source);
3999  incq(destination);
4000  cmpq(destination, scratch);
4001  j(not_equal, &short_loop);
4002 
4003  bind(&done);
4004  }
4005 }
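
For inputs above the threshold, the copy moves whole qwords with rep movsq and then finishes with a single, possibly overlapping, 8-byte store covering the last bytes of the range, so the long path never needs a byte loop; shorter inputs take the byte loop. A hedged C++ sketch of the same strategy (it assumes, as the assembly does, that an 8-byte read ending at source + length is safe):

#include <cstddef>
#include <cstdint>
#include <cstring>

void CopyBytesSketch(uint8_t* dst, const uint8_t* src, size_t len) {
  const size_t kLongStringLimit = 20;
  if (len > kLongStringLimit) {            // len > 20 implies len >= 8
    std::memcpy(dst, src, (len / 8) * 8);  // rep movsq: whole qwords
    uint64_t tail;                         // overlapping 8-byte tail move
    std::memcpy(&tail, src + len - 8, 8);  // last 8 bytes of the range
    std::memcpy(dst + len - 8, &tail, 8);
  } else {
    for (size_t i = 0; i < len; i++) dst[i] = src[i];  // short_loop
  }
}
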
4006 
4007 
4008 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
4009  Register end_offset,
4010  Register filler) {
4011  Label loop, entry;
4012  jmp(&entry);
4013  bind(&loop);
4014  movq(Operand(start_offset, 0), filler);
4015  addq(start_offset, Immediate(kPointerSize));
4016  bind(&entry);
4017  cmpq(start_offset, end_offset);
4018  j(less, &loop);
4019 }
4020 
4021 
4022 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4023  if (context_chain_length > 0) {
4024  // Move up the chain of contexts to the context containing the slot.
4025  movq(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4026  for (int i = 1; i < context_chain_length; i++) {
4027  movq(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4028  }
4029  } else {
4030  // Slot is in the current function context. Move it into the
4031  // destination register in case we store into it (the write barrier
4032  // cannot be allowed to destroy the context in rsi).
4033  movq(dst, rsi);
4034  }
4035 
4036  // We should not have found a with context by walking the context
4037  // chain (i.e., the static scope chain and runtime context chain do
4038  // not agree). A variable occurring in such a scope should have
4039  // slot type LOOKUP and not CONTEXT.
4040  if (emit_debug_code()) {
4041  CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
4042  Heap::kWithContextMapRootIndex);
4043  Check(not_equal, "Variable resolved to with context.");
4044  }
4045 }
4046 
4047 
4048 void MacroAssembler::LoadTransitionedArrayMapConditional(
4049  ElementsKind expected_kind,
4050  ElementsKind transitioned_kind,
4051  Register map_in_out,
4052  Register scratch,
4053  Label* no_map_match) {
4054  // Load the global or builtins object from the current context.
4055  movq(scratch, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
4056  movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
4057 
4058  // Check that the function's map is the same as the expected cached map.
4059  movq(scratch, Operand(scratch,
4060  Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4061 
4062  int offset = expected_kind * kPointerSize +
4063  FixedArrayBase::kHeaderSize;
4064  cmpq(map_in_out, FieldOperand(scratch, offset));
4065  j(not_equal, no_map_match);
4066 
4067  // Use the transitioned cached map.
4068  offset = transitioned_kind * kPointerSize +
4069  FixedArrayBase::kHeaderSize;
4070  movq(map_in_out, FieldOperand(scratch, offset));
4071 }
4072 
4073 
4074 void MacroAssembler::LoadInitialArrayMap(
4075  Register function_in, Register scratch,
4076  Register map_out, bool can_have_holes) {
4077  ASSERT(!function_in.is(map_out));
4078  Label done;
4079  movq(map_out, FieldOperand(function_in,
4080  JSFunction::kPrototypeOrInitialMapOffset));
4081  if (!FLAG_smi_only_arrays) {
4082  ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
4083  LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
4084  kind,
4085  map_out,
4086  scratch,
4087  &done);
4088  } else if (can_have_holes) {
4089  LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
4090  FAST_HOLEY_SMI_ELEMENTS,
4091  map_out,
4092  scratch,
4093  &done);
4094  }
4095  bind(&done);
4096 }
4097 
4098 #ifdef _WIN64
4099 static const int kRegisterPassedArguments = 4;
4100 #else
4101 static const int kRegisterPassedArguments = 6;
4102 #endif
4103 
4104 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4105  // Load the global or builtins object from the current context.
4106  movq(function, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
4107  // Load the global context from the global or builtins object.
4108  movq(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
4109  // Load the function from the global context.
4110  movq(function, Operand(function, Context::SlotOffset(index)));
4111 }
4112 
4113 
4114 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4115  Register map) {
4116  // Load the initial map. The global functions all have initial maps.
4117  movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4118  if (emit_debug_code()) {
4119  Label ok, fail;
4120  CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
4121  jmp(&ok);
4122  bind(&fail);
4123  Abort("Global functions must have initial map");
4124  bind(&ok);
4125  }
4126 }
4127 
4128 
4129 int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
4130  // On Windows 64 stack slots are reserved by the caller for all arguments
4131  // including the ones passed in registers, and space is always allocated for
4132  // the four register arguments even if the function takes fewer than four
4133  // arguments.
4134  // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
4135  // and the caller does not reserve stack slots for them.
4136  ASSERT(num_arguments >= 0);
4137 #ifdef _WIN64
4138  const int kMinimumStackSlots = kRegisterPassedArguments;
4139  if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
4140  return num_arguments;
4141 #else
4142  if (num_arguments < kRegisterPassedArguments) return 0;
4143  return num_arguments - kRegisterPassedArguments;
4144 #endif
4145 }
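
A sketch of the same computation for both conventions; the register counts mirror the kRegisterPassedArguments constants defined earlier (4 on Windows x64, which also mandates shadow space for those registers, and 6 integer registers, rdi, rsi, rdx, rcx, r8, r9, on the System V AMD64 ABI):

// Stack slots a C call needs, per calling convention, mirroring
// ArgumentStackSlotsForCFunctionCall.
int StackSlotsWin64(int num_arguments) {
  const int kRegisterPassedArguments = 4;  // shadow space is always reserved
  return num_arguments < kRegisterPassedArguments ? kRegisterPassedArguments
                                                  : num_arguments;
}

int StackSlotsSysV(int num_arguments) {
  const int kRegisterPassedArguments = 6;  // rdi, rsi, rdx, rcx, r8, r9
  return num_arguments < kRegisterPassedArguments
             ? 0
             : num_arguments - kRegisterPassedArguments;
}
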
4146 
4147 
4148 void MacroAssembler::PrepareCallCFunction(int num_arguments) {
4149  int frame_alignment = OS::ActivationFrameAlignment();
4150  ASSERT(frame_alignment != 0);
4151  ASSERT(num_arguments >= 0);
4152 
4153  // Make stack end at alignment and allocate space for arguments and old rsp.
4154  movq(kScratchRegister, rsp);
4155  ASSERT(IsPowerOf2(frame_alignment));
4156  int argument_slots_on_stack =
4157  ArgumentStackSlotsForCFunctionCall(num_arguments);
4158  subq(rsp, Immediate((argument_slots_on_stack + 1) * kPointerSize));
4159  and_(rsp, Immediate(-frame_alignment));
4160  movq(Operand(rsp, argument_slots_on_stack * kPointerSize), kScratchRegister);
4161 }
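
PrepareCallCFunction reserves the argument slots plus one extra slot, rounds rsp down to the activation-frame alignment, and stashes the original rsp in that extra slot so CallCFunction can restore it afterwards. A sketch of the pointer arithmetic, assuming 16-byte alignment and 8-byte pointers:

#include <cstdint>

// Illustrative version of the rsp adjustment in PrepareCallCFunction.
// frame_alignment mirrors OS::ActivationFrameAlignment() (16 on x64).
uintptr_t AlignedStackAfterPrepare(uintptr_t rsp,
                                   int argument_slots_on_stack,
                                   uintptr_t frame_alignment = 16) {
  const uintptr_t kPointerSize = 8;
  rsp -= (argument_slots_on_stack + 1) * kPointerSize;  // subq(rsp, ...)
  rsp &= ~(frame_alignment - 1);                        // and_(rsp, -alignment)
  // The old rsp would then be stored just above the argument slots:
  // movq(Operand(rsp, argument_slots_on_stack * kPointerSize), saved_rsp).
  return rsp;
}
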
4162 
4163 
4164 void MacroAssembler::CallCFunction(ExternalReference function,
4165  int num_arguments) {
4166  LoadAddress(rax, function);
4167  CallCFunction(rax, num_arguments);
4168 }
4169 
4170 
4171 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
4172  ASSERT(has_frame());
4173  // Check stack alignment.
4174  if (emit_debug_code()) {
4175  CheckStackAlignment();
4176  }
4177 
4178  call(function);
4179  ASSERT(OS::ActivationFrameAlignment() != 0);
4180  ASSERT(num_arguments >= 0);
4181  int argument_slots_on_stack =
4182  ArgumentStackSlotsForCFunctionCall(num_arguments);
4183  movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize));
4184 }
4185 
4186 
4187 bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
4188  if (r1.is(r2)) return true;
4189  if (r1.is(r3)) return true;
4190  if (r1.is(r4)) return true;
4191  if (r2.is(r3)) return true;
4192  if (r2.is(r4)) return true;
4193  if (r3.is(r4)) return true;
4194  return false;
4195 }
4196 
4197 
4198 CodePatcher::CodePatcher(byte* address, int size)
4199  : address_(address),
4200  size_(size),
4201  masm_(NULL, address, size + Assembler::kGap) {
4202  // Create a new macro assembler pointing to the address of the code to patch.
4203  // The size is adjusted with kGap in order for the assembler to generate size
4204  // bytes of instructions without failing with buffer size constraints.
4205  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4206 }
4207 
4208 
4209 CodePatcher::~CodePatcher() {
4210  // Indicate that code has changed.
4211  CPU::FlushICache(address_, size_);
4212 
4213  // Check that the code was patched as expected.
4214  ASSERT(masm_.pc_ == address_ + size_);
4215  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4216 }
4217 
4218 
4219 void MacroAssembler::CheckPageFlag(
4220  Register object,
4221  Register scratch,
4222  int mask,
4223  Condition cc,
4224  Label* condition_met,
4225  Label::Distance condition_met_distance) {
4226  ASSERT(cc == zero || cc == not_zero);
4227  if (scratch.is(object)) {
4228  and_(scratch, Immediate(~Page::kPageAlignmentMask));
4229  } else {
4230  movq(scratch, Immediate(~Page::kPageAlignmentMask));
4231  and_(scratch, object);
4232  }
4233  if (mask < (1 << kBitsPerByte)) {
4234  testb(Operand(scratch, MemoryChunk::kFlagsOffset),
4235  Immediate(static_cast<uint8_t>(mask)));
4236  } else {
4237  testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
4238  }
4239  j(cc, condition_met, condition_met_distance);
4240 }
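
The flag test locates the page header by clearing the low page-offset bits of the object address, then tests the flags word stored at a fixed offset in that header. A sketch of the address arithmetic; the 1 MB page size is an assumption for illustration, and the caller would read the flags word at MemoryChunk::kFlagsOffset from the returned base:

#include <cstdint>

// Page base for a given object address (stand-in for Page::kPageAlignmentMask).
uintptr_t PageBaseOf(uintptr_t object) {
  const uintptr_t kPageAlignmentMask = (1u << 20) - 1;  // assumed page size
  return object & ~kPageAlignmentMask;                  // and_(scratch, ~mask)
}

// The branch condition once the flags word has been loaded from the header.
bool FlagConditionMet(uint32_t page_flags, uint32_t mask) {
  return (page_flags & mask) != 0;                      // testb/testl + j(cc)
}
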
4241 
4242 
4243 void MacroAssembler::JumpIfBlack(Register object,
4244  Register bitmap_scratch,
4245  Register mask_scratch,
4246  Label* on_black,
4247  Label::Distance on_black_distance) {
4248  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
4249  GetMarkBits(object, bitmap_scratch, mask_scratch);
4250 
4251  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
4252  // The mask_scratch register contains a 1 at the position of the first bit
4253  // and a 0 at all other positions, including the position of the second bit.
4254  movq(rcx, mask_scratch);
4255  // Make rcx into a mask that covers both marking bits using the operation
4256  // rcx = mask | (mask << 1).
4257  lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
4258  // Note that we are using a 4-byte aligned 8-byte load.
4259  and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
4260  cmpq(mask_scratch, rcx);
4261  j(equal, on_black, on_black_distance);
4262 }
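
The lea above builds a two-bit cover mask (mask | mask << 1) so a single and/compare can distinguish black, which has only the first mark bit set, from grey (both bits set) and white (neither). A sketch of the predicate:

#include <cstdint>

// Black test from JumpIfBlack: 'mask' has a single bit set at the position of
// the object's first mark bit in the 64-bit bitmap word that was loaded.
bool IsBlack(uint64_t bitmap_word, uint64_t mask) {
  uint64_t cover = mask | (mask << 1);   // lea(rcx, mask + mask*2)
  return (bitmap_word & cover) == mask;  // first bit set, second bit clear
}
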
4263 
4264 
4265 // Detect some, but not all, common pointer-free objects. This is used by the
4266 // incremental write barrier which doesn't care about oddballs (they are always
4267 // marked black immediately so this code is not hit).
4268 void MacroAssembler::JumpIfDataObject(
4269  Register value,
4270  Register scratch,
4271  Label* not_data_object,
4272  Label::Distance not_data_object_distance) {
4273  Label is_data_object;
4274  movq(scratch, FieldOperand(value, HeapObject::kMapOffset));
4275  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
4276  j(equal, &is_data_object, Label::kNear);
4277  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
4278  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
4279  // If it's a string and it's not a cons string then it's an object containing
4280  // no GC pointers.
4281  testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
4282  Immediate(kIsIndirectStringMask | kIsNotStringMask));
4283  j(not_zero, not_data_object, not_data_object_distance);
4284  bind(&is_data_object);
4285 }
4286 
4287 
4288 void MacroAssembler::GetMarkBits(Register addr_reg,
4289  Register bitmap_reg,
4290  Register mask_reg) {
4291  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
4292  movq(bitmap_reg, addr_reg);
4293  // Sign extended 32 bit immediate.
4294  and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
4295  movq(rcx, addr_reg);
4296  int shift =
4297  Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
4298  shrl(rcx, Immediate(shift));
4299  and_(rcx,
4300  Immediate((Page::kPageAlignmentMask >> shift) &
4301  ~(Bitmap::kBytesPerCell - 1)));
4302 
4303  addq(bitmap_reg, rcx);
4304  movq(rcx, addr_reg);
4305  shrl(rcx, Immediate(kPointerSizeLog2));
4306  and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
4307  movl(mask_reg, Immediate(1));
4308  shl_cl(mask_reg);
4309 }
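
GetMarkBits converts an object address into the address of the 32-bit bitmap cell covering it plus a one-bit mask within that cell: the page base comes from masking the address, the cell's byte offset from the middle bits of the page offset, and the bit index from the low bits above the pointer-size shift. A sketch with the same shift arithmetic; the page alignment mask is an assumed value, and the MemoryChunk::kHeaderSize displacement that callers add when dereferencing is omitted:

#include <cstdint>

struct MarkBitLocation {
  uintptr_t cell_base;  // page base + byte offset of the bitmap cell
  uint32_t mask;        // single bit within that 32-bit cell
};

MarkBitLocation GetMarkBitsSketch(uintptr_t addr) {
  const uintptr_t kPageAlignmentMask = (1u << 20) - 1;  // assumed page size
  const int kBitsPerCellLog2 = 5;    // 32 mark bits per cell
  const int kBytesPerCellLog2 = 2;   // 4 bytes per cell
  const int kPointerSizeLog2 = 3;    // 8-byte pointers
  const int shift = kBitsPerCellLog2 + kPointerSizeLog2 - kBytesPerCellLog2;

  uintptr_t page = addr & ~kPageAlignmentMask;          // bitmap_reg
  uintptr_t cell_offset = (addr >> shift) &             // rcx after shrl/and_
      ((kPageAlignmentMask >> shift) & ~((1u << kBytesPerCellLog2) - 1));
  uint32_t bit = (addr >> kPointerSizeLog2) &           // bit index in the cell
      ((1u << kBitsPerCellLog2) - 1);
  return { page + cell_offset, 1u << bit };             // movl(1); shl_cl(mask)
}
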
4310 
4311 
4312 void MacroAssembler::EnsureNotWhite(
4313  Register value,
4314  Register bitmap_scratch,
4315  Register mask_scratch,
4316  Label* value_is_white_and_not_data,
4317  Label::Distance distance) {
4318  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
4319  GetMarkBits(value, bitmap_scratch, mask_scratch);
4320 
4321  // If the value is black or grey we don't need to do anything.
4322  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
4323  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
4324  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
4325  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
4326 
4327  Label done;
4328 
4329  // Since both black and grey have a 1 in the first position and white does
4330  // not have a 1 there we only need to check one bit.
4331  testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
4332  j(not_zero, &done, Label::kNear);
4333 
4334  if (FLAG_debug_code) {
4335  // Check for impossible bit pattern.
4336  Label ok;
4337  push(mask_scratch);
4338  // shl. May overflow making the check conservative.
4339  addq(mask_scratch, mask_scratch);
4340  testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
4341  j(zero, &ok, Label::kNear);
4342  int3();
4343  bind(&ok);
4344  pop(mask_scratch);
4345  }
4346 
4347  // Value is white. We check whether it is data that doesn't need scanning.
4348  // Currently only checks for HeapNumber and non-cons strings.
4349  Register map = rcx; // Holds map while checking type.
4350  Register length = rcx; // Holds length of object after checking type.
4351  Label not_heap_number;
4352  Label is_data_object;
4353 
4354  // Check for heap-number
4355  movq(map, FieldOperand(value, HeapObject::kMapOffset));
4356  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
4357  j(not_equal, &not_heap_number, Label::kNear);
4358  movq(length, Immediate(HeapNumber::kSize));
4359  jmp(&is_data_object, Label::kNear);
4360 
4361  bind(&not_heap_number);
4362  // Check for strings.
4363  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
4364  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
4365  // If it's a string and it's not a cons string then it's an object containing
4366  // no GC pointers.
4367  Register instance_type = rcx;
4368  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
4369  testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
4370  j(not_zero, value_is_white_and_not_data);
4371  // It's a non-indirect (non-cons and non-slice) string.
4372  // If it's external, the length is just ExternalString::kSize.
4373  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
4374  Label not_external;
4375  // External strings are the only ones with the kExternalStringTag bit
4376  // set.
4377  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
4378  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
4379  testb(instance_type, Immediate(kExternalStringTag));
4380  j(zero, &not_external, Label::kNear);
4381  movq(length, Immediate(ExternalString::kSize));
4382  jmp(&is_data_object, Label::kNear);
4383 
4384  bind(&not_external);
4385  // Sequential string, either ASCII or UC16.
4386  ASSERT(kAsciiStringTag == 0x04);
4387  and_(length, Immediate(kStringEncodingMask));
4388  xor_(length, Immediate(kStringEncodingMask));
4389  addq(length, Immediate(0x04));
4390  // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
4391  imul(length, FieldOperand(value, String::kLengthOffset));
4392  shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
4393  addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
4394  and_(length, Immediate(~kObjectAlignmentMask));
4395 
4396  bind(&is_data_object);
4397  // Value is a data object, and it is white. Mark it black. Since we know
4398  // that the object is white we can make it black by flipping one bit.
4399  or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
4400 
4401  and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
4402  addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
4403 
4404  bind(&done);
4405 }
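
For sequential strings, the object size is derived directly from the instance type: masking with kStringEncodingMask leaves 0x04 for ASCII and 0 for two-byte strings, xoring with the mask flips that, and adding 0x04 yields the character size shifted left by two (4 or 8); the subsequent imul and shr cancel both that factor and the smi encoding of the length. A sketch of just that size computation, with an assumed header size:

#include <cstdint>

// Sequential-string size as computed in EnsureNotWhite, assuming
// kStringEncodingMask == kAsciiStringTag == 0x04 (as asserted above),
// an illustrative 16-byte SeqString header and 8-byte object alignment.
uint32_t SeqStringSizeSketch(uint32_t instance_type, uint32_t length) {
  const uint32_t kStringEncodingMask = 0x04;
  const uint32_t kSeqStringHeaderSize = 16;  // assumed, for illustration only
  const uint32_t kObjectAlignmentMask = 7;

  uint32_t t = instance_type & kStringEncodingMask;  // 4 if ASCII, else 0
  t ^= kStringEncodingMask;                          // 0 if ASCII, else 4
  t += 0x04;                                         // 4 (ASCII) or 8 (UC16)
  uint32_t size = (t * length) >> 2;                 // char size * length
  size += kSeqStringHeaderSize + kObjectAlignmentMask;
  return size & ~kObjectAlignmentMask;               // object-aligned size
}
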
4406 
4407 
4408 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
4409  Label next;
4410  Register empty_fixed_array_value = r8;
4411  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
4412  Register empty_descriptor_array_value = r9;
4413  LoadRoot(empty_descriptor_array_value,
4414  Heap::kEmptyDescriptorArrayRootIndex);
4415  movq(rcx, rax);
4416  bind(&next);
4417 
4418  // Check that there are no elements. Register rcx contains the
4419  // current JS object we've reached through the prototype chain.
4420  cmpq(empty_fixed_array_value,
4421  FieldOperand(rcx, JSObject::kElementsOffset));
4422  j(not_equal, call_runtime);
4423 
4424  // Check that instance descriptors are not empty so that we can
4425  // check for an enum cache. Leave the map in rbx for the subsequent
4426  // prototype load.
4427  movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
4428  movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOrBitField3Offset));
4429  JumpIfSmi(rdx, call_runtime);
4430 
4431  // Check that there is an enum cache in the non-empty instance
4432  // descriptors (rdx). This is the case if the next enumeration
4433  // index field does not contain a smi.
4434  movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
4435  JumpIfSmi(rdx, call_runtime);
4436 
4437  // For all objects but the receiver, check that the cache is empty.
4438  Label check_prototype;
4439  cmpq(rcx, rax);
4440  j(equal, &check_prototype, Label::kNear);
4441  movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
4442  cmpq(rdx, empty_fixed_array_value);
4443  j(not_equal, call_runtime);
4444 
4445  // Load the prototype from the map and loop if non-null.
4446  bind(&check_prototype);
4447  movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
4448  cmpq(rcx, null_value);
4449  j(not_equal, &next);
4450 }
4451 
4452 
4453 } } // namespace v8::internal
4454 
4455 #endif // V8_TARGET_ARCH_X64