v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
macro-assembler-x64.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if defined(V8_TARGET_ARCH_X64)
31 
32 #include "bootstrapper.h"
33 #include "codegen.h"
34 #include "assembler-x64.h"
35 #include "macro-assembler-x64.h"
36 #include "serialize.h"
37 #include "debug.h"
38 #include "heap.h"
39 
40 namespace v8 {
41 namespace internal {
42 
43 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
44  : Assembler(arg_isolate, buffer, size),
45  generating_stub_(false),
46  allow_stub_calls_(true),
47  has_frame_(false),
48  root_array_available_(true) {
49  if (isolate() != NULL) {
50  code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
51  isolate());
52  }
53 }
54 
55 
56 static const int kInvalidRootRegisterDelta = -1;
57 
58 
59 intptr_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
60  if (predictable_code_size() &&
61  (other.address() < reinterpret_cast<Address>(isolate()) ||
62  other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
63  return kInvalidRootRegisterDelta;
64  }
65  Address roots_register_value = kRootRegisterBias +
66  reinterpret_cast<Address>(isolate()->heap()->roots_array_start());
67  intptr_t delta = other.address() - roots_register_value;
68  return delta;
69 }
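// Note on the addressing trick used throughout this file: kRootRegister is
// kept pointing at the isolate's root array plus kRootRegisterBias, so any
// external reference whose address lies within int32 range of that point can
// be reached as Operand(kRootRegister, delta) instead of materializing a
// 64-bit immediate. RootRegisterDelta() computes that displacement and returns
// kInvalidRootRegisterDelta when it must not be used (predictable code size is
// requested and the target lies outside the isolate object).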
70 
71 
72 Operand MacroAssembler::ExternalOperand(ExternalReference target,
73  Register scratch) {
74  if (root_array_available_ && !Serializer::enabled()) {
75  intptr_t delta = RootRegisterDelta(target);
76  if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
77  Serializer::TooLateToEnableNow();
78  return Operand(kRootRegister, static_cast<int32_t>(delta));
79  }
80  }
81  movq(scratch, target);
82  return Operand(scratch, 0);
83 }
84 
85 
86 void MacroAssembler::Load(Register destination, ExternalReference source) {
87  if (root_array_available_ && !Serializer::enabled()) {
88  intptr_t delta = RootRegisterDelta(source);
89  if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
90  Serializer::TooLateToEnableNow();
91  movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
92  return;
93  }
94  }
95  // Safe code.
96  if (destination.is(rax)) {
97  load_rax(source);
98  } else {
99  movq(kScratchRegister, source);
100  movq(destination, Operand(kScratchRegister, 0));
101  }
102 }
103 
104 
105 void MacroAssembler::Store(ExternalReference destination, Register source) {
106  if (root_array_available_ && !Serializer::enabled()) {
107  intptr_t delta = RootRegisterDelta(destination);
108  if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
109  Serializer::TooLateToEnableNow();
110  movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
111  return;
112  }
113  }
114  // Safe code.
115  if (source.is(rax)) {
116  store_rax(destination);
117  } else {
118  movq(kScratchRegister, destination);
119  movq(Operand(kScratchRegister, 0), source);
120  }
121 }
122 
123 
124 void MacroAssembler::LoadAddress(Register destination,
125  ExternalReference source) {
126  if (root_array_available_ && !Serializer::enabled()) {
127  intptr_t delta = RootRegisterDelta(source);
128  if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
129  Serializer::TooLateToEnableNow();
130  lea(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
131  return;
132  }
133  }
134  // Safe code.
135  movq(destination, source);
136 }
137 
138 
139 int MacroAssembler::LoadAddressSize(ExternalReference source) {
140  if (root_array_available_ && !Serializer::enabled()) {
141  // This calculation depends on the internals of LoadAddress.
142  // Its correctness is ensured by the asserts in the Call
143  // instruction below.
144  intptr_t delta = RootRegisterDelta(source);
145  if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
146  Serializer::TooLateToEnableNow();
147  // Operand is lea(scratch, Operand(kRootRegister, delta));
148  // Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
149  int size = 4;
150  if (!is_int8(static_cast<int32_t>(delta))) {
151  size += 3; // Need full four-byte displacement in lea.
152  }
153  return size;
154  }
155  }
156  // Size of movq(destination, src);
157  return 10;
158 }
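// Encoding background for the sizes above: lea off kRootRegister is
// REX.W + 8D + ModRM plus either a one-byte or a four-byte displacement
// (4 or 7 bytes), while the fallback movq of a full 64-bit immediate
// (REX.W + B8+r + imm64) is 10 bytes.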
159 
160 
161 void MacroAssembler::PushAddress(ExternalReference source) {
162  int64_t address = reinterpret_cast<int64_t>(source.address());
163  if (is_int32(address) && !Serializer::enabled()) {
164  if (emit_debug_code()) {
165  movq(kScratchRegister, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
166  }
167  push(Immediate(static_cast<int32_t>(address)));
168  return;
169  }
170  LoadAddress(kScratchRegister, source);
171  push(kScratchRegister);
172 }
173 
174 
175 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
176  ASSERT(root_array_available_);
177  movq(destination, Operand(kRootRegister,
178  (index << kPointerSizeLog2) - kRootRegisterBias));
179 }
180 
181 
182 void MacroAssembler::LoadRootIndexed(Register destination,
183  Register variable_offset,
184  int fixed_offset) {
185  ASSERT(root_array_available_);
186  movq(destination,
187  Operand(kRootRegister,
188  variable_offset, times_pointer_size,
189  (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
190 }
191 
192 
193 void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
194  ASSERT(root_array_available_);
195  movq(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
196  source);
197 }
198 
199 
200 void MacroAssembler::PushRoot(Heap::RootListIndex index) {
201  ASSERT(root_array_available_);
202  push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
203 }
204 
205 
206 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
207  ASSERT(root_array_available_);
208  cmpq(with, Operand(kRootRegister,
209  (index << kPointerSizeLog2) - kRootRegisterBias));
210 }
211 
212 
213 void MacroAssembler::CompareRoot(const Operand& with,
214  Heap::RootListIndex index) {
215  ASSERT(root_array_available_);
216  ASSERT(!with.AddressUsesRegister(kScratchRegister));
217  LoadRoot(kScratchRegister, index);
218  cmpq(with, kScratchRegister);
219 }
220 
221 
222 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
223  Register addr,
224  Register scratch,
225  SaveFPRegsMode save_fp,
226  RememberedSetFinalAction and_then) {
227  if (emit_debug_code()) {
228  Label ok;
229  JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
230  int3();
231  bind(&ok);
232  }
233  // Load store buffer top.
234  LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
235  // Store pointer to buffer.
236  movq(Operand(scratch, 0), addr);
237  // Increment buffer top.
238  addq(scratch, Immediate(kPointerSize));
239  // Write back new top of buffer.
240  StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
241  // Call stub on end of buffer.
242  Label done;
243  // Check for end of buffer.
244  testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
245  if (and_then == kReturnAtEnd) {
246  Label buffer_overflowed;
247  j(not_equal, &buffer_overflowed, Label::kNear);
248  ret(0);
249  bind(&buffer_overflowed);
250  } else {
251  ASSERT(and_then == kFallThroughAtEnd);
252  j(equal, &done, Label::kNear);
253  }
254  StoreBufferOverflowStub store_buffer_overflow =
255  StoreBufferOverflowStub(save_fp);
256  CallStub(&store_buffer_overflow);
257  if (and_then == kReturnAtEnd) {
258  ret(0);
259  } else {
260  ASSERT(and_then == kFallThroughAtEnd);
261  bind(&done);
262  }
263 }
264 
265 
266 void MacroAssembler::InNewSpace(Register object,
267  Register scratch,
268  Condition cc,
269  Label* branch,
270  Label::Distance distance) {
271  if (Serializer::enabled()) {
272  // Can't do arithmetic on external references if it might get serialized.
273  // The mask isn't really an address. We load it as an external reference in
274  // case the size of the new space is different between the snapshot maker
275  // and the running system.
276  if (scratch.is(object)) {
277  movq(kScratchRegister, ExternalReference::new_space_mask(isolate()));
278  and_(scratch, kScratchRegister);
279  } else {
280  movq(scratch, ExternalReference::new_space_mask(isolate()));
281  and_(scratch, object);
282  }
283  movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
284  cmpq(scratch, kScratchRegister);
285  j(cc, branch, distance);
286  } else {
287  ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
288  intptr_t new_space_start =
289  reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
290  movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
291  if (scratch.is(object)) {
292  addq(scratch, kScratchRegister);
293  } else {
294  lea(scratch, Operand(object, kScratchRegister, times_1, 0));
295  }
296  and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
297  j(cc, branch, distance);
298  }
299 }
300 
301 
302 void MacroAssembler::RecordWriteField(
303  Register object,
304  int offset,
305  Register value,
306  Register dst,
307  SaveFPRegsMode save_fp,
308  RememberedSetAction remembered_set_action,
309  SmiCheck smi_check) {
310  // The compiled code assumes that record write doesn't change the
311  // context register, so we check that none of the clobbered
312  // registers are rsi.
313  ASSERT(!value.is(rsi) && !dst.is(rsi));
314 
315  // First, check if a write barrier is even needed. The tests below
316  // catch stores of Smis.
317  Label done;
318 
319  // Skip barrier if writing a smi.
320  if (smi_check == INLINE_SMI_CHECK) {
321  JumpIfSmi(value, &done);
322  }
323 
324  // Although the object register is tagged, the offset is relative to the start
325  // of the object, so the offset must be a multiple of kPointerSize.
326  ASSERT(IsAligned(offset, kPointerSize));
327 
328  lea(dst, FieldOperand(object, offset));
329  if (emit_debug_code()) {
330  Label ok;
331  testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
332  j(zero, &ok, Label::kNear);
333  int3();
334  bind(&ok);
335  }
336 
337  RecordWrite(
338  object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
339 
340  bind(&done);
341 
342  // Clobber clobbered input registers when running with the debug-code flag
343  // turned on to provoke errors.
344  if (emit_debug_code()) {
345  movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
346  movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
347  }
348 }
349 
350 
351 void MacroAssembler::RecordWriteArray(Register object,
352  Register value,
353  Register index,
354  SaveFPRegsMode save_fp,
355  RememberedSetAction remembered_set_action,
356  SmiCheck smi_check) {
357  // First, check if a write barrier is even needed. The tests below
358  // catch stores of Smis.
359  Label done;
360 
361  // Skip barrier if writing a smi.
362  if (smi_check == INLINE_SMI_CHECK) {
363  JumpIfSmi(value, &done);
364  }
365 
366  // Array access: calculate the destination address. Index is not a smi.
367  Register dst = index;
368  lea(dst, Operand(object, index, times_pointer_size,
369  FixedArray::kHeaderSize - kHeapObjectTag));
370 
371  RecordWrite(
372  object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
373 
374  bind(&done);
375 
376  // Clobber clobbered input registers when running with the debug-code flag
377  // turned on to provoke errors.
378  if (emit_debug_code()) {
379  movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
380  movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
381  }
382 }
383 
384 
385 void MacroAssembler::RecordWrite(Register object,
386  Register address,
387  Register value,
388  SaveFPRegsMode fp_mode,
389  RememberedSetAction remembered_set_action,
390  SmiCheck smi_check) {
391  // The compiled code assumes that record write doesn't change the
392  // context register, so we check that none of the clobbered
393  // registers are rsi.
394  ASSERT(!value.is(rsi) && !address.is(rsi));
395 
396  ASSERT(!object.is(value));
397  ASSERT(!object.is(address));
398  ASSERT(!value.is(address));
399  AssertNotSmi(object);
400 
401  if (remembered_set_action == OMIT_REMEMBERED_SET &&
402  !FLAG_incremental_marking) {
403  return;
404  }
405 
406  if (emit_debug_code()) {
407  Label ok;
408  cmpq(value, Operand(address, 0));
409  j(equal, &ok, Label::kNear);
410  int3();
411  bind(&ok);
412  }
413 
414  // First, check if a write barrier is even needed. The tests below
415  // catch stores of smis and stores into the young generation.
416  Label done;
417 
418  if (smi_check == INLINE_SMI_CHECK) {
419  // Skip barrier if writing a smi.
420  JumpIfSmi(value, &done);
421  }
422 
423  CheckPageFlag(value,
424  value, // Used as scratch.
425  MemoryChunk::kPointersToHereAreInterestingMask,
426  zero,
427  &done,
428  Label::kNear);
429 
430  CheckPageFlag(object,
431  value, // Used as scratch.
432  MemoryChunk::kPointersFromHereAreInterestingMask,
433  zero,
434  &done,
435  Label::kNear);
436 
437  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
438  CallStub(&stub);
439 
440  bind(&done);
441 
442  // Clobber clobbered registers when running with the debug-code flag
443  // turned on to provoke errors.
444  if (emit_debug_code()) {
445  movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
446  movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
447  }
448 }
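// Summary of the fast paths above: the barrier is skipped entirely when the
// stored value is a smi, when the page holding the value does not need
// "pointers to here" tracking, or when the page holding the object is not
// tracked for outgoing pointers; only the remaining stores reach
// RecordWriteStub.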
449 
450 
451 void MacroAssembler::Assert(Condition cc, const char* msg) {
452  if (emit_debug_code()) Check(cc, msg);
453 }
454 
455 
456 void MacroAssembler::AssertFastElements(Register elements) {
457  if (emit_debug_code()) {
458  Label ok;
459  CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
460  Heap::kFixedArrayMapRootIndex);
461  j(equal, &ok, Label::kNear);
462  CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
463  Heap::kFixedDoubleArrayMapRootIndex);
464  j(equal, &ok, Label::kNear);
465  CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
466  Heap::kFixedCOWArrayMapRootIndex);
467  j(equal, &ok, Label::kNear);
468  Abort("JSObject with fast elements map has slow elements");
469  bind(&ok);
470  }
471 }
472 
473 
474 void MacroAssembler::Check(Condition cc, const char* msg) {
475  Label L;
476  j(cc, &L, Label::kNear);
477  Abort(msg);
478  // Control will not return here.
479  bind(&L);
480 }
481 
482 
483 void MacroAssembler::CheckStackAlignment() {
484  int frame_alignment = OS::ActivationFrameAlignment();
485  int frame_alignment_mask = frame_alignment - 1;
486  if (frame_alignment > kPointerSize) {
487  ASSERT(IsPowerOf2(frame_alignment));
488  Label alignment_as_expected;
489  testq(rsp, Immediate(frame_alignment_mask));
490  j(zero, &alignment_as_expected, Label::kNear);
491  // Abort if stack is not aligned.
492  int3();
493  bind(&alignment_as_expected);
494  }
495 }
496 
497 
498 void MacroAssembler::NegativeZeroTest(Register result,
499  Register op,
500  Label* then_label) {
501  Label ok;
502  testl(result, result);
503  j(not_zero, &ok, Label::kNear);
504  testl(op, op);
505  j(sign, then_label);
506  bind(&ok);
507 }
508 
509 
510 void MacroAssembler::Abort(const char* msg) {
511  // We want to pass the msg string like a smi to avoid GC
512  // problems, however msg is not guaranteed to be aligned
513  // properly. Instead, we pass an aligned pointer that is
514  // a proper v8 smi, but also pass the alignment difference
515  // from the real pointer as a smi.
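// For example, with a hypothetical msg address of 0x00007f0012345679, p0
// below is 0x00007f0012345678 (tag bit cleared, so it carries a smi tag) and
// the accompanying smi encodes the difference 1, letting the runtime recover
// the real pointer.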
516  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
517  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
518  // Note: p0 might not be a valid Smi _value_, but it has a valid Smi tag.
519  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
520 #ifdef DEBUG
521  if (msg != NULL) {
522  RecordComment("Abort message: ");
523  RecordComment(msg);
524  }
525 #endif
526  push(rax);
527  movq(kScratchRegister, p0, RelocInfo::NONE);
528  push(kScratchRegister);
529  movq(kScratchRegister,
530  reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
531  RelocInfo::NONE);
532  push(kScratchRegister);
533 
534  if (!has_frame_) {
535  // We don't actually want to generate a pile of code for this, so just
536  // claim there is a stack frame, without generating one.
537  FrameScope scope(this, StackFrame::NONE);
538  CallRuntime(Runtime::kAbort, 2);
539  } else {
540  CallRuntime(Runtime::kAbort, 2);
541  }
542  // Control will not return here.
543  int3();
544 }
545 
546 
547 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
548  ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
549  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
550 }
551 
552 
553 void MacroAssembler::TailCallStub(CodeStub* stub) {
554  ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
555  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
556 }
557 
558 
559 void MacroAssembler::StubReturn(int argc) {
560  ASSERT(argc >= 1 && generating_stub());
561  ret((argc - 1) * kPointerSize);
562 }
563 
564 
565 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
566  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
567  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
568 }
569 
570 
571 void MacroAssembler::IllegalOperation(int num_arguments) {
572  if (num_arguments > 0) {
573  addq(rsp, Immediate(num_arguments * kPointerSize));
574  }
575  LoadRoot(rax, Heap::kUndefinedValueRootIndex);
576 }
577 
578 
579 void MacroAssembler::IndexFromHash(Register hash, Register index) {
580  // The assert checks that the constants for the maximum number of digits
581  // for an array index cached in the hash field and the number of bits
582  // reserved for it do not conflict.
583  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
584  (1 << String::kArrayIndexValueBits));
585  // We want the smi-tagged index in key. Even if we subsequently go to
586  // the slow case, converting the key to a smi is always valid.
587  // key: string key
588  // hash: key's hash field, including its array index value.
589  and_(hash, Immediate(String::kArrayIndexValueMask));
590  shr(hash, Immediate(String::kHashShift));
591  // Here we actually clobber the key which will be used if calling into
592  // runtime later. However as the new key is the numeric value of a string key
593  // there is no difference in using either key.
594  Integer32ToSmi(index, hash);
595 }
596 
597 
598 void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
599  CallRuntime(Runtime::FunctionForId(id), num_arguments);
600 }
601 
602 
603 void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
604  const Runtime::Function* function = Runtime::FunctionForId(id);
605  Set(rax, function->nargs);
606  LoadAddress(rbx, ExternalReference(function, isolate()));
607  CEntryStub ces(1, kSaveFPRegs);
608  CallStub(&ces);
609 }
610 
611 
612 void MacroAssembler::CallRuntime(const Runtime::Function* f,
613  int num_arguments) {
614  // If the expected number of arguments of the runtime function is
615  // constant, we check that the actual number of arguments match the
616  // expectation.
617  if (f->nargs >= 0 && f->nargs != num_arguments) {
618  IllegalOperation(num_arguments);
619  return;
620  }
621 
622  // TODO(1236192): Most runtime routines don't need the number of
623  // arguments passed in because it is constant. At some point we
624  // should remove this need and make the runtime routine entry code
625  // smarter.
626  Set(rax, num_arguments);
627  LoadAddress(rbx, ExternalReference(f, isolate()));
628  CEntryStub ces(f->result_size);
629  CallStub(&ces);
630 }
631 
632 
633 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
634  int num_arguments) {
635  Set(rax, num_arguments);
636  LoadAddress(rbx, ext);
637 
638  CEntryStub stub(1);
639  CallStub(&stub);
640 }
641 
642 
643 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
644  int num_arguments,
645  int result_size) {
646  // ----------- S t a t e -------------
647  // -- rsp[0] : return address
648  // -- rsp[8] : argument num_arguments - 1
649  // ...
650  // -- rsp[8 * num_arguments] : argument 0 (receiver)
651  // -----------------------------------
652 
653  // TODO(1236192): Most runtime routines don't need the number of
654  // arguments passed in because it is constant. At some point we
655  // should remove this need and make the runtime routine entry code
656  // smarter.
657  Set(rax, num_arguments);
658  JumpToExternalReference(ext, result_size);
659 }
660 
661 
662 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
663  int num_arguments,
664  int result_size) {
665  TailCallExternalReference(ExternalReference(fid, isolate()),
666  num_arguments,
667  result_size);
668 }
669 
670 
671 static int Offset(ExternalReference ref0, ExternalReference ref1) {
672  int64_t offset = (ref0.address() - ref1.address());
673  // Check that the offset fits into an int.
674  ASSERT(static_cast<int>(offset) == offset);
675  return static_cast<int>(offset);
676 }
677 
678 
679 void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
680 #if defined(_WIN64) && !defined(__MINGW64__)
681  // We need to prepare a slot for result handle on stack and put
682  // a pointer to it into 1st arg register.
683  EnterApiExitFrame(arg_stack_space + 1);
684 
685  // rcx must be used to pass the pointer to the return value slot.
686  lea(rcx, StackSpaceOperand(arg_stack_space));
687 #else
688  EnterApiExitFrame(arg_stack_space);
689 #endif
690 }
691 
692 
693 void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
694  int stack_space) {
695  Label empty_result;
696  Label prologue;
697  Label promote_scheduled_exception;
698  Label delete_allocated_handles;
699  Label leave_exit_frame;
700  Label write_back;
701 
702  Factory* factory = isolate()->factory();
703  ExternalReference next_address =
704  ExternalReference::handle_scope_next_address();
705  const int kNextOffset = 0;
706  const int kLimitOffset = Offset(
707  ExternalReference::handle_scope_limit_address(),
708  next_address);
709  const int kLevelOffset = Offset(
710  ExternalReference::handle_scope_level_address(),
711  next_address);
712  ExternalReference scheduled_exception_address =
713  ExternalReference::scheduled_exception_address(isolate());
714 
715  // Allocate HandleScope in callee-save registers.
716  Register prev_next_address_reg = r14;
717  Register prev_limit_reg = rbx;
718  Register base_reg = r15;
719  movq(base_reg, next_address);
720  movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
721  movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
722  addl(Operand(base_reg, kLevelOffset), Immediate(1));
723  // Call the api function!
724  movq(rax, reinterpret_cast<int64_t>(function_address),
725  RelocInfo::RUNTIME_ENTRY);
726  call(rax);
727 
728 #if defined(_WIN64) && !defined(__MINGW64__)
729  // rax keeps a pointer to v8::Handle, unpack it.
730  movq(rax, Operand(rax, 0));
731 #endif
732  // Check if the result handle holds 0.
733  testq(rax, rax);
734  j(zero, &empty_result);
735  // It was non-zero. Dereference to get the result value.
736  movq(rax, Operand(rax, 0));
737  bind(&prologue);
738 
739  // No more valid handles (the result handle was the last one). Restore
740  // previous handle scope.
741  subl(Operand(base_reg, kLevelOffset), Immediate(1));
742  movq(Operand(base_reg, kNextOffset), prev_next_address_reg);
743  cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset));
744  j(not_equal, &delete_allocated_handles);
745  bind(&leave_exit_frame);
746 
747  // Check if the function scheduled an exception.
748  movq(rsi, scheduled_exception_address);
749  Cmp(Operand(rsi, 0), factory->the_hole_value());
750  j(not_equal, &promote_scheduled_exception);
751 
752 #if ENABLE_EXTRA_CHECKS
753  // Check if the function returned a valid JavaScript value.
754  Label ok;
755  Register return_value = rax;
756  Register map = rcx;
757 
758  JumpIfSmi(return_value, &ok, Label::kNear);
759  movq(map, FieldOperand(return_value, HeapObject::kMapOffset));
760 
761  CmpInstanceType(map, FIRST_NONSTRING_TYPE);
762  j(below, &ok, Label::kNear);
763 
764  CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
765  j(above_equal, &ok, Label::kNear);
766 
767  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
768  j(equal, &ok, Label::kNear);
769 
770  CompareRoot(return_value, Heap::kUndefinedValueRootIndex);
771  j(equal, &ok, Label::kNear);
772 
773  CompareRoot(return_value, Heap::kTrueValueRootIndex);
774  j(equal, &ok, Label::kNear);
775 
776  CompareRoot(return_value, Heap::kFalseValueRootIndex);
777  j(equal, &ok, Label::kNear);
778 
779  CompareRoot(return_value, Heap::kNullValueRootIndex);
780  j(equal, &ok, Label::kNear);
781 
782  Abort("API call returned invalid object");
783 
784  bind(&ok);
785 #endif
786 
787  LeaveApiExitFrame();
788  ret(stack_space * kPointerSize);
789 
790  bind(&empty_result);
791  // It was zero; the result is undefined.
792  LoadRoot(rax, Heap::kUndefinedValueRootIndex);
793  jmp(&prologue);
794 
795  bind(&promote_scheduled_exception);
796  TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
797 
798  // HandleScope limit has changed. Delete allocated extensions.
799  bind(&delete_allocated_handles);
800  movq(Operand(base_reg, kLimitOffset), prev_limit_reg);
801  movq(prev_limit_reg, rax);
802 #ifdef _WIN64
803  LoadAddress(rcx, ExternalReference::isolate_address());
804 #else
805  LoadAddress(rdi, ExternalReference::isolate_address());
806 #endif
807  LoadAddress(rax,
808  ExternalReference::delete_handle_scope_extensions(isolate()));
809  call(rax);
810  movq(rax, prev_limit_reg);
811  jmp(&leave_exit_frame);
812 }
813 
814 
815 void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
816  int result_size) {
817  // Set the entry point and jump to the C entry runtime stub.
818  LoadAddress(rbx, ext);
819  CEntryStub ces(result_size);
820  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
821 }
822 
823 
824 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
825  InvokeFlag flag,
826  const CallWrapper& call_wrapper) {
827  // You can't call a builtin without a valid frame.
828  ASSERT(flag == JUMP_FUNCTION || has_frame());
829 
830  // Rely on the assertion to check that the number of provided
831  // arguments match the expected number of arguments. Fake a
832  // parameter count to avoid emitting code to do the check.
833  ParameterCount expected(0);
834  GetBuiltinEntry(rdx, id);
835  InvokeCode(rdx, expected, expected, flag, call_wrapper, CALL_AS_METHOD);
836 }
837 
838 
839 void MacroAssembler::GetBuiltinFunction(Register target,
840  Builtins::JavaScript id) {
841  // Load the builtins object into target register.
842  movq(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
843  movq(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
844  movq(target, FieldOperand(target,
845  JSBuiltinsObject::OffsetOfFunctionWithId(id)));
846 }
847 
848 
849 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
850  ASSERT(!target.is(rdi));
851  // Load the JavaScript builtin function from the builtins object.
852  GetBuiltinFunction(rdi, id);
853  movq(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
854 }
855 
856 
857 #define REG(Name) { kRegister_ ## Name ## _Code }
858 
859 static const Register saved_regs[] = {
860  REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
861  REG(r9), REG(r10), REG(r11)
862 };
863 
864 #undef REG
865 
866 static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
867 
868 
869 void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
870  Register exclusion1,
871  Register exclusion2,
872  Register exclusion3) {
873  // We don't allow a GC during a store buffer overflow so there is no need to
874  // store the registers in any particular way, but we do have to store and
875  // restore them.
876  for (int i = 0; i < kNumberOfSavedRegs; i++) {
877  Register reg = saved_regs[i];
878  if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
879  push(reg);
880  }
881  }
882  // r12 to r15 are callee-saved on all platforms.
883  if (fp_mode == kSaveFPRegs) {
884  CpuFeatures::Scope scope(SSE2);
885  subq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
886  for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
887  XMMRegister reg = XMMRegister::from_code(i);
888  movsd(Operand(rsp, i * kDoubleSize), reg);
889  }
890  }
891 }
892 
893 
894 void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
895  Register exclusion1,
896  Register exclusion2,
897  Register exclusion3) {
898  if (fp_mode == kSaveFPRegs) {
899  CpuFeatures::Scope scope(SSE2);
900  for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
901  XMMRegister reg = XMMRegister::from_code(i);
902  movsd(reg, Operand(rsp, i * kDoubleSize));
903  }
904  addq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
905  }
906  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
907  Register reg = saved_regs[i];
908  if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
909  pop(reg);
910  }
911  }
912 }
913 
914 
915 void MacroAssembler::Set(Register dst, int64_t x) {
916  if (x == 0) {
917  xorl(dst, dst);
918  } else if (is_uint32(x)) {
919  movl(dst, Immediate(static_cast<uint32_t>(x)));
920  } else if (is_int32(x)) {
921  movq(dst, Immediate(static_cast<int32_t>(x)));
922  } else {
923  movq(dst, x, RelocInfo::NONE);
924  }
925 }
926 
927 void MacroAssembler::Set(const Operand& dst, int64_t x) {
928  if (is_int32(x)) {
929  movq(dst, Immediate(static_cast<int32_t>(x)));
930  } else {
931  Set(kScratchRegister, x);
932  movq(dst, kScratchRegister);
933  }
934 }
935 
936 
937 bool MacroAssembler::IsUnsafeInt(const int x) {
938  static const int kMaxBits = 17;
939  return !is_intn(x, kMaxBits);
940 }
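// IsUnsafeInt() flags immediates wider than kMaxBits. SafeMove and SafePush
// below XOR such smi constants with the per-isolate JIT cookie (when one is
// set) before embedding them, so attacker-chosen 32-bit payloads cannot be
// planted verbatim in generated code (a JIT-spraying mitigation).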
941 
942 
943 void MacroAssembler::SafeMove(Register dst, Smi* src) {
944  ASSERT(!dst.is(kScratchRegister));
945  ASSERT(kSmiValueSize == 32); // JIT cookie can be converted to Smi.
946  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
947  Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
948  Move(kScratchRegister, Smi::FromInt(jit_cookie()));
949  xor_(dst, kScratchRegister);
950  } else {
951  Move(dst, src);
952  }
953 }
954 
955 
956 void MacroAssembler::SafePush(Smi* src) {
957  ASSERT(kSmiValueSize == 32); // JIT cookie can be converted to Smi.
958  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
959  Push(Smi::FromInt(src->value() ^ jit_cookie()));
960  Move(kScratchRegister, Smi::FromInt(jit_cookie()));
961  xor_(Operand(rsp, 0), kScratchRegister);
962  } else {
963  Push(src);
964  }
965 }
966 
967 
968 // ----------------------------------------------------------------------------
969 // Smi tagging, untagging and tag detection.
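// On x64 a smi stores its 32-bit signed value in the upper half of the 64-bit
// word (kSmiShift == 32) and keeps the lower 32 bits, including the tag bit,
// zero. For example, the integer 5 is represented as 0x0000000500000000:
// tagging is a shift left by 32, untagging is a shift right by 32 (arithmetic
// for signed values, logical for known non-negative ones).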
970 
971 Register MacroAssembler::GetSmiConstant(Smi* source) {
972  int value = source->value();
973  if (value == 0) {
974  xorl(kScratchRegister, kScratchRegister);
975  return kScratchRegister;
976  }
977  if (value == 1) {
978  return kSmiConstantRegister;
979  }
980  LoadSmiConstant(kScratchRegister, source);
981  return kScratchRegister;
982 }
983 
984 void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
985  if (emit_debug_code()) {
986  movq(dst,
987  reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
988  RelocInfo::NONE);
989  cmpq(dst, kSmiConstantRegister);
990  if (allow_stub_calls()) {
991  Assert(equal, "Uninitialized kSmiConstantRegister");
992  } else {
993  Label ok;
994  j(equal, &ok, Label::kNear);
995  int3();
996  bind(&ok);
997  }
998  }
999  int value = source->value();
1000  if (value == 0) {
1001  xorl(dst, dst);
1002  return;
1003  }
1004  bool negative = value < 0;
1005  unsigned int uvalue = negative ? -value : value;
1006 
1007  switch (uvalue) {
1008  case 9:
1009  lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
1010  break;
1011  case 8:
1012  xorl(dst, dst);
1013  lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
1014  break;
1015  case 4:
1016  xorl(dst, dst);
1017  lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
1018  break;
1019  case 5:
1020  lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
1021  break;
1022  case 3:
1023  lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
1024  break;
1025  case 2:
1026  lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
1027  break;
1028  case 1:
1029  movq(dst, kSmiConstantRegister);
1030  break;
1031  case 0:
1032  UNREACHABLE();
1033  return;
1034  default:
1035  movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE);
1036  return;
1037  }
1038  if (negative) {
1039  neg(dst);
1040  }
1041 }
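// The cases above exploit kSmiConstantRegister, which always holds
// Smi::FromInt(1): lea with that register as base and/or scaled index
// (times 1, 2, 4 or 8, with a zeroed base where needed) builds the smi
// constants 2, 3, 4, 5, 8 and 9 without loading a 64-bit immediate; negative
// values reuse the same paths and negate at the end.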
1042 
1043 
1044 void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
1045  STATIC_ASSERT(kSmiTag == 0);
1046  if (!dst.is(src)) {
1047  movl(dst, src);
1048  }
1049  shl(dst, Immediate(kSmiShift));
1050 }
1051 
1052 
1053 void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
1054  if (emit_debug_code()) {
1055  testb(dst, Immediate(0x01));
1056  Label ok;
1057  j(zero, &ok, Label::kNear);
1058  if (allow_stub_calls()) {
1059  Abort("Integer32ToSmiField writing to non-smi location");
1060  } else {
1061  int3();
1062  }
1063  bind(&ok);
1064  }
1065  ASSERT(kSmiShift % kBitsPerByte == 0);
1066  movl(Operand(dst, kSmiShift / kBitsPerByte), src);
1067 }
1068 
1069 
1070 void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
1071  Register src,
1072  int constant) {
1073  if (dst.is(src)) {
1074  addl(dst, Immediate(constant));
1075  } else {
1076  leal(dst, Operand(src, constant));
1077  }
1078  shl(dst, Immediate(kSmiShift));
1079 }
1080 
1081 
1082 void MacroAssembler::SmiToInteger32(Register dst, Register src) {
1083  STATIC_ASSERT(kSmiTag == 0);
1084  if (!dst.is(src)) {
1085  movq(dst, src);
1086  }
1087  shr(dst, Immediate(kSmiShift));
1088 }
1089 
1090 
1091 void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
1092  movl(dst, Operand(src, kSmiShift / kBitsPerByte));
1093 }
1094 
1095 
1096 void MacroAssembler::SmiToInteger64(Register dst, Register src) {
1097  STATIC_ASSERT(kSmiTag == 0);
1098  if (!dst.is(src)) {
1099  movq(dst, src);
1100  }
1101  sar(dst, Immediate(kSmiShift));
1102 }
1103 
1104 
1105 void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
1106  movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
1107 }
1108 
1109 
1110 void MacroAssembler::SmiTest(Register src) {
1111  testq(src, src);
1112 }
1113 
1114 
1115 void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
1116  AssertSmi(smi1);
1117  AssertSmi(smi2);
1118  cmpq(smi1, smi2);
1119 }
1120 
1121 
1122 void MacroAssembler::SmiCompare(Register dst, Smi* src) {
1123  AssertSmi(dst);
1124  Cmp(dst, src);
1125 }
1126 
1127 
1128 void MacroAssembler::Cmp(Register dst, Smi* src) {
1129  ASSERT(!dst.is(kScratchRegister));
1130  if (src->value() == 0) {
1131  testq(dst, dst);
1132  } else {
1133  Register constant_reg = GetSmiConstant(src);
1134  cmpq(dst, constant_reg);
1135  }
1136 }
1137 
1138 
1139 void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
1140  AssertSmi(dst);
1141  AssertSmi(src);
1142  cmpq(dst, src);
1143 }
1144 
1145 
1146 void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
1147  AssertSmi(dst);
1148  AssertSmi(src);
1149  cmpq(dst, src);
1150 }
1151 
1152 
1153 void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
1154  AssertSmi(dst);
1155  cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
1156 }
1157 
1158 
1159 void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
1160  // The Operand cannot use the smi register.
1161  Register smi_reg = GetSmiConstant(src);
1162  ASSERT(!dst.AddressUsesRegister(smi_reg));
1163  cmpq(dst, smi_reg);
1164 }
1165 
1166 
1167 void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
1168  cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
1169 }
1170 
1171 
1172 void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
1173  Register src,
1174  int power) {
1175  ASSERT(power >= 0);
1176  ASSERT(power < 64);
1177  if (power == 0) {
1178  SmiToInteger64(dst, src);
1179  return;
1180  }
1181  if (!dst.is(src)) {
1182  movq(dst, src);
1183  }
1184  if (power < kSmiShift) {
1185  sar(dst, Immediate(kSmiShift - power));
1186  } else if (power > kSmiShift) {
1187  shl(dst, Immediate(power - kSmiShift));
1188  }
1189 }
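// Worked example: with kSmiShift == 32, a src holding the smi 3
// (0x0000000300000000) and power == 4 takes the sar branch with a shift of
// 28, producing 48 == 3 * 2^4 directly as an untagged 64-bit integer.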
1190 
1191 
1192 void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
1193  Register src,
1194  int power) {
1195  ASSERT((0 <= power) && (power < 32));
1196  if (dst.is(src)) {
1197  shr(dst, Immediate(power + kSmiShift));
1198  } else {
1199  UNIMPLEMENTED(); // Not used.
1200  }
1201 }
1202 
1203 
1204 void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
1205  Label* on_not_smis,
1206  Label::Distance near_jump) {
1207  if (dst.is(src1) || dst.is(src2)) {
1208  ASSERT(!src1.is(kScratchRegister));
1209  ASSERT(!src2.is(kScratchRegister));
1210  movq(kScratchRegister, src1);
1211  or_(kScratchRegister, src2);
1212  JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
1213  movq(dst, kScratchRegister);
1214  } else {
1215  movq(dst, src1);
1216  or_(dst, src2);
1217  JumpIfNotSmi(dst, on_not_smis, near_jump);
1218  }
1219 }
1220 
1221 
1222 Condition MacroAssembler::CheckSmi(Register src) {
1223  STATIC_ASSERT(kSmiTag == 0);
1224  testb(src, Immediate(kSmiTagMask));
1225  return zero;
1226 }
1227 
1228 
1229 Condition MacroAssembler::CheckSmi(const Operand& src) {
1230  STATIC_ASSERT(kSmiTag == 0);
1231  testb(src, Immediate(kSmiTagMask));
1232  return zero;
1233 }
1234 
1235 
1236 Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
1237  STATIC_ASSERT(kSmiTag == 0);
1238  // Test that both bits of the mask 0x8000000000000001 are zero.
1239  movq(kScratchRegister, src);
1240  rol(kScratchRegister, Immediate(1));
1241  testb(kScratchRegister, Immediate(3));
1242  return zero;
1243 }
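// The rotate-left-by-one above moves the sign bit into bit 0 and the smi tag
// bit into bit 1, so a single testb against 3 checks both "is a smi" and
// "is non-negative".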
1244 
1245 
1246 Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
1247  if (first.is(second)) {
1248  return CheckSmi(first);
1249  }
1250  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
1251  leal(kScratchRegister, Operand(first, second, times_1, 0));
1252  testb(kScratchRegister, Immediate(0x03));
1253  return zero;
1254 }
1255 
1256 
1257 Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
1258  Register second) {
1259  if (first.is(second)) {
1260  return CheckNonNegativeSmi(first);
1261  }
1262  movq(kScratchRegister, first);
1263  or_(kScratchRegister, second);
1264  rol(kScratchRegister, Immediate(1));
1265  testl(kScratchRegister, Immediate(3));
1266  return zero;
1267 }
1268 
1269 
1270 Condition MacroAssembler::CheckEitherSmi(Register first,
1271  Register second,
1272  Register scratch) {
1273  if (first.is(second)) {
1274  return CheckSmi(first);
1275  }
1276  if (scratch.is(second)) {
1277  andl(scratch, first);
1278  } else {
1279  if (!scratch.is(first)) {
1280  movl(scratch, first);
1281  }
1282  andl(scratch, second);
1283  }
1284  testb(scratch, Immediate(kSmiTagMask));
1285  return zero;
1286 }
1287 
1288 
1289 Condition MacroAssembler::CheckIsMinSmi(Register src) {
1290  ASSERT(!src.is(kScratchRegister));
1291  // If we overflow by subtracting one, it's the minimal smi value.
1292  cmpq(src, kSmiConstantRegister);
1293  return overflow;
1294 }
1295 
1296 
1297 Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
1298  // A 32-bit integer value can always be converted to a smi.
1299  return always;
1300 }
1301 
1302 
1303 Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
1304  // An unsigned 32-bit integer value is valid as long as the high bit
1305  // is not set.
1306  testl(src, src);
1307  return positive;
1308 }
1309 
1310 
1311 void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
1312  if (dst.is(src)) {
1313  andl(dst, Immediate(kSmiTagMask));
1314  } else {
1315  movl(dst, Immediate(kSmiTagMask));
1316  andl(dst, src);
1317  }
1318 }
1319 
1320 
1321 void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
1322  if (!(src.AddressUsesRegister(dst))) {
1323  movl(dst, Immediate(kSmiTagMask));
1324  andl(dst, src);
1325  } else {
1326  movl(dst, src);
1327  andl(dst, Immediate(kSmiTagMask));
1328  }
1329 }
1330 
1331 
1332 void MacroAssembler::JumpIfNotValidSmiValue(Register src,
1333  Label* on_invalid,
1334  Label::Distance near_jump) {
1335  Condition is_valid = CheckInteger32ValidSmiValue(src);
1336  j(NegateCondition(is_valid), on_invalid, near_jump);
1337 }
1338 
1339 
1340 void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
1341  Label* on_invalid,
1342  Label::Distance near_jump) {
1343  Condition is_valid = CheckUInteger32ValidSmiValue(src);
1344  j(NegateCondition(is_valid), on_invalid, near_jump);
1345 }
1346 
1347 
1348 void MacroAssembler::JumpIfSmi(Register src,
1349  Label* on_smi,
1350  Label::Distance near_jump) {
1351  Condition smi = CheckSmi(src);
1352  j(smi, on_smi, near_jump);
1353 }
1354 
1355 
1356 void MacroAssembler::JumpIfNotSmi(Register src,
1357  Label* on_not_smi,
1358  Label::Distance near_jump) {
1359  Condition smi = CheckSmi(src);
1360  j(NegateCondition(smi), on_not_smi, near_jump);
1361 }
1362 
1363 
1364 void MacroAssembler::JumpUnlessNonNegativeSmi(
1365  Register src, Label* on_not_smi_or_negative,
1366  Label::Distance near_jump) {
1367  Condition non_negative_smi = CheckNonNegativeSmi(src);
1368  j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
1369 }
1370 
1371 
1372 void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
1373  Smi* constant,
1374  Label* on_equals,
1375  Label::Distance near_jump) {
1376  SmiCompare(src, constant);
1377  j(equal, on_equals, near_jump);
1378 }
1379 
1380 
1381 void MacroAssembler::JumpIfNotBothSmi(Register src1,
1382  Register src2,
1383  Label* on_not_both_smi,
1384  Label::Distance near_jump) {
1385  Condition both_smi = CheckBothSmi(src1, src2);
1386  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
1387 }
1388 
1389 
1390 void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
1391  Register src2,
1392  Label* on_not_both_smi,
1393  Label::Distance near_jump) {
1394  Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
1395  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
1396 }
1397 
1398 
1399 void MacroAssembler::SmiTryAddConstant(Register dst,
1400  Register src,
1401  Smi* constant,
1402  Label* on_not_smi_result,
1403  Label::Distance near_jump) {
1404  // Does not assume that src is a smi.
1405  ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
1406  STATIC_ASSERT(kSmiTag == 0);
1407  ASSERT(!dst.is(kScratchRegister));
1408  ASSERT(!src.is(kScratchRegister));
1409 
1410  JumpIfNotSmi(src, on_not_smi_result, near_jump);
1411  Register tmp = (dst.is(src) ? kScratchRegister : dst);
1412  LoadSmiConstant(tmp, constant);
1413  addq(tmp, src);
1414  j(overflow, on_not_smi_result, near_jump);
1415  if (dst.is(src)) {
1416  movq(dst, tmp);
1417  }
1418 }
1419 
1420 
1421 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
1422  if (constant->value() == 0) {
1423  if (!dst.is(src)) {
1424  movq(dst, src);
1425  }
1426  return;
1427  } else if (dst.is(src)) {
1428  ASSERT(!dst.is(kScratchRegister));
1429  switch (constant->value()) {
1430  case 1:
1431  addq(dst, kSmiConstantRegister);
1432  return;
1433  case 2:
1434  lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
1435  return;
1436  case 4:
1437  lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
1438  return;
1439  case 8:
1440  lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
1441  return;
1442  default:
1443  Register constant_reg = GetSmiConstant(constant);
1444  addq(dst, constant_reg);
1445  return;
1446  }
1447  } else {
1448  switch (constant->value()) {
1449  case 1:
1450  lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
1451  return;
1452  case 2:
1453  lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
1454  return;
1455  case 4:
1456  lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
1457  return;
1458  case 8:
1459  lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
1460  return;
1461  default:
1462  LoadSmiConstant(dst, constant);
1463  addq(dst, src);
1464  return;
1465  }
1466  }
1467 }
1468 
1469 
1470 void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
1471  if (constant->value() != 0) {
1472  addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
1473  }
1474 }
1475 
1476 
1477 void MacroAssembler::SmiAddConstant(Register dst,
1478  Register src,
1479  Smi* constant,
1480  Label* on_not_smi_result,
1481  Label::Distance near_jump) {
1482  if (constant->value() == 0) {
1483  if (!dst.is(src)) {
1484  movq(dst, src);
1485  }
1486  } else if (dst.is(src)) {
1487  ASSERT(!dst.is(kScratchRegister));
1488 
1489  LoadSmiConstant(kScratchRegister, constant);
1490  addq(kScratchRegister, src);
1491  j(overflow, on_not_smi_result, near_jump);
1492  movq(dst, kScratchRegister);
1493  } else {
1494  LoadSmiConstant(dst, constant);
1495  addq(dst, src);
1496  j(overflow, on_not_smi_result, near_jump);
1497  }
1498 }
1499 
1500 
1501 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
1502  if (constant->value() == 0) {
1503  if (!dst.is(src)) {
1504  movq(dst, src);
1505  }
1506  } else if (dst.is(src)) {
1507  ASSERT(!dst.is(kScratchRegister));
1508  Register constant_reg = GetSmiConstant(constant);
1509  subq(dst, constant_reg);
1510  } else {
1511  if (constant->value() == Smi::kMinValue) {
1512  LoadSmiConstant(dst, constant);
1513  // Adding and subtracting the min-value gives the same result; it only
1514  // differs on the overflow bit, which we don't check here.
1515  addq(dst, src);
1516  } else {
1517  // Subtract by adding the negation.
1518  LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
1519  addq(dst, src);
1520  }
1521  }
1522 }
1523 
1524 
1525 void MacroAssembler::SmiSubConstant(Register dst,
1526  Register src,
1527  Smi* constant,
1528  Label* on_not_smi_result,
1529  Label::Distance near_jump) {
1530  if (constant->value() == 0) {
1531  if (!dst.is(src)) {
1532  movq(dst, src);
1533  }
1534  } else if (dst.is(src)) {
1535  ASSERT(!dst.is(kScratchRegister));
1536  if (constant->value() == Smi::kMinValue) {
1537  // Subtracting min-value from any non-negative value will overflow.
1538  // We test the non-negativeness before doing the subtraction.
1539  testq(src, src);
1540  j(not_sign, on_not_smi_result, near_jump);
1541  LoadSmiConstant(kScratchRegister, constant);
1542  subq(dst, kScratchRegister);
1543  } else {
1544  // Subtract by adding the negation.
1545  LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
1546  addq(kScratchRegister, dst);
1547  j(overflow, on_not_smi_result, near_jump);
1548  movq(dst, kScratchRegister);
1549  }
1550  } else {
1551  if (constant->value() == Smi::kMinValue) {
1552  // Subtracting min-value from any non-negative value will overflow.
1553  // We test the non-negativeness before doing the subtraction.
1554  testq(src, src);
1555  j(not_sign, on_not_smi_result, near_jump);
1556  LoadSmiConstant(dst, constant);
1557  // Adding and subtracting the min-value gives the same result; it only
1558  // differs on the overflow bit, which we don't check here.
1559  addq(dst, src);
1560  } else {
1561  // Subtract by adding the negation.
1562  LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
1563  addq(dst, src);
1564  j(overflow, on_not_smi_result, near_jump);
1565  }
1566  }
1567 }
1568 
1569 
1570 void MacroAssembler::SmiNeg(Register dst,
1571  Register src,
1572  Label* on_smi_result,
1573  Label::Distance near_jump) {
1574  if (dst.is(src)) {
1575  ASSERT(!dst.is(kScratchRegister));
1576  movq(kScratchRegister, src);
1577  neg(dst); // Low 32 bits are retained as zero by negation.
1578  // Test if result is zero or Smi::kMinValue.
1579  cmpq(dst, kScratchRegister);
1580  j(not_equal, on_smi_result, near_jump);
1581  movq(src, kScratchRegister);
1582  } else {
1583  movq(dst, src);
1584  neg(dst);
1585  cmpq(dst, src);
1586  // If the result is zero or Smi::kMinValue, negation failed to create a smi.
1587  j(not_equal, on_smi_result, near_jump);
1588  }
1589 }
1590 
1591 
1592 void MacroAssembler::SmiAdd(Register dst,
1593  Register src1,
1594  Register src2,
1595  Label* on_not_smi_result,
1596  Label::Distance near_jump) {
1597  ASSERT_NOT_NULL(on_not_smi_result);
1598  ASSERT(!dst.is(src2));
1599  if (dst.is(src1)) {
1600  movq(kScratchRegister, src1);
1601  addq(kScratchRegister, src2);
1602  j(overflow, on_not_smi_result, near_jump);
1603  movq(dst, kScratchRegister);
1604  } else {
1605  movq(dst, src1);
1606  addq(dst, src2);
1607  j(overflow, on_not_smi_result, near_jump);
1608  }
1609 }
1610 
1611 
1612 void MacroAssembler::SmiAdd(Register dst,
1613  Register src1,
1614  const Operand& src2,
1615  Label* on_not_smi_result,
1616  Label::Distance near_jump) {
1617  ASSERT_NOT_NULL(on_not_smi_result);
1618  if (dst.is(src1)) {
1619  movq(kScratchRegister, src1);
1620  addq(kScratchRegister, src2);
1621  j(overflow, on_not_smi_result, near_jump);
1622  movq(dst, kScratchRegister);
1623  } else {
1624  ASSERT(!src2.AddressUsesRegister(dst));
1625  movq(dst, src1);
1626  addq(dst, src2);
1627  j(overflow, on_not_smi_result, near_jump);
1628  }
1629 }
1630 
1631 
1632 void MacroAssembler::SmiAdd(Register dst,
1633  Register src1,
1634  Register src2) {
1635  // No overflow checking. Use only when it's known that
1636  // overflowing is impossible.
1637  if (!dst.is(src1)) {
1638  if (emit_debug_code()) {
1639  movq(kScratchRegister, src1);
1640  addq(kScratchRegister, src2);
1641  Check(no_overflow, "Smi addition overflow");
1642  }
1643  lea(dst, Operand(src1, src2, times_1, 0));
1644  } else {
1645  addq(dst, src2);
1646  Assert(no_overflow, "Smi addition overflow");
1647  }
1648 }
1649 
1650 
1651 void MacroAssembler::SmiSub(Register dst,
1652  Register src1,
1653  Register src2,
1654  Label* on_not_smi_result,
1655  Label::Distance near_jump) {
1656  ASSERT_NOT_NULL(on_not_smi_result);
1657  ASSERT(!dst.is(src2));
1658  if (dst.is(src1)) {
1659  cmpq(dst, src2);
1660  j(overflow, on_not_smi_result, near_jump);
1661  subq(dst, src2);
1662  } else {
1663  movq(dst, src1);
1664  subq(dst, src2);
1665  j(overflow, on_not_smi_result, near_jump);
1666  }
1667 }
1668 
1669 
1670 void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
1671  // No overflow checking. Use only when it's known that
1672  // overflowing is impossible (e.g., subtracting two positive smis).
1673  ASSERT(!dst.is(src2));
1674  if (!dst.is(src1)) {
1675  movq(dst, src1);
1676  }
1677  subq(dst, src2);
1678  Assert(no_overflow, "Smi subtraction overflow");
1679 }
1680 
1681 
1682 void MacroAssembler::SmiSub(Register dst,
1683  Register src1,
1684  const Operand& src2,
1685  Label* on_not_smi_result,
1686  Label::Distance near_jump) {
1687  ASSERT_NOT_NULL(on_not_smi_result);
1688  if (dst.is(src1)) {
1689  movq(kScratchRegister, src2);
1690  cmpq(src1, kScratchRegister);
1691  j(overflow, on_not_smi_result, near_jump);
1692  subq(src1, kScratchRegister);
1693  } else {
1694  movq(dst, src1);
1695  subq(dst, src2);
1696  j(overflow, on_not_smi_result, near_jump);
1697  }
1698 }
1699 
1700 
1701 void MacroAssembler::SmiSub(Register dst,
1702  Register src1,
1703  const Operand& src2) {
1704  // No overflow checking. Use only when it's known that
1705  // overflowing is impossible (e.g., subtracting two positive smis).
1706  if (!dst.is(src1)) {
1707  movq(dst, src1);
1708  }
1709  subq(dst, src2);
1710  Assert(no_overflow, "Smi subtraction overflow");
1711 }
1712 
1713 
1714 void MacroAssembler::SmiMul(Register dst,
1715  Register src1,
1716  Register src2,
1717  Label* on_not_smi_result,
1718  Label::Distance near_jump) {
1719  ASSERT(!dst.is(src2));
1720  ASSERT(!dst.is(kScratchRegister));
1721  ASSERT(!src1.is(kScratchRegister));
1722  ASSERT(!src2.is(kScratchRegister));
1723 
1724  if (dst.is(src1)) {
1725  Label failure, zero_correct_result;
1726  movq(kScratchRegister, src1); // Create backup for later testing.
1727  SmiToInteger64(dst, src1);
1728  imul(dst, src2);
1729  j(overflow, &failure, Label::kNear);
1730 
1731  // Check for negative zero result. If product is zero, and one
1732  // argument is negative, go to slow case.
1733  Label correct_result;
1734  testq(dst, dst);
1735  j(not_zero, &correct_result, Label::kNear);
1736 
1737  movq(dst, kScratchRegister);
1738  xor_(dst, src2);
1739  // Result was positive zero.
1740  j(positive, &zero_correct_result, Label::kNear);
1741 
1742  bind(&failure); // Reused failure exit, restores src1.
1743  movq(src1, kScratchRegister);
1744  jmp(on_not_smi_result, near_jump);
1745 
1746  bind(&zero_correct_result);
1747  Set(dst, 0);
1748 
1749  bind(&correct_result);
1750  } else {
1751  SmiToInteger64(dst, src1);
1752  imul(dst, src2);
1753  j(overflow, on_not_smi_result, near_jump);
1754  // Check for negative zero result. If product is zero, and one
1755  // argument is negative, go to slow case.
1756  Label correct_result;
1757  testq(dst, dst);
1758  j(not_zero, &correct_result, Label::kNear);
1759  // One of src1 and src2 is zero; check whether the other is
1760  // negative.
1761  movq(kScratchRegister, src1);
1762  xor_(kScratchRegister, src2);
1763  j(negative, on_not_smi_result, near_jump);
1764  bind(&correct_result);
1765  }
1766 }
1767 
1768 
1769 void MacroAssembler::SmiDiv(Register dst,
1770  Register src1,
1771  Register src2,
1772  Label* on_not_smi_result,
1773  Label::Distance near_jump) {
1774  ASSERT(!src1.is(kScratchRegister));
1775  ASSERT(!src2.is(kScratchRegister));
1776  ASSERT(!dst.is(kScratchRegister));
1777  ASSERT(!src2.is(rax));
1778  ASSERT(!src2.is(rdx));
1779  ASSERT(!src1.is(rdx));
1780 
1781  // Check for 0 divisor (result is +/-Infinity).
1782  testq(src2, src2);
1783  j(zero, on_not_smi_result, near_jump);
1784 
1785  if (src1.is(rax)) {
1786  movq(kScratchRegister, src1);
1787  }
1788  SmiToInteger32(rax, src1);
1789  // We need to rule out dividing Smi::kMinValue by -1, since that would
1790  // overflow in idiv and raise an exception.
1791  // We combine this with negative zero test (negative zero only happens
1792  // when dividing zero by a negative number).
1793 
1794  // We overshoot a little and go to slow case if we divide min-value
1795  // by any negative value, not just -1.
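// (idivl raises a divide-error fault for kMinValue / -1 because the quotient
// 2^31 does not fit in 32 bits, so that case must never reach the division.)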
1796  Label safe_div;
1797  testl(rax, Immediate(0x7fffffff));
1798  j(not_zero, &safe_div, Label::kNear);
1799  testq(src2, src2);
1800  if (src1.is(rax)) {
1801  j(positive, &safe_div, Label::kNear);
1802  movq(src1, kScratchRegister);
1803  jmp(on_not_smi_result, near_jump);
1804  } else {
1805  j(negative, on_not_smi_result, near_jump);
1806  }
1807  bind(&safe_div);
1808 
1809  SmiToInteger32(src2, src2);
1810  // Sign extend src1 into edx:eax.
1811  cdq();
1812  idivl(src2);
1813  Integer32ToSmi(src2, src2);
1814  // Check that the remainder is zero.
1815  testl(rdx, rdx);
1816  if (src1.is(rax)) {
1817  Label smi_result;
1818  j(zero, &smi_result, Label::kNear);
1819  movq(src1, kScratchRegister);
1820  jmp(on_not_smi_result, near_jump);
1821  bind(&smi_result);
1822  } else {
1823  j(not_zero, on_not_smi_result, near_jump);
1824  }
1825  if (!dst.is(src1) && src1.is(rax)) {
1826  movq(src1, kScratchRegister);
1827  }
1828  Integer32ToSmi(dst, rax);
1829 }
1830 
1831 
1832 void MacroAssembler::SmiMod(Register dst,
1833  Register src1,
1834  Register src2,
1835  Label* on_not_smi_result,
1836  Label::Distance near_jump) {
1837  ASSERT(!dst.is(kScratchRegister));
1838  ASSERT(!src1.is(kScratchRegister));
1839  ASSERT(!src2.is(kScratchRegister));
1840  ASSERT(!src2.is(rax));
1841  ASSERT(!src2.is(rdx));
1842  ASSERT(!src1.is(rdx));
1843  ASSERT(!src1.is(src2));
1844 
1845  testq(src2, src2);
1846  j(zero, on_not_smi_result, near_jump);
1847 
1848  if (src1.is(rax)) {
1849  movq(kScratchRegister, src1);
1850  }
1851  SmiToInteger32(rax, src1);
1852  SmiToInteger32(src2, src2);
1853 
1854  // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
1855  Label safe_div;
1856  cmpl(rax, Immediate(Smi::kMinValue));
1857  j(not_equal, &safe_div, Label::kNear);
1858  cmpl(src2, Immediate(-1));
1859  j(not_equal, &safe_div, Label::kNear);
1860  // Retag inputs and go slow case.
1861  Integer32ToSmi(src2, src2);
1862  if (src1.is(rax)) {
1863  movq(src1, kScratchRegister);
1864  }
1865  jmp(on_not_smi_result, near_jump);
1866  bind(&safe_div);
1867 
1868  // Sign extend eax into edx:eax.
1869  cdq();
1870  idivl(src2);
1871  // Restore smi tags on inputs.
1872  Integer32ToSmi(src2, src2);
1873  if (src1.is(rax)) {
1874  movq(src1, kScratchRegister);
1875  }
1876  // Check for a negative zero result. If the result is zero, and the
1877  // dividend is negative, go slow to return a floating point negative zero.
1878  Label smi_result;
1879  testl(rdx, rdx);
1880  j(not_zero, &smi_result, Label::kNear);
1881  testq(src1, src1);
1882  j(negative, on_not_smi_result, near_jump);
1883  bind(&smi_result);
1884  Integer32ToSmi(dst, rdx);
1885 }
1886 
1887 
1888 void MacroAssembler::SmiNot(Register dst, Register src) {
1889  ASSERT(!dst.is(kScratchRegister));
1890  ASSERT(!src.is(kScratchRegister));
1891  // Set tag and padding bits before negating, so that they are zero afterwards.
1892  movl(kScratchRegister, Immediate(~0));
1893  if (dst.is(src)) {
1894  xor_(dst, kScratchRegister);
1895  } else {
1896  lea(dst, Operand(src, kScratchRegister, times_1, 0));
1897  }
1898  not_(dst);
1899 }
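// Illustrative sketch, not part of the original file: the lea/not trick above
// on host integers, assuming the x64 smi layout (32-bit value in the upper
// half, lower 32 bits zero). Adding ~0 first sets the low 32 bits, so the
// final not leaves them zero again and the result is the smi of ~value.
static inline uint64_t SmiNotSketch(uint64_t tagged_smi) {
  uint64_t with_tag_bits_set = tagged_smi + 0xFFFFFFFFull;  // lea dst, [src + ~0]
  return ~with_tag_bits_set;  // == (~value) << 32
}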
1900 
1901 
1902 void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
1903  ASSERT(!dst.is(src2));
1904  if (!dst.is(src1)) {
1905  movq(dst, src1);
1906  }
1907  and_(dst, src2);
1908 }
1909 
1910 
1911 void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
1912  if (constant->value() == 0) {
1913  Set(dst, 0);
1914  } else if (dst.is(src)) {
1915  ASSERT(!dst.is(kScratchRegister));
1916  Register constant_reg = GetSmiConstant(constant);
1917  and_(dst, constant_reg);
1918  } else {
1919  LoadSmiConstant(dst, constant);
1920  and_(dst, src);
1921  }
1922 }
1923 
1924 
1925 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
1926  if (!dst.is(src1)) {
1927  ASSERT(!src1.is(src2));
1928  movq(dst, src1);
1929  }
1930  or_(dst, src2);
1931 }
1932 
1933 
1934 void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
1935  if (dst.is(src)) {
1936  ASSERT(!dst.is(kScratchRegister));
1937  Register constant_reg = GetSmiConstant(constant);
1938  or_(dst, constant_reg);
1939  } else {
1940  LoadSmiConstant(dst, constant);
1941  or_(dst, src);
1942  }
1943 }
1944 
1945 
1946 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
1947  if (!dst.is(src1)) {
1948  ASSERT(!src1.is(src2));
1949  movq(dst, src1);
1950  }
1951  xor_(dst, src2);
1952 }
1953 
1954 
1955 void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
1956  if (dst.is(src)) {
1957  ASSERT(!dst.is(kScratchRegister));
1958  Register constant_reg = GetSmiConstant(constant);
1959  xor_(dst, constant_reg);
1960  } else {
1961  LoadSmiConstant(dst, constant);
1962  xor_(dst, src);
1963  }
1964 }
1965 
1966 
1967 void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
1968  Register src,
1969  int shift_value) {
1970  ASSERT(is_uint5(shift_value));
1971  if (shift_value > 0) {
1972  if (dst.is(src)) {
1973  sar(dst, Immediate(shift_value + kSmiShift));
1974  shl(dst, Immediate(kSmiShift));
1975  } else {
1976  UNIMPLEMENTED(); // Not used.
1977  }
1978  }
1979 }
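// Illustrative sketch, not part of the original file: the combined sar/shl
// above expressed on a host int64_t, assuming kSmiShift == 32. One arithmetic
// shift by (shift_value + 32) untags and shifts at once; the shift left by 32
// re-tags the result.
static inline int64_t SmiSarConstantSketch(int64_t tagged_smi, int shift_value) {
  int64_t shifted = tagged_smi >> (shift_value + 32);  // untag and shift (arithmetic)
  return static_cast<int64_t>(static_cast<uint64_t>(shifted) << 32);  // re-tag
}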
1980 
1981 
1982 void MacroAssembler::SmiShiftLeftConstant(Register dst,
1983  Register src,
1984  int shift_value) {
1985  if (!dst.is(src)) {
1986  movq(dst, src);
1987  }
1988  if (shift_value > 0) {
1989  shl(dst, Immediate(shift_value));
1990  }
1991 }
1992 
1993 
1994 void MacroAssembler::SmiShiftLogicalRightConstant(
1995  Register dst, Register src, int shift_value,
1996  Label* on_not_smi_result, Label::Distance near_jump) {
1997  // Logical right shift interprets its result as an *unsigned* number.
1998  if (dst.is(src)) {
1999  UNIMPLEMENTED(); // Not used.
2000  } else {
2001  movq(dst, src);
2002  if (shift_value == 0) {
2003  testq(dst, dst);
2004  j(negative, on_not_smi_result, near_jump);
2005  }
2006  shr(dst, Immediate(shift_value + kSmiShift));
2007  shl(dst, Immediate(kSmiShift));
2008  }
2009 }
2010 
2011 
2012 void MacroAssembler::SmiShiftLeft(Register dst,
2013  Register src1,
2014  Register src2) {
2015  ASSERT(!dst.is(rcx));
2016  // Untag shift amount.
2017  if (!dst.is(src1)) {
2018  movq(dst, src1);
2019  }
2020  SmiToInteger32(rcx, src2);
2021  // The shift amount is specified by the lower 5 bits, not six as for the 64-bit shl opcode.
2022  and_(rcx, Immediate(0x1f));
2023  shl_cl(dst);
2024 }
2025 
2026 
2027 void MacroAssembler::SmiShiftLogicalRight(Register dst,
2028  Register src1,
2029  Register src2,
2030  Label* on_not_smi_result,
2031  Label::Distance near_jump) {
2032  ASSERT(!dst.is(kScratchRegister));
2033  ASSERT(!src1.is(kScratchRegister));
2034  ASSERT(!src2.is(kScratchRegister));
2035  ASSERT(!dst.is(rcx));
2036  // dst and src1 can be the same, because the one case that bails out
2037  // is a shift by 0, which leaves dst, and therefore src1, unchanged.
2038  if (src1.is(rcx) || src2.is(rcx)) {
2039  movq(kScratchRegister, rcx);
2040  }
2041  if (!dst.is(src1)) {
2042  movq(dst, src1);
2043  }
2044  SmiToInteger32(rcx, src2);
2045  orl(rcx, Immediate(kSmiShift));
2046  shr_cl(dst); // Shift count is (rcx & 0x1f) + 32.
2047  shl(dst, Immediate(kSmiShift));
2048  testq(dst, dst);
2049  if (src1.is(rcx) || src2.is(rcx)) {
2050  Label positive_result;
2051  j(positive, &positive_result, Label::kNear);
2052  if (src1.is(rcx)) {
2053  movq(src1, kScratchRegister);
2054  } else {
2055  movq(src2, kScratchRegister);
2056  }
2057  jmp(on_not_smi_result, near_jump);
2058  bind(&positive_result);
2059  } else {
2060  // src2 was zero and src1 negative.
2061  j(negative, on_not_smi_result, near_jump);
2062  }
2063 }
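// Illustrative sketch, not part of the original file: the orl(rcx, kSmiShift)
// trick above, assuming kSmiShift == 32 and a shift amount already in the
// range 0..31. Because 32 | n == 32 + n for n < 32, a single 64-bit logical
// shift both untags the smi and applies the JavaScript shift.
static inline uint64_t SmiShrSketch(uint64_t tagged_smi, unsigned shift) {
  unsigned count = shift | 32u;              // == shift + 32 when shift < 32
  uint64_t untagged_result = tagged_smi >> count;
  return untagged_result << 32;              // re-tag as a smi
}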
2064 
2065 
2066 void MacroAssembler::SmiShiftArithmeticRight(Register dst,
2067  Register src1,
2068  Register src2) {
2069  ASSERT(!dst.is(kScratchRegister));
2070  ASSERT(!src1.is(kScratchRegister));
2071  ASSERT(!src2.is(kScratchRegister));
2072  ASSERT(!dst.is(rcx));
2073  if (src1.is(rcx)) {
2074  movq(kScratchRegister, src1);
2075  } else if (src2.is(rcx)) {
2076  movq(kScratchRegister, src2);
2077  }
2078  if (!dst.is(src1)) {
2079  movq(dst, src1);
2080  }
2081  SmiToInteger32(rcx, src2);
2082  orl(rcx, Immediate(kSmiShift));
2083  sar_cl(dst); // Shift count is 32 + (original rcx & 0x1f).
2084  shl(dst, Immediate(kSmiShift));
2085  if (src1.is(rcx)) {
2086  movq(src1, kScratchRegister);
2087  } else if (src2.is(rcx)) {
2088  movq(src2, kScratchRegister);
2089  }
2090 }
2091 
2092 
2093 void MacroAssembler::SelectNonSmi(Register dst,
2094  Register src1,
2095  Register src2,
2096  Label* on_not_smis,
2097  Label::Distance near_jump) {
2098  ASSERT(!dst.is(kScratchRegister));
2099  ASSERT(!src1.is(kScratchRegister));
2100  ASSERT(!src2.is(kScratchRegister));
2101  ASSERT(!dst.is(src1));
2102  ASSERT(!dst.is(src2));
2103  // Both operands must not be smis.
2104 #ifdef DEBUG
2105  if (allow_stub_calls()) { // Check contains a stub call.
2106  Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
2107  Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
2108  }
2109 #endif
2110  STATIC_ASSERT(kSmiTag == 0);
2111  ASSERT_EQ(0, Smi::FromInt(0));
2112  movl(kScratchRegister, Immediate(kSmiTagMask));
2113  and_(kScratchRegister, src1);
2114  testl(kScratchRegister, src2);
2115  // If non-zero then both are smis.
2116  j(not_zero, on_not_smis, near_jump);
2117 
2118  // Exactly one operand is a smi.
2119  ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
2120  // kScratchRegister still holds src1 & kSmiTagMask, which is either zero or one.
2121  subq(kScratchRegister, Immediate(1));
2122  // If src1 is a smi, the scratch register is all 1s, else it is all 0s.
2123  movq(dst, src1);
2124  xor_(dst, src2);
2125  and_(dst, kScratchRegister);
2126  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
2127  xor_(dst, src1);
2128  // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
2129 }
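// Illustrative sketch, not part of the original file: the branch-free
// selection above on host integers, assuming kSmiTag == 0 and
// kSmiTagMask == 1 (so a smi has bit 0 clear), with exactly one of a and b
// being a smi on entry.
static inline uint64_t SelectNonSmiSketch(uint64_t a, uint64_t b) {
  uint64_t mask = (a & 1) - 1;        // all ones if a is a smi, all zeros otherwise
  return ((a ^ b) & mask) ^ a;        // == b when a is the smi, else a
}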
2130 
2131 
2132 SmiIndex MacroAssembler::SmiToIndex(Register dst,
2133  Register src,
2134  int shift) {
2135  ASSERT(is_uint6(shift));
2136  // There is a possible optimization if shift is in the range 60-63, but that
2137  // will (and must) never happen.
2138  if (!dst.is(src)) {
2139  movq(dst, src);
2140  }
2141  if (shift < kSmiShift) {
2142  sar(dst, Immediate(kSmiShift - shift));
2143  } else {
2144  shl(dst, Immediate(shift - kSmiShift));
2145  }
2146  return SmiIndex(dst, times_1);
2147 }
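// Illustrative sketch, not part of the original file: the net effect of the
// single sar or shl above, assuming kSmiShift == 32. For a tagged smi holding
// value n, the returned index register contains n << shift.
static inline int64_t SmiToIndexSketch(int64_t tagged_smi, int shift) {
  int64_t value = tagged_smi >> 32;                                    // untag
  return static_cast<int64_t>(static_cast<uint64_t>(value) << shift);  // scale
}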
2148 
2149 SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
2150  Register src,
2151  int shift) {
2152  // Register src holds a positive smi.
2153  ASSERT(is_uint6(shift));
2154  if (!dst.is(src)) {
2155  movq(dst, src);
2156  }
2157  neg(dst);
2158  if (shift < kSmiShift) {
2159  sar(dst, Immediate(kSmiShift - shift));
2160  } else {
2161  shl(dst, Immediate(shift - kSmiShift));
2162  }
2163  return SmiIndex(dst, times_1);
2164 }
2165 
2166 
2167 void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
2168  ASSERT_EQ(0, kSmiShift % kBitsPerByte);
2169  addl(dst, Operand(src, kSmiShift / kBitsPerByte));
2170 }
2171 
2172 
2173 void MacroAssembler::JumpIfNotString(Register object,
2174  Register object_map,
2175  Label* not_string,
2176  Label::Distance near_jump) {
2177  Condition is_smi = CheckSmi(object);
2178  j(is_smi, not_string, near_jump);
2179  CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
2180  j(above_equal, not_string, near_jump);
2181 }
2182 
2183 
2184 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
2185  Register first_object,
2186  Register second_object,
2187  Register scratch1,
2188  Register scratch2,
2189  Label* on_fail,
2190  Label::Distance near_jump) {
2191  // Check that both objects are not smis.
2192  Condition either_smi = CheckEitherSmi(first_object, second_object);
2193  j(either_smi, on_fail, near_jump);
2194 
2195  // Load instance type for both strings.
2196  movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
2197  movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
2198  movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
2199  movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
2200 
2201  // Check that both are flat ASCII strings.
2202  ASSERT(kNotStringTag != 0);
2203  const int kFlatAsciiStringMask =
2204  kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2205  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
2206 
2207  andl(scratch1, Immediate(kFlatAsciiStringMask));
2208  andl(scratch2, Immediate(kFlatAsciiStringMask));
2209  // Interleave the bits to check both scratch1 and scratch2 in one test.
2210  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
2211  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
2212  cmpl(scratch1,
2213  Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
2214  j(not_equal, on_fail, near_jump);
2215 }
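// Illustrative sketch, not part of the original file: the interleaved compare
// above on host integers, where mask and tag stand for kFlatAsciiStringMask
// and kFlatAsciiStringTag. Because the mask does not overlap with itself
// shifted left by 3 (see the ASSERT_EQ above), scratch1 + 8 * scratch2 packs
// both masked instance types into one word, and a single compare against
// tag + (tag << 3) checks both strings at once.
static inline bool BothFlatAsciiSketch(int type1, int type2, int mask, int tag) {
  int combined = (type1 & mask) + ((type2 & mask) << 3);  // lea(scratch1, [s1 + s2*8])
  return combined == tag + (tag << 3);
}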
2216 
2217 
2218 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
2219  Register instance_type,
2220  Register scratch,
2221  Label* failure,
2222  Label::Distance near_jump) {
2223  if (!scratch.is(instance_type)) {
2224  movl(scratch, instance_type);
2225  }
2226 
2227  const int kFlatAsciiStringMask =
2228  kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2229 
2230  andl(scratch, Immediate(kFlatAsciiStringMask));
2231  cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
2232  j(not_equal, failure, near_jump);
2233 }
2234 
2235 
2236 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
2237  Register first_object_instance_type,
2238  Register second_object_instance_type,
2239  Register scratch1,
2240  Register scratch2,
2241  Label* on_fail,
2242  Label::Distance near_jump) {
2243  // Load instance type for both strings.
2244  movq(scratch1, first_object_instance_type);
2245  movq(scratch2, second_object_instance_type);
2246 
2247  // Check that both are flat ASCII strings.
2248  ASSERT(kNotStringTag != 0);
2249  const int kFlatAsciiStringMask =
2250  kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2251  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
2252 
2253  andl(scratch1, Immediate(kFlatAsciiStringMask));
2254  andl(scratch2, Immediate(kFlatAsciiStringMask));
2255  // Interleave the bits to check both scratch1 and scratch2 in one test.
2256  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
2257  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
2258  cmpl(scratch1,
2259  Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
2260  j(not_equal, on_fail, near_jump);
2261 }
2262 
2263 
2264 
2265 void MacroAssembler::Move(Register dst, Register src) {
2266  if (!dst.is(src)) {
2267  movq(dst, src);
2268  }
2269 }
2270 
2271 
2272 void MacroAssembler::Move(Register dst, Handle<Object> source) {
2273  ASSERT(!source->IsFailure());
2274  if (source->IsSmi()) {
2275  Move(dst, Smi::cast(*source));
2276  } else {
2277  movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
2278  }
2279 }
2280 
2281 
2282 void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
2283  ASSERT(!source->IsFailure());
2284  if (source->IsSmi()) {
2285  Move(dst, Smi::cast(*source));
2286  } else {
2287  movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
2288  movq(dst, kScratchRegister);
2289  }
2290 }
2291 
2292 
2293 void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
2294  if (source->IsSmi()) {
2295  Cmp(dst, Smi::cast(*source));
2296  } else {
2297  Move(kScratchRegister, source);
2298  cmpq(dst, kScratchRegister);
2299  }
2300 }
2301 
2302 
2303 void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
2304  if (source->IsSmi()) {
2305  Cmp(dst, Smi::cast(*source));
2306  } else {
2307  ASSERT(source->IsHeapObject());
2308  movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
2309  cmpq(dst, kScratchRegister);
2310  }
2311 }
2312 
2313 
2314 void MacroAssembler::Push(Handle<Object> source) {
2315  if (source->IsSmi()) {
2316  Push(Smi::cast(*source));
2317  } else {
2318  ASSERT(source->IsHeapObject());
2319  movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
2320  push(kScratchRegister);
2321  }
2322 }
2323 
2324 
2325 void MacroAssembler::LoadHeapObject(Register result,
2326  Handle<HeapObject> object) {
2327  if (isolate()->heap()->InNewSpace(*object)) {
2328  Handle<JSGlobalPropertyCell> cell =
2329  isolate()->factory()->NewJSGlobalPropertyCell(object);
2330  movq(result, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
2331  movq(result, Operand(result, 0));
2332  } else {
2333  Move(result, object);
2334  }
2335 }
2336 
2337 
2338 void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
2339  if (isolate()->heap()->InNewSpace(*object)) {
2340  Handle<JSGlobalPropertyCell> cell =
2341  isolate()->factory()->NewJSGlobalPropertyCell(object);
2342  movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
2343  movq(kScratchRegister, Operand(kScratchRegister, 0));
2344  push(kScratchRegister);
2345  } else {
2346  Push(object);
2347  }
2348 }
2349 
2350 
2351 void MacroAssembler::LoadGlobalCell(Register dst,
2352  Handle<JSGlobalPropertyCell> cell) {
2353  if (dst.is(rax)) {
2354  load_rax(cell.location(), RelocInfo::GLOBAL_PROPERTY_CELL);
2355  } else {
2356  movq(dst, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
2357  movq(dst, Operand(dst, 0));
2358  }
2359 }
2360 
2361 
2362 void MacroAssembler::Push(Smi* source) {
2363  intptr_t smi = reinterpret_cast<intptr_t>(source);
2364  if (is_int32(smi)) {
2365  push(Immediate(static_cast<int32_t>(smi)));
2366  } else {
2367  Register constant = GetSmiConstant(source);
2368  push(constant);
2369  }
2370 }
2371 
2372 
2373 void MacroAssembler::Drop(int stack_elements) {
2374  if (stack_elements > 0) {
2375  addq(rsp, Immediate(stack_elements * kPointerSize));
2376  }
2377 }
2378 
2379 
2380 void MacroAssembler::Test(const Operand& src, Smi* source) {
2381  testl(Operand(src, kIntSize), Immediate(source->value()));
2382 }
2383 
2384 
2385 void MacroAssembler::TestBit(const Operand& src, int bits) {
2386  int byte_offset = bits / kBitsPerByte;
2387  int bit_in_byte = bits & (kBitsPerByte - 1);
2388  testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte));
2389 }
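// Illustrative sketch, not part of the original file: the same bit addressing
// on a host byte buffer (kBitsPerByte == 8). TestBit(src, 13), for example,
// tests bit 5 of the byte at offset 1.
static inline bool TestBitSketch(const uint8_t* base, int bits) {
  int byte_offset = bits / 8;
  int bit_in_byte = bits & 7;
  return (base[byte_offset] & (1 << bit_in_byte)) != 0;
}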
2390 
2391 
2392 void MacroAssembler::Jump(ExternalReference ext) {
2393  LoadAddress(kScratchRegister, ext);
2394  jmp(kScratchRegister);
2395 }
2396 
2397 
2398 void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
2399  movq(kScratchRegister, destination, rmode);
2400  jmp(kScratchRegister);
2401 }
2402 
2403 
2404 void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
2405  // TODO(X64): Inline this
2406  jmp(code_object, rmode);
2407 }
2408 
2409 
2410 int MacroAssembler::CallSize(ExternalReference ext) {
2411  // Opcode for call kScratchRegister (r10) is: REX.B FF D2 (three bytes).
2412  const int kCallInstructionSize = 3;
2413  return LoadAddressSize(ext) + kCallInstructionSize;
2414 }
2415 
2416 
2417 void MacroAssembler::Call(ExternalReference ext) {
2418 #ifdef DEBUG
2419  int end_position = pc_offset() + CallSize(ext);
2420 #endif
2421  LoadAddress(kScratchRegister, ext);
2422  call(kScratchRegister);
2423 #ifdef DEBUG
2424  CHECK_EQ(end_position, pc_offset());
2425 #endif
2426 }
2427 
2428 
2429 void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
2430 #ifdef DEBUG
2431  int end_position = pc_offset() + CallSize(destination, rmode);
2432 #endif
2433  movq(kScratchRegister, destination, rmode);
2434  call(kScratchRegister);
2435 #ifdef DEBUG
2436  CHECK_EQ(pc_offset(), end_position);
2437 #endif
2438 }
2439 
2440 
2441 void MacroAssembler::Call(Handle<Code> code_object,
2442  RelocInfo::Mode rmode,
2443  TypeFeedbackId ast_id) {
2444 #ifdef DEBUG
2445  int end_position = pc_offset() + CallSize(code_object);
2446 #endif
2447  ASSERT(RelocInfo::IsCodeTarget(rmode));
2448  call(code_object, rmode, ast_id);
2449 #ifdef DEBUG
2450  CHECK_EQ(end_position, pc_offset());
2451 #endif
2452 }
2453 
2454 
2455 void MacroAssembler::Pushad() {
2456  push(rax);
2457  push(rcx);
2458  push(rdx);
2459  push(rbx);
2460  // Not pushing rsp or rbp.
2461  push(rsi);
2462  push(rdi);
2463  push(r8);
2464  push(r9);
2465  // r10 is kScratchRegister.
2466  push(r11);
2467  // r12 is kSmiConstantRegister.
2468  // r13 is kRootRegister.
2469  push(r14);
2470  push(r15);
2472  // Use lea for symmetry with Popad.
2473  int sp_delta =
2474  (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
2475  lea(rsp, Operand(rsp, -sp_delta));
2476 }
2477 
2478 
2479 void MacroAssembler::Popad() {
2480  // Popad must not change the flags, so use lea instead of addq.
2481  int sp_delta =
2482  (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
2483  lea(rsp, Operand(rsp, sp_delta));
2484  pop(r15);
2485  pop(r14);
2486  pop(r11);
2487  pop(r9);
2488  pop(r8);
2489  pop(rdi);
2490  pop(rsi);
2491  pop(rbx);
2492  pop(rdx);
2493  pop(rcx);
2494  pop(rax);
2495 }
2496 
2497 
2498 void MacroAssembler::Dropad() {
2499  addq(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
2500 }
2501 
2502 
2503 // Order in which general registers are pushed by Pushad:
2504 // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
2505 const int
2506 MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
2507  0,
2508  1,
2509  2,
2510  3,
2511  -1,
2512  -1,
2513  4,
2514  5,
2515  6,
2516  7,
2517  -1,
2518  8,
2519  -1,
2520  -1,
2521  9,
2522  10
2523 };
2524 
2525 
2526 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst,
2527  const Immediate& imm) {
2528  movq(SafepointRegisterSlot(dst), imm);
2529 }
2530 
2531 
2532 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
2533  movq(SafepointRegisterSlot(dst), src);
2534 }
2535 
2536 
2537 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
2538  movq(dst, SafepointRegisterSlot(src));
2539 }
2540 
2541 
2542 Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
2543  return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
2544 }
2545 
2546 
2547 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
2548  int handler_index) {
2549  // Adjust this code if not the case.
2550  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2551  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2552  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2553  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2554  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2555  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2556 
2557  // We will build up the handler from the bottom by pushing on the stack.
2558  // First push the frame pointer and context.
2559  if (kind == StackHandler::JS_ENTRY) {
2560  // The frame pointer does not point to a JS frame so we save NULL for
2561  // rbp. We expect the code throwing an exception to check rbp before
2562  // dereferencing it to restore the context.
2563  push(Immediate(0)); // NULL frame pointer.
2564  Push(Smi::FromInt(0)); // No context.
2565  } else {
2566  push(rbp);
2567  push(rsi);
2568  }
2569 
2570  // Push the state and the code object.
2571  unsigned state =
2572  StackHandler::IndexField::encode(handler_index) |
2573  StackHandler::KindField::encode(kind);
2574  push(Immediate(state));
2575  Push(CodeObject());
2576 
2577  // Link the current handler as the next handler.
2578  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
2579  push(ExternalOperand(handler_address));
2580  // Set this new handler as the current one.
2581  movq(ExternalOperand(handler_address), rsp);
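  // The handler record built above therefore has the following layout,
  // matching the StackHandlerConstants offsets asserted at the top:
  //   rsp + 0 * kPointerSize : next handler        (kNextOffset)
  //   rsp + 1 * kPointerSize : code object         (kCodeOffset)
  //   rsp + 2 * kPointerSize : state (index, kind) (kStateOffset)
  //   rsp + 3 * kPointerSize : context             (kContextOffset)
  //   rsp + 4 * kPointerSize : frame pointer/NULL  (kFPOffset)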
2582 }
2583 
2584 
2585 void MacroAssembler::PopTryHandler() {
2586  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2587  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
2588  pop(ExternalOperand(handler_address));
2589  addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
2590 }
2591 
2592 
2593 void MacroAssembler::JumpToHandlerEntry() {
2594  // Compute the handler entry address and jump to it. The handler table is
2595  // a fixed array of (smi-tagged) code offsets.
2596  // rax = exception, rdi = code object, rdx = state.
2597  movq(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
2598  shr(rdx, Immediate(StackHandler::kKindWidth));
2599  movq(rdx, FieldOperand(rbx, rdx, times_8, FixedArray::kHeaderSize));
2600  SmiToInteger64(rdx, rdx);
2601  lea(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
2602  jmp(rdi);
2603 }
2604 
2605 
2606 void MacroAssembler::Throw(Register value) {
2607  // Adjust this code if not the case.
2608  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2609  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2610  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2611  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2612  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2613  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2614 
2615  // The exception is expected in rax.
2616  if (!value.is(rax)) {
2617  movq(rax, value);
2618  }
2619  // Drop the stack pointer to the top of the top handler.
2620  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
2621  movq(rsp, ExternalOperand(handler_address));
2622  // Restore the next handler.
2623  pop(ExternalOperand(handler_address));
2624 
2625  // Remove the code object and state, compute the handler address in rdi.
2626  pop(rdi); // Code object.
2627  pop(rdx); // Offset and state.
2628 
2629  // Restore the context and frame pointer.
2630  pop(rsi); // Context.
2631  pop(rbp); // Frame pointer.
2632 
2633  // If the handler is a JS frame, restore the context to the frame.
2634  // (kind == ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
2635  // rbp or rsi.
2636  Label skip;
2637  testq(rsi, rsi);
2638  j(zero, &skip, Label::kNear);
2639  movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
2640  bind(&skip);
2641 
2642  JumpToHandlerEntry();
2643 }
2644 
2645 
2646 void MacroAssembler::ThrowUncatchable(Register value) {
2647  // Adjust this code if not the case.
2648  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2649  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2650  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2651  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2652  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2653  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2654 
2655  // The exception is expected in rax.
2656  if (!value.is(rax)) {
2657  movq(rax, value);
2658  }
2659  // Drop the stack pointer to the top of the top stack handler.
2660  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
2661  Load(rsp, handler_address);
2662 
2663  // Unwind the handlers until the top ENTRY handler is found.
2664  Label fetch_next, check_kind;
2665  jmp(&check_kind, Label::kNear);
2666  bind(&fetch_next);
2667  movq(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
2668 
2669  bind(&check_kind);
2670  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
2671  testl(Operand(rsp, StackHandlerConstants::kStateOffset),
2672  Immediate(StackHandler::KindField::kMask));
2673  j(not_zero, &fetch_next);
2674 
2675  // Set the top handler address to next handler past the top ENTRY handler.
2676  pop(ExternalOperand(handler_address));
2677 
2678  // Remove the code object and state, compute the handler address in rdi.
2679  pop(rdi); // Code object.
2680  pop(rdx); // Offset and state.
2681 
2682  // Clear the context pointer and frame pointer (0 was saved in the handler).
2683  pop(rsi);
2684  pop(rbp);
2685 
2686  JumpToHandlerEntry();
2687 }
2688 
2689 
2690 void MacroAssembler::Ret() {
2691  ret(0);
2692 }
2693 
2694 
2695 void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
2696  if (is_uint16(bytes_dropped)) {
2697  ret(bytes_dropped);
2698  } else {
2699  pop(scratch);
2700  addq(rsp, Immediate(bytes_dropped));
2701  push(scratch);
2702  ret(0);
2703  }
2704 }
2705 
2706 
2707 void MacroAssembler::FCmp() {
2708  fucomip();
2709  fstp(0);
2710 }
2711 
2712 
2713 void MacroAssembler::CmpObjectType(Register heap_object,
2714  InstanceType type,
2715  Register map) {
2716  movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
2717  CmpInstanceType(map, type);
2718 }
2719 
2720 
2721 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
2722  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
2723  Immediate(static_cast<int8_t>(type)));
2724 }
2725 
2726 
2727 void MacroAssembler::CheckFastElements(Register map,
2728  Label* fail,
2729  Label::Distance distance) {
2734  cmpb(FieldOperand(map, Map::kBitField2Offset),
2735  Immediate(Map::kMaximumBitField2FastHoleyElementValue));
2736  j(above, fail, distance);
2737 }
2738 
2739 
2740 void MacroAssembler::CheckFastObjectElements(Register map,
2741  Label* fail,
2742  Label::Distance distance) {
2747  cmpb(FieldOperand(map, Map::kBitField2Offset),
2748  Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
2749  j(below_equal, fail, distance);
2750  cmpb(FieldOperand(map, Map::kBitField2Offset),
2751  Immediate(Map::kMaximumBitField2FastHoleyElementValue));
2752  j(above, fail, distance);
2753 }
2754 
2755 
2756 void MacroAssembler::CheckFastSmiElements(Register map,
2757  Label* fail,
2758  Label::Distance distance) {
2761  cmpb(FieldOperand(map, Map::kBitField2Offset),
2762  Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
2763  j(above, fail, distance);
2764 }
2765 
2766 
2767 void MacroAssembler::StoreNumberToDoubleElements(
2768  Register maybe_number,
2769  Register elements,
2770  Register index,
2771  XMMRegister xmm_scratch,
2772  Label* fail) {
2773  Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
2774 
2775  JumpIfSmi(maybe_number, &smi_value, Label::kNear);
2776 
2777  CheckMap(maybe_number,
2778  isolate()->factory()->heap_number_map(),
2779  fail,
2780  DONT_DO_SMI_CHECK);
2781 
2782  // Double value, canonicalize NaN.
2783  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
2784  cmpl(FieldOperand(maybe_number, offset),
2785  Immediate(kNaNOrInfinityLowerBoundUpper32));
2786  j(greater_equal, &maybe_nan, Label::kNear);
2787 
2788  bind(&not_nan);
2789  movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
2790  bind(&have_double_value);
2791  movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
2792  xmm_scratch);
2793  jmp(&done);
2794 
2795  bind(&maybe_nan);
2796  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
2797  // it's an Infinity, and the non-NaN code path applies.
2798  j(greater, &is_nan, Label::kNear);
2799  cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
2800  j(zero, &not_nan);
2801  bind(&is_nan);
2802  // Convert all NaNs to the same canonical NaN value when they are stored in
2803  // the double array.
2804  Set(kScratchRegister, BitCast<uint64_t>(
2805  FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
2806  movq(xmm_scratch, kScratchRegister);
2807  jmp(&have_double_value, Label::kNear);
2808 
2809  bind(&smi_value);
2810  // Value is a smi. Convert to a double and store.
2811  // Preserve original value.
2812  SmiToInteger32(kScratchRegister, maybe_number);
2813  cvtlsi2sd(xmm_scratch, kScratchRegister);
2814  movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
2815  xmm_scratch);
2816  bind(&done);
2817 }
2818 
2819 
2820 void MacroAssembler::CompareMap(Register obj,
2821  Handle<Map> map,
2822  Label* early_success,
2823  CompareMapMode mode) {
2824  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
2825  if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
2826  ElementsKind kind = map->elements_kind();
2827  if (IsFastElementsKind(kind)) {
2828  bool packed = IsFastPackedElementsKind(kind);
2829  Map* current_map = *map;
2830  while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
2831  kind = GetNextMoreGeneralFastElementsKind(kind, packed);
2832  current_map = current_map->LookupElementsTransitionMap(kind);
2833  if (!current_map) break;
2834  j(equal, early_success, Label::kNear);
2835  Cmp(FieldOperand(obj, HeapObject::kMapOffset),
2836  Handle<Map>(current_map));
2837  }
2838  }
2839  }
2840 }
2841 
2842 
2843 void MacroAssembler::CheckMap(Register obj,
2844  Handle<Map> map,
2845  Label* fail,
2846  SmiCheckType smi_check_type,
2847  CompareMapMode mode) {
2848  if (smi_check_type == DO_SMI_CHECK) {
2849  JumpIfSmi(obj, fail);
2850  }
2851 
2852  Label success;
2853  CompareMap(obj, map, &success, mode);
2854  j(not_equal, fail);
2855  bind(&success);
2856 }
2857 
2858 
2859 void MacroAssembler::ClampUint8(Register reg) {
2860  Label done;
2861  testl(reg, Immediate(0xFFFFFF00));
2862  j(zero, &done, Label::kNear);
2863  setcc(negative, reg); // 1 if negative, 0 if positive.
2864  decb(reg); // 0 if negative, 255 if positive.
2865  bind(&done);
2866 }
2867 
2868 
2869 void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
2870  XMMRegister temp_xmm_reg,
2871  Register result_reg) {
2872  Label done;
2873  Label conv_failure;
2874  xorps(temp_xmm_reg, temp_xmm_reg);
2875  cvtsd2si(result_reg, input_reg);
2876  testl(result_reg, Immediate(0xFFFFFF00));
2877  j(zero, &done, Label::kNear);
2878  cmpl(result_reg, Immediate(0x80000000));
2879  j(equal, &conv_failure, Label::kNear);
2880  movl(result_reg, Immediate(0));
2881  setcc(above, result_reg);
2882  subl(result_reg, Immediate(1));
2883  andl(result_reg, Immediate(255));
2884  jmp(&done, Label::kNear);
2885  bind(&conv_failure);
2886  Set(result_reg, 0);
2887  ucomisd(input_reg, temp_xmm_reg);
2888  j(below, &done, Label::kNear);
2889  Set(result_reg, 255);
2890  bind(&done);
2891 }
2892 
2893 
2894 static double kUint32Bias =
2895  static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
2896 
2897 
2898 void MacroAssembler::LoadUint32(XMMRegister dst,
2899  Register src,
2900  XMMRegister scratch) {
2901  Label done;
2902  cmpl(src, Immediate(0));
2903  movq(kScratchRegister,
2904  reinterpret_cast<int64_t>(&kUint32Bias),
2905  RelocInfo::NONE);
2906  movsd(scratch, Operand(kScratchRegister, 0));
2907  cvtlsi2sd(dst, src);
2908  j(not_sign, &done, Label::kNear);
2909  addsd(dst, scratch);
2910  bind(&done);
2911 }
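// Illustrative sketch, not part of the original file: the bias correction
// above on the host. cvtlsi2sd interprets the 32-bit source as signed, so when
// the sign bit is set the converted value is off by exactly 2^32 and
// kUint32Bias is added back.
static inline double LoadUint32Sketch(uint32_t src) {
  double result = static_cast<double>(static_cast<int32_t>(src));  // cvtlsi2sd
  if (static_cast<int32_t>(src) < 0) result += 4294967296.0;       // kUint32Bias
  return result;
}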
2912 
2913 
2914 void MacroAssembler::LoadInstanceDescriptors(Register map,
2915  Register descriptors) {
2916  movq(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
2917 }
2918 
2919 
2920 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
2921  movq(dst, FieldOperand(map, Map::kBitField3Offset));
2922  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
2923 }
2924 
2925 
2926 void MacroAssembler::EnumLength(Register dst, Register map) {
2927  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
2928  movq(dst, FieldOperand(map, Map::kBitField3Offset));
2929  Move(kScratchRegister, Smi::FromInt(Map::EnumLengthBits::kMask));
2930  and_(dst, kScratchRegister);
2931 }
2932 
2933 
2934 void MacroAssembler::DispatchMap(Register obj,
2935  Handle<Map> map,
2936  Handle<Code> success,
2937  SmiCheckType smi_check_type) {
2938  Label fail;
2939  if (smi_check_type == DO_SMI_CHECK) {
2940  JumpIfSmi(obj, &fail);
2941  }
2942  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
2943  j(equal, success, RelocInfo::CODE_TARGET);
2944 
2945  bind(&fail);
2946 }
2947 
2948 
2949 void MacroAssembler::AssertNumber(Register object) {
2950  if (emit_debug_code()) {
2951  Label ok;
2952  Condition is_smi = CheckSmi(object);
2953  j(is_smi, &ok, Label::kNear);
2954  Cmp(FieldOperand(object, HeapObject::kMapOffset),
2955  isolate()->factory()->heap_number_map());
2956  Check(equal, "Operand is not a number");
2957  bind(&ok);
2958  }
2959 }
2960 
2961 
2962 void MacroAssembler::AssertNotSmi(Register object) {
2963  if (emit_debug_code()) {
2964  Condition is_smi = CheckSmi(object);
2965  Check(NegateCondition(is_smi), "Operand is a smi");
2966  }
2967 }
2968 
2969 
2970 void MacroAssembler::AssertSmi(Register object) {
2971  if (emit_debug_code()) {
2972  Condition is_smi = CheckSmi(object);
2973  Check(is_smi, "Operand is not a smi");
2974  }
2975 }
2976 
2977 
2978 void MacroAssembler::AssertSmi(const Operand& object) {
2979  if (emit_debug_code()) {
2980  Condition is_smi = CheckSmi(object);
2981  Check(is_smi, "Operand is not a smi");
2982  }
2983 }
2984 
2985 
2986 void MacroAssembler::AssertZeroExtended(Register int32_register) {
2987  if (emit_debug_code()) {
2988  ASSERT(!int32_register.is(kScratchRegister));
2989  movq(kScratchRegister, 0x100000000l, RelocInfo::NONE);
2990  cmpq(kScratchRegister, int32_register);
2991  Check(above_equal, "32 bit value in register is not zero-extended");
2992  }
2993 }
2994 
2995 
2996 void MacroAssembler::AssertString(Register object) {
2997  if (emit_debug_code()) {
2998  testb(object, Immediate(kSmiTagMask));
2999  Check(not_equal, "Operand is a smi and not a string");
3000  push(object);
3001  movq(object, FieldOperand(object, HeapObject::kMapOffset));
3002  CmpInstanceType(object, FIRST_NONSTRING_TYPE);
3003  pop(object);
3004  Check(below, "Operand is not a string");
3005  }
3006 }
3007 
3008 
3009 void MacroAssembler::AssertRootValue(Register src,
3010  Heap::RootListIndex root_value_index,
3011  const char* message) {
3012  if (emit_debug_code()) {
3013  ASSERT(!src.is(kScratchRegister));
3014  LoadRoot(kScratchRegister, root_value_index);
3015  cmpq(src, kScratchRegister);
3016  Check(equal, message);
3017  }
3018 }
3019 
3020 
3021 
3022 Condition MacroAssembler::IsObjectStringType(Register heap_object,
3023  Register map,
3024  Register instance_type) {
3025  movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3026  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3027  STATIC_ASSERT(kNotStringTag != 0);
3028  testb(instance_type, Immediate(kIsNotStringMask));
3029  return zero;
3030 }
3031 
3032 
3033 void MacroAssembler::TryGetFunctionPrototype(Register function,
3034  Register result,
3035  Label* miss,
3036  bool miss_on_bound_function) {
3037  // Check that the receiver isn't a smi.
3038  testl(function, Immediate(kSmiTagMask));
3039  j(zero, miss);
3040 
3041  // Check that the function really is a function.
3042  CmpObjectType(function, JS_FUNCTION_TYPE, result);
3043  j(not_equal, miss);
3044 
3045  if (miss_on_bound_function) {
3046  movq(kScratchRegister,
3047  FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3048  // It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
3049  // field).
3050  TestBit(FieldOperand(kScratchRegister,
3051  SharedFunctionInfo::kCompilerHintsOffset),
3052  SharedFunctionInfo::kBoundFunction);
3053  j(not_zero, miss);
3054  }
3055 
3056  // Make sure that the function has an instance prototype.
3057  Label non_instance;
3058  testb(FieldOperand(result, Map::kBitFieldOffset),
3059  Immediate(1 << Map::kHasNonInstancePrototype));
3060  j(not_zero, &non_instance, Label::kNear);
3061 
3062  // Get the prototype or initial map from the function.
3063  movq(result,
3064  FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3065 
3066  // If the prototype or initial map is the hole, don't return it and
3067  // simply miss the cache instead. This will allow us to allocate a
3068  // prototype object on-demand in the runtime system.
3069  CompareRoot(result, Heap::kTheHoleValueRootIndex);
3070  j(equal, miss);
3071 
3072  // If the function does not have an initial map, we're done.
3073  Label done;
3074  CmpObjectType(result, MAP_TYPE, kScratchRegister);
3075  j(not_equal, &done, Label::kNear);
3076 
3077  // Get the prototype from the initial map.
3078  movq(result, FieldOperand(result, Map::kPrototypeOffset));
3079  jmp(&done, Label::kNear);
3080 
3081  // Non-instance prototype: Fetch prototype from constructor field
3082  // in initial map.
3083  bind(&non_instance);
3084  movq(result, FieldOperand(result, Map::kConstructorOffset));
3085 
3086  // All done.
3087  bind(&done);
3088 }
3089 
3090 
3091 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
3092  if (FLAG_native_code_counters && counter->Enabled()) {
3093  Operand counter_operand = ExternalOperand(ExternalReference(counter));
3094  movl(counter_operand, Immediate(value));
3095  }
3096 }
3097 
3098 
3099 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
3100  ASSERT(value > 0);
3101  if (FLAG_native_code_counters && counter->Enabled()) {
3102  Operand counter_operand = ExternalOperand(ExternalReference(counter));
3103  if (value == 1) {
3104  incl(counter_operand);
3105  } else {
3106  addl(counter_operand, Immediate(value));
3107  }
3108  }
3109 }
3110 
3111 
3112 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
3113  ASSERT(value > 0);
3114  if (FLAG_native_code_counters && counter->Enabled()) {
3115  Operand counter_operand = ExternalOperand(ExternalReference(counter));
3116  if (value == 1) {
3117  decl(counter_operand);
3118  } else {
3119  subl(counter_operand, Immediate(value));
3120  }
3121  }
3122 }
3123 
3124 
3125 #ifdef ENABLE_DEBUGGER_SUPPORT
3126 void MacroAssembler::DebugBreak() {
3127  Set(rax, 0); // No arguments.
3128  LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
3129  CEntryStub ces(1);
3130  ASSERT(AllowThisStubCall(&ces));
3131  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
3132 }
3133 #endif // ENABLE_DEBUGGER_SUPPORT
3134 
3135 
3136 void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
3137  // This macro takes the dst register to make the code more readable
3138  // at the call sites. However, the dst register has to be rcx to
3139  // follow the calling convention which requires the call type to be
3140  // in rcx.
3141  ASSERT(dst.is(rcx));
3142  if (call_kind == CALL_AS_FUNCTION) {
3143  LoadSmiConstant(dst, Smi::FromInt(1));
3144  } else {
3145  LoadSmiConstant(dst, Smi::FromInt(0));
3146  }
3147 }
3148 
3149 
3150 void MacroAssembler::InvokeCode(Register code,
3151  const ParameterCount& expected,
3152  const ParameterCount& actual,
3153  InvokeFlag flag,
3154  const CallWrapper& call_wrapper,
3155  CallKind call_kind) {
3156  // You can't call a function without a valid frame.
3157  ASSERT(flag == JUMP_FUNCTION || has_frame());
3158 
3159  Label done;
3160  bool definitely_mismatches = false;
3161  InvokePrologue(expected,
3162  actual,
3163  Handle<Code>::null(),
3164  code,
3165  &done,
3166  &definitely_mismatches,
3167  flag,
3168  Label::kNear,
3169  call_wrapper,
3170  call_kind);
3171  if (!definitely_mismatches) {
3172  if (flag == CALL_FUNCTION) {
3173  call_wrapper.BeforeCall(CallSize(code));
3174  SetCallKind(rcx, call_kind);
3175  call(code);
3176  call_wrapper.AfterCall();
3177  } else {
3178  ASSERT(flag == JUMP_FUNCTION);
3179  SetCallKind(rcx, call_kind);
3180  jmp(code);
3181  }
3182  bind(&done);
3183  }
3184 }
3185 
3186 
3187 void MacroAssembler::InvokeCode(Handle<Code> code,
3188  const ParameterCount& expected,
3189  const ParameterCount& actual,
3190  RelocInfo::Mode rmode,
3191  InvokeFlag flag,
3192  const CallWrapper& call_wrapper,
3193  CallKind call_kind) {
3194  // You can't call a function without a valid frame.
3195  ASSERT(flag == JUMP_FUNCTION || has_frame());
3196 
3197  Label done;
3198  bool definitely_mismatches = false;
3199  Register dummy = rax;
3200  InvokePrologue(expected,
3201  actual,
3202  code,
3203  dummy,
3204  &done,
3205  &definitely_mismatches,
3206  flag,
3207  Label::kNear,
3208  call_wrapper,
3209  call_kind);
3210  if (!definitely_mismatches) {
3211  if (flag == CALL_FUNCTION) {
3212  call_wrapper.BeforeCall(CallSize(code));
3213  SetCallKind(rcx, call_kind);
3214  Call(code, rmode);
3215  call_wrapper.AfterCall();
3216  } else {
3217  ASSERT(flag == JUMP_FUNCTION);
3218  SetCallKind(rcx, call_kind);
3219  Jump(code, rmode);
3220  }
3221  bind(&done);
3222  }
3223 }
3224 
3225 
3226 void MacroAssembler::InvokeFunction(Register function,
3227  const ParameterCount& actual,
3228  InvokeFlag flag,
3229  const CallWrapper& call_wrapper,
3230  CallKind call_kind) {
3231  // You can't call a function without a valid frame.
3232  ASSERT(flag == JUMP_FUNCTION || has_frame());
3233 
3234  ASSERT(function.is(rdi));
3235  movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3236  movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
3237  movsxlq(rbx,
3238  FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
3239  // Advances rdx to the end of the Code object header, to the start of
3240  // the executable code.
3241  movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3242 
3243  ParameterCount expected(rbx);
3244  InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
3245 }
3246 
3247 
3248 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
3249  const ParameterCount& actual,
3250  InvokeFlag flag,
3251  const CallWrapper& call_wrapper,
3252  CallKind call_kind) {
3253  // You can't call a function without a valid frame.
3254  ASSERT(flag == JUMP_FUNCTION || has_frame());
3255 
3256  // Get the function and setup the context.
3257  LoadHeapObject(rdi, function);
3258  movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
3259 
3260  // We call indirectly through the code field in the function to
3261  // allow recompilation to take effect without changing any of the
3262  // call sites.
3263  movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3264  ParameterCount expected(function->shared()->formal_parameter_count());
3265  InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
3266 }
3267 
3268 
3269 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3270  const ParameterCount& actual,
3271  Handle<Code> code_constant,
3272  Register code_register,
3273  Label* done,
3274  bool* definitely_mismatches,
3275  InvokeFlag flag,
3276  Label::Distance near_jump,
3277  const CallWrapper& call_wrapper,
3278  CallKind call_kind) {
3279  bool definitely_matches = false;
3280  *definitely_mismatches = false;
3281  Label invoke;
3282  if (expected.is_immediate()) {
3283  ASSERT(actual.is_immediate());
3284  if (expected.immediate() == actual.immediate()) {
3285  definitely_matches = true;
3286  } else {
3287  Set(rax, actual.immediate());
3288  if (expected.immediate() ==
3289  SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
3290  // Don't worry about adapting arguments for built-ins that
3291  // don't want that done. Skip the adaptation code by making it look
3292  // like we have a match between expected and actual number of
3293  // arguments.
3294  definitely_matches = true;
3295  } else {
3296  *definitely_mismatches = true;
3297  Set(rbx, expected.immediate());
3298  }
3299  }
3300  } else {
3301  if (actual.is_immediate()) {
3302  // Expected is in register, actual is immediate. This is the
3303  // case when we invoke function values without going through the
3304  // IC mechanism.
3305  cmpq(expected.reg(), Immediate(actual.immediate()));
3306  j(equal, &invoke, Label::kNear);
3307  ASSERT(expected.reg().is(rbx));
3308  Set(rax, actual.immediate());
3309  } else if (!expected.reg().is(actual.reg())) {
3310  // Both expected and actual are in (different) registers. This
3311  // is the case when we invoke functions using call and apply.
3312  cmpq(expected.reg(), actual.reg());
3313  j(equal, &invoke, Label::kNear);
3314  ASSERT(actual.reg().is(rax));
3315  ASSERT(expected.reg().is(rbx));
3316  }
3317  }
3318 
3319  if (!definitely_matches) {
3320  Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
3321  if (!code_constant.is_null()) {
3322  movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
3323  addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
3324  } else if (!code_register.is(rdx)) {
3325  movq(rdx, code_register);
3326  }
3327 
3328  if (flag == CALL_FUNCTION) {
3329  call_wrapper.BeforeCall(CallSize(adaptor));
3330  SetCallKind(rcx, call_kind);
3331  Call(adaptor, RelocInfo::CODE_TARGET);
3332  call_wrapper.AfterCall();
3333  if (!*definitely_mismatches) {
3334  jmp(done, near_jump);
3335  }
3336  } else {
3337  SetCallKind(rcx, call_kind);
3338  Jump(adaptor, RelocInfo::CODE_TARGET);
3339  }
3340  bind(&invoke);
3341  }
3342 }
3343 
3344 
3345 void MacroAssembler::EnterFrame(StackFrame::Type type) {
3346  push(rbp);
3347  movq(rbp, rsp);
3348  push(rsi); // Context.
3349  Push(Smi::FromInt(type));
3350  movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
3351  push(kScratchRegister);
3352  if (emit_debug_code()) {
3353  movq(kScratchRegister,
3354  isolate()->factory()->undefined_value(),
3355  RelocInfo::EMBEDDED_OBJECT);
3356  cmpq(Operand(rsp, 0), kScratchRegister);
3357  Check(not_equal, "code object not properly patched");
3358  }
3359 }
3360 
3361 
3362 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
3363  if (emit_debug_code()) {
3364  Move(kScratchRegister, Smi::FromInt(type));
3365  cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
3366  Check(equal, "stack frame types must match");
3367  }
3368  movq(rsp, rbp);
3369  pop(rbp);
3370 }
3371 
3372 
3373 void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
3374  // Set up the frame structure on the stack.
3375  // All constants are relative to the frame pointer of the exit frame.
3376  ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
3377  ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
3378  ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
3379  push(rbp);
3380  movq(rbp, rsp);
3381 
3382  // Reserve room for entry stack pointer and push the code object.
3383  ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
3384  push(Immediate(0)); // Saved entry sp, patched before call.
3385  movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
3386  push(kScratchRegister); // Accessed from ExitFrame::code_slot.
3387 
3388  // Save the frame pointer and the context in top.
3389  if (save_rax) {
3390  movq(r14, rax); // Backup rax in callee-save register.
3391  }
3392 
3393  Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
3394  Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi);
3395 }
3396 
3397 
3398 void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
3399  bool save_doubles) {
3400 #ifdef _WIN64
3401  const int kShadowSpace = 4;
3402  arg_stack_space += kShadowSpace;
3403 #endif
3404  // Optionally save all XMM registers.
3405  if (save_doubles) {
3406  int space = XMMRegister::kNumRegisters * kDoubleSize +
3407  arg_stack_space * kPointerSize;
3408  subq(rsp, Immediate(space));
3409  int offset = -2 * kPointerSize;
3410  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
3411  XMMRegister reg = XMMRegister::FromAllocationIndex(i);
3412  movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
3413  }
3414  } else if (arg_stack_space > 0) {
3415  subq(rsp, Immediate(arg_stack_space * kPointerSize));
3416  }
3417 
3418  // Get the required frame alignment for the OS.
3419  const int kFrameAlignment = OS::ActivationFrameAlignment();
3420  if (kFrameAlignment > 0) {
3421  ASSERT(IsPowerOf2(kFrameAlignment));
3422  ASSERT(is_int8(kFrameAlignment));
3423  and_(rsp, Immediate(-kFrameAlignment));
3424  }
3425 
3426  // Patch the saved entry sp.
3427  movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
3428 }
3429 
3430 
3431 void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
3432  EnterExitFramePrologue(true);
3433 
3434  // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
3435  // so it must be retained across the C-call.
3436  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
3437  lea(r15, Operand(rbp, r14, times_pointer_size, offset));
3438 
3439  EnterExitFrameEpilogue(arg_stack_space, save_doubles);
3440 }
3441 
3442 
3443 void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
3444  EnterExitFramePrologue(false);
3445  EnterExitFrameEpilogue(arg_stack_space, false);
3446 }
3447 
3448 
3449 void MacroAssembler::LeaveExitFrame(bool save_doubles) {
3450  // Registers:
3451  // r15 : argv
3452  if (save_doubles) {
3453  int offset = -2 * kPointerSize;
3454  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
3455  XMMRegister reg = XMMRegister::FromAllocationIndex(i);
3456  movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
3457  }
3458  }
3459  // Get the return address from the stack and restore the frame pointer.
3460  movq(rcx, Operand(rbp, 1 * kPointerSize));
3461  movq(rbp, Operand(rbp, 0 * kPointerSize));
3462 
3463  // Drop everything up to and including the arguments and the receiver
3464  // from the caller stack.
3465  lea(rsp, Operand(r15, 1 * kPointerSize));
3466 
3467  // Push the return address to get ready to return.
3468  push(rcx);
3469 
3470  LeaveExitFrameEpilogue();
3471 }
3472 
3473 
3474 void MacroAssembler::LeaveApiExitFrame() {
3475  movq(rsp, rbp);
3476  pop(rbp);
3477 
3478  LeaveExitFrameEpilogue();
3479 }
3480 
3481 
3482 void MacroAssembler::LeaveExitFrameEpilogue() {
3483  // Restore current context from top and clear it in debug mode.
3484  ExternalReference context_address(Isolate::kContextAddress, isolate());
3485  Operand context_operand = ExternalOperand(context_address);
3486  movq(rsi, context_operand);
3487 #ifdef DEBUG
3488  movq(context_operand, Immediate(0));
3489 #endif
3490 
3491  // Clear the top frame.
3492  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
3493  isolate());
3494  Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
3495  movq(c_entry_fp_operand, Immediate(0));
3496 }
3497 
3498 
3499 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
3500  Register scratch,
3501  Label* miss) {
3502  Label same_contexts;
3503 
3504  ASSERT(!holder_reg.is(scratch));
3505  ASSERT(!scratch.is(kScratchRegister));
3506  // Load current lexical context from the stack frame.
3507  movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
3508 
3509  // When generating debug code, make sure the lexical context is set.
3510  if (emit_debug_code()) {
3511  cmpq(scratch, Immediate(0));
3512  Check(not_equal, "we should not have an empty lexical context");
3513  }
3514  // Load the native context of the current context.
3515  int offset =
3516  Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
3517  movq(scratch, FieldOperand(scratch, offset));
3518  movq(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
3519 
3520  // Check the context is a native context.
3521  if (emit_debug_code()) {
3522  Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
3523  isolate()->factory()->native_context_map());
3524  Check(equal, "JSGlobalObject::native_context should be a native context.");
3525  }
3526 
3527  // Check if both contexts are the same.
3528  cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
3529  j(equal, &same_contexts);
3530 
3531  // Compare security tokens.
3532  // Check that the security token in the calling global object is
3533  // compatible with the security token in the receiving global
3534  // object.
3535 
3536  // Check the context is a native context.
3537  if (emit_debug_code()) {
3538  // Preserve original value of holder_reg.
3539  push(holder_reg);
3540  movq(holder_reg,
3541  FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
3542  CompareRoot(holder_reg, Heap::kNullValueRootIndex);
3543  Check(not_equal, "JSGlobalProxy::context() should not be null.");
3544 
3545  // Read the first word and compare to native_context_map().
3546  movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
3547  CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
3548  Check(equal, "JSGlobalObject::native_context should be a native context.");
3549  pop(holder_reg);
3550  }
3551 
3552  movq(kScratchRegister,
3553  FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
3554  int token_offset =
3555  Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
3556  movq(scratch, FieldOperand(scratch, token_offset));
3557  cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
3558  j(not_equal, miss);
3559 
3560  bind(&same_contexts);
3561 }
3562 
3563 
3564 void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
3565  // First of all we assign the hash seed to scratch.
3566  LoadRoot(scratch, Heap::kHashSeedRootIndex);
3567  SmiToInteger32(scratch, scratch);
3568 
3569  // Xor original key with a seed.
3570  xorl(r0, scratch);
3571 
3572  // Compute the hash code from the untagged key. This must be kept in sync
3573  // with ComputeIntegerHash in utils.h.
3574  //
3575  // hash = ~hash + (hash << 15);
3576  movl(scratch, r0);
3577  notl(r0);
3578  shll(scratch, Immediate(15));
3579  addl(r0, scratch);
3580  // hash = hash ^ (hash >> 12);
3581  movl(scratch, r0);
3582  shrl(scratch, Immediate(12));
3583  xorl(r0, scratch);
3584  // hash = hash + (hash << 2);
3585  leal(r0, Operand(r0, r0, times_4, 0));
3586  // hash = hash ^ (hash >> 4);
3587  movl(scratch, r0);
3588  shrl(scratch, Immediate(4));
3589  xorl(r0, scratch);
3590  // hash = hash * 2057;
3591  imull(r0, r0, Immediate(2057));
3592  // hash = hash ^ (hash >> 16);
3593  movl(scratch, r0);
3594  shrl(scratch, Immediate(16));
3595  xorl(r0, scratch);
3596 }
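The bit-mixing sequence above must stay in sync with ComputeIntegerHash in utils.h. As a reference, here is the same arithmetic written as a standalone C++ function (ComputeSeededIntegerHash is an illustrative name; the seed corresponds to the heap's hash seed root):

#include <cstdint>

// Standalone restatement of the arithmetic above (illustrative only).
uint32_t ComputeSeededIntegerHash(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;   // xorl(r0, scratch)
  hash = ~hash + (hash << 15);  // notl / shll / addl
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);    // leal(r0, Operand(r0, r0, times_4, 0))
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // imull(r0, r0, Immediate(2057))
  hash = hash ^ (hash >> 16);
  return hash;
}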
3597 
3598 
3599 
3600 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
3601  Register elements,
3602  Register key,
3603  Register r0,
3604  Register r1,
3605  Register r2,
3606  Register result) {
3607  // Register use:
3608  //
3609  // elements - holds the slow-case elements of the receiver on entry.
3610  // Unchanged unless 'result' is the same register.
3611  //
3612  // key - holds the smi key on entry.
3613  // Unchanged unless 'result' is the same register.
3614  //
3615  // Scratch registers:
3616  //
3617  // r0 - holds the untagged key on entry and holds the hash once computed.
3618  //
3619  // r1 - used to hold the capacity mask of the dictionary
3620  //
3621  // r2 - used for the index into the dictionary.
3622  //
3623  // result - holds the result on exit if the load succeeded.
3624  // Allowed to be the same as 'key' or 'result'.
3625  // Unchanged on bailout so 'key' or 'result' can be used
3626  // in further computation.
3627 
3628  Label done;
3629 
3630  GetNumberHash(r0, r1);
3631 
3632  // Compute capacity mask.
3633  SmiToInteger32(r1, FieldOperand(elements,
3634  SeededNumberDictionary::kCapacityOffset));
3635  decl(r1);
3636 
3637  // Generate an unrolled loop that performs a few probes before giving up.
3638  const int kProbes = 4;
3639  for (int i = 0; i < kProbes; i++) {
3640  // Use r2 for index calculations and keep the hash intact in r0.
3641  movq(r2, r0);
3642  // Compute the masked index: (hash + i + i * i) & mask.
3643  if (i > 0) {
3644  addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
3645  }
3646  and_(r2, r1);
3647 
3648  // Scale the index by multiplying by the entry size.
3649  ASSERT(SeededNumberDictionary::kEntrySize == 3);
3650  lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
3651 
3652  // Check if the key matches.
3653  cmpq(key, FieldOperand(elements,
3654  r2,
3655  times_pointer_size,
3656  SeededNumberDictionary::kElementsStartOffset));
3657  if (i != (kProbes - 1)) {
3658  j(equal, &done);
3659  } else {
3660  j(not_equal, miss);
3661  }
3662  }
3663 
3664  bind(&done);
3665  // Check that the value is a normal property.
3666  const int kDetailsOffset =
3667  SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
3668  ASSERT_EQ(NORMAL, 0);
3669  Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
3670  Smi::FromInt(PropertyDetails::TypeField::kMask));
3671  j(not_zero, miss);
3672 
3673  // Get the value at the masked, scaled index.
3674  const int kValueOffset =
3675  SeededNumberDictionary::kElementsStartOffset + kPointerSize;
3676  movq(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
3677 }
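The unrolled loop probes an open-addressed dictionary whose capacity is a power of two and whose entries are three pointers wide (key, value, details). A sketch of the equivalent lookup in plain C++, following the probe formula stated in the comment above (EntrySketch and FindEntrySketch are hypothetical names; the real offsets come from SeededNumberDictionary::GetProbeOffset):

#include <cstdint>

// Illustrative layout: each entry is kEntrySize == 3 pointers wide.
struct EntrySketch { intptr_t key; intptr_t value; intptr_t details; };

int FindEntrySketch(const EntrySketch* entries, uint32_t capacity,
                    uint32_t hash, intptr_t key, int max_probes) {
  uint32_t mask = capacity - 1;  // capacity is a power of two
  for (int i = 0; i < max_probes; i++) {        // kProbes == 4 above
    uint32_t index = (hash + i + i * i) & mask; // per the comment above
    if (entries[index].key == key) return static_cast<int>(index);
  }
  return -1;  // corresponds to jumping to the miss label
}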
3678 
3679 
3680 void MacroAssembler::LoadAllocationTopHelper(Register result,
3681  Register scratch,
3682  AllocationFlags flags) {
3683  ExternalReference new_space_allocation_top =
3684  ExternalReference::new_space_allocation_top_address(isolate());
3685 
3686  // Just return if allocation top is already known.
3687  if ((flags & RESULT_CONTAINS_TOP) != 0) {
3688  // No use of scratch if allocation top is provided.
3689  ASSERT(!scratch.is_valid());
3690 #ifdef DEBUG
3691  // Assert that result actually contains top on entry.
3692  Operand top_operand = ExternalOperand(new_space_allocation_top);
3693  cmpq(result, top_operand);
3694  Check(equal, "Unexpected allocation top");
3695 #endif
3696  return;
3697  }
3698 
3699  // Move address of new object to result. Use scratch register if available,
3700  // and keep address in scratch until call to UpdateAllocationTopHelper.
3701  if (scratch.is_valid()) {
3702  LoadAddress(scratch, new_space_allocation_top);
3703  movq(result, Operand(scratch, 0));
3704  } else {
3705  Load(result, new_space_allocation_top);
3706  }
3707 }
3708 
3709 
3710 void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
3711  Register scratch) {
3712  if (emit_debug_code()) {
3713  testq(result_end, Immediate(kObjectAlignmentMask));
3714  Check(zero, "Unaligned allocation in new space");
3715  }
3716 
3717  ExternalReference new_space_allocation_top =
3718  ExternalReference::new_space_allocation_top_address(isolate());
3719 
3720  // Update new top.
3721  if (scratch.is_valid()) {
3722  // Scratch already contains address of allocation top.
3723  movq(Operand(scratch, 0), result_end);
3724  } else {
3725  Store(new_space_allocation_top, result_end);
3726  }
3727 }
3728 
3729 
3730 void MacroAssembler::AllocateInNewSpace(int object_size,
3731  Register result,
3732  Register result_end,
3733  Register scratch,
3734  Label* gc_required,
3735  AllocationFlags flags) {
3736  if (!FLAG_inline_new) {
3737  if (emit_debug_code()) {
3738  // Trash the registers to simulate an allocation failure.
3739  movl(result, Immediate(0x7091));
3740  if (result_end.is_valid()) {
3741  movl(result_end, Immediate(0x7191));
3742  }
3743  if (scratch.is_valid()) {
3744  movl(scratch, Immediate(0x7291));
3745  }
3746  }
3747  jmp(gc_required);
3748  return;
3749  }
3750  ASSERT(!result.is(result_end));
3751 
3752  // Load address of new object into result.
3753  LoadAllocationTopHelper(result, scratch, flags);
3754 
3755  // Calculate new top and bail out if new space is exhausted.
3756  ExternalReference new_space_allocation_limit =
3757  ExternalReference::new_space_allocation_limit_address(isolate());
3758 
3759  Register top_reg = result_end.is_valid() ? result_end : result;
3760 
3761  if (!top_reg.is(result)) {
3762  movq(top_reg, result);
3763  }
3764  addq(top_reg, Immediate(object_size));
3765  j(carry, gc_required);
3766  Operand limit_operand = ExternalOperand(new_space_allocation_limit);
3767  cmpq(top_reg, limit_operand);
3768  j(above, gc_required);
3769 
3770  // Update allocation top.
3771  UpdateAllocationTopHelper(top_reg, scratch);
3772 
3773  if (top_reg.is(result)) {
3774  if ((flags & TAG_OBJECT) != 0) {
3775  subq(result, Immediate(object_size - kHeapObjectTag));
3776  } else {
3777  subq(result, Immediate(object_size));
3778  }
3779  } else if ((flags & TAG_OBJECT) != 0) {
3780  // Tag the result if requested.
3781  addq(result, Immediate(kHeapObjectTag));
3782  }
3783 }
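All three AllocateInNewSpace overloads emit the same bump-pointer pattern: add the requested size to the allocation top, bail out to the GC if the result overflows or exceeds the allocation limit, then publish the new top. A simplified sketch, assuming untagged addresses fit in uintptr_t (AllocateInline and kTag are illustrative names):

#include <cstdint>

// Illustrative sketch (not V8 code) of the inline allocation fast path.
const uintptr_t kTag = 1;  // stands in for kHeapObjectTag

uintptr_t AllocateInline(uintptr_t* top, uintptr_t limit, int object_size,
                         bool tag_object, bool* gc_required) {
  uintptr_t result = *top;
  uintptr_t new_top = result + object_size;
  if (new_top < result || new_top > limit) {   // j(carry) / j(above, gc_required)
    *gc_required = true;
    return 0;
  }
  *top = new_top;                              // UpdateAllocationTopHelper
  *gc_required = false;
  return tag_object ? result + kTag : result;  // TAG_OBJECT
}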
3784 
3785 
3786 void MacroAssembler::AllocateInNewSpace(int header_size,
3787  ScaleFactor element_size,
3788  Register element_count,
3789  Register result,
3790  Register result_end,
3791  Register scratch,
3792  Label* gc_required,
3793  AllocationFlags flags) {
3794  if (!FLAG_inline_new) {
3795  if (emit_debug_code()) {
3796  // Trash the registers to simulate an allocation failure.
3797  movl(result, Immediate(0x7091));
3798  movl(result_end, Immediate(0x7191));
3799  if (scratch.is_valid()) {
3800  movl(scratch, Immediate(0x7291));
3801  }
3802  // Register element_count is not modified by the function.
3803  }
3804  jmp(gc_required);
3805  return;
3806  }
3807  ASSERT(!result.is(result_end));
3808 
3809  // Load address of new object into result.
3810  LoadAllocationTopHelper(result, scratch, flags);
3811 
3812  // Calculate new top and bail out if new space is exhausted.
3813  ExternalReference new_space_allocation_limit =
3814  ExternalReference::new_space_allocation_limit_address(isolate());
3815 
3816  // We assume that element_count*element_size + header_size does not
3817  // overflow.
3818  lea(result_end, Operand(element_count, element_size, header_size));
3819  addq(result_end, result);
3820  j(carry, gc_required);
3821  Operand limit_operand = ExternalOperand(new_space_allocation_limit);
3822  cmpq(result_end, limit_operand);
3823  j(above, gc_required);
3824 
3825  // Update allocation top.
3826  UpdateAllocationTopHelper(result_end, scratch);
3827 
3828  // Tag the result if requested.
3829  if ((flags & TAG_OBJECT) != 0) {
3830  addq(result, Immediate(kHeapObjectTag));
3831  }
3832 }
3833 
3834 
3835 void MacroAssembler::AllocateInNewSpace(Register object_size,
3836  Register result,
3837  Register result_end,
3838  Register scratch,
3839  Label* gc_required,
3840  AllocationFlags flags) {
3841  if (!FLAG_inline_new) {
3842  if (emit_debug_code()) {
3843  // Trash the registers to simulate an allocation failure.
3844  movl(result, Immediate(0x7091));
3845  movl(result_end, Immediate(0x7191));
3846  if (scratch.is_valid()) {
3847  movl(scratch, Immediate(0x7291));
3848  }
3849  // object_size is left unchanged by this function.
3850  }
3851  jmp(gc_required);
3852  return;
3853  }
3854  ASSERT(!result.is(result_end));
3855 
3856  // Load address of new object into result.
3857  LoadAllocationTopHelper(result, scratch, flags);
3858 
3859  // Calculate new top and bail out if new space is exhausted.
3860  ExternalReference new_space_allocation_limit =
3861  ExternalReference::new_space_allocation_limit_address(isolate());
3862  if (!object_size.is(result_end)) {
3863  movq(result_end, object_size);
3864  }
3865  addq(result_end, result);
3866  j(carry, gc_required);
3867  Operand limit_operand = ExternalOperand(new_space_allocation_limit);
3868  cmpq(result_end, limit_operand);
3869  j(above, gc_required);
3870 
3871  // Update allocation top.
3872  UpdateAllocationTopHelper(result_end, scratch);
3873 
3874  // Tag the result if requested.
3875  if ((flags & TAG_OBJECT) != 0) {
3876  addq(result, Immediate(kHeapObjectTag));
3877  }
3878 }
3879 
3880 
3881 void MacroAssembler::UndoAllocationInNewSpace(Register object) {
3882  ExternalReference new_space_allocation_top =
3883  ExternalReference::new_space_allocation_top_address(isolate());
3884 
3885  // Make sure the object has no tag before resetting top.
3886  and_(object, Immediate(~kHeapObjectTagMask));
3887  Operand top_operand = ExternalOperand(new_space_allocation_top);
3888 #ifdef DEBUG
3889  cmpq(object, top_operand);
3890  Check(below, "Undo allocation of non allocated memory");
3891 #endif
3892  movq(top_operand, object);
3893 }
3894 
3895 
3896 void MacroAssembler::AllocateHeapNumber(Register result,
3897  Register scratch,
3898  Label* gc_required) {
3899  // Allocate heap number in new space.
3900  AllocateInNewSpace(HeapNumber::kSize,
3901  result,
3902  scratch,
3903  no_reg,
3904  gc_required,
3905  TAG_OBJECT);
3906 
3907  // Set the map.
3908  LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
3909  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
3910 }
3911 
3912 
3913 void MacroAssembler::AllocateTwoByteString(Register result,
3914  Register length,
3915  Register scratch1,
3916  Register scratch2,
3917  Register scratch3,
3918  Label* gc_required) {
3919  // Calculate the number of bytes needed for the characters in the string while
3920  // observing object alignment.
3921  const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
3922  kObjectAlignmentMask;
3923  ASSERT(kShortSize == 2);
3924  // scratch1 = length * 2 + kObjectAlignmentMask.
3925  lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
3926  kHeaderAlignment));
3927  and_(scratch1, Immediate(~kObjectAlignmentMask));
3928  if (kHeaderAlignment > 0) {
3929  subq(scratch1, Immediate(kHeaderAlignment));
3930  }
3931 
3932  // Allocate a two-byte string in new space.
3933  AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
3934  times_1,
3935  scratch1,
3936  result,
3937  scratch2,
3938  scratch3,
3939  gc_required,
3940  TAG_OBJECT);
3941 
3942  // Set the map, length and hash field.
3943  LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
3944  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
3945  Integer32ToSmi(scratch1, length);
3946  movq(FieldOperand(result, String::kLengthOffset), scratch1);
3947  movq(FieldOperand(result, String::kHashFieldOffset),
3948  Immediate(String::kEmptyHashField));
3949 }
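The size computation above rounds the character payload up to object alignment and compensates for any misalignment of the header, so that header plus payload is a multiple of the object alignment. A sketch of the same arithmetic with the constants passed in explicitly (TwoByteStringPayloadSize is an illustrative name):

// Illustrative sketch of the payload-size arithmetic above; constants are
// passed in rather than taken from the real V8 headers.
int TwoByteStringPayloadSize(int length, int header_size, int alignment_mask) {
  int header_alignment = header_size & alignment_mask;
  // lea: length * 2 + alignment_mask + header_alignment, then align down
  // and drop the header-misalignment correction again.
  int size = (length * 2 + alignment_mask + header_alignment) & ~alignment_mask;
  if (header_alignment > 0) size -= header_alignment;
  // AllocateInNewSpace then adds header_size, so the total object size is
  // header_size + size, a multiple of the object alignment.
  return size;
}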
3950 
3951 
3952 void MacroAssembler::AllocateAsciiString(Register result,
3953  Register length,
3954  Register scratch1,
3955  Register scratch2,
3956  Register scratch3,
3957  Label* gc_required) {
3958  // Calculate the number of bytes needed for the characters in the string while
3959  // observing object alignment.
3960  const int kHeaderAlignment = SeqAsciiString::kHeaderSize &
3961  kObjectAlignmentMask;
3962  movl(scratch1, length);
3963  ASSERT(kCharSize == 1);
3964  addq(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
3965  and_(scratch1, Immediate(~kObjectAlignmentMask));
3966  if (kHeaderAlignment > 0) {
3967  subq(scratch1, Immediate(kHeaderAlignment));
3968  }
3969 
3970  // Allocate an ASCII string in new space.
3971  AllocateInNewSpace(SeqAsciiString::kHeaderSize,
3972  times_1,
3973  scratch1,
3974  result,
3975  scratch2,
3976  scratch3,
3977  gc_required,
3978  TAG_OBJECT);
3979 
3980  // Set the map, length and hash field.
3981  LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
3982  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
3983  Integer32ToSmi(scratch1, length);
3984  movq(FieldOperand(result, String::kLengthOffset), scratch1);
3985  movq(FieldOperand(result, String::kHashFieldOffset),
3986  Immediate(String::kEmptyHashField));
3987 }
3988 
3989 
3990 void MacroAssembler::AllocateTwoByteConsString(Register result,
3991  Register scratch1,
3992  Register scratch2,
3993  Label* gc_required) {
3994  // Allocate a two-byte cons string object in new space.
3995  AllocateInNewSpace(ConsString::kSize,
3996  result,
3997  scratch1,
3998  scratch2,
3999  gc_required,
4000  TAG_OBJECT);
4001 
4002  // Set the map. The other fields are left uninitialized.
4003  LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
4004  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4005 }
4006 
4007 
4008 void MacroAssembler::AllocateAsciiConsString(Register result,
4009  Register scratch1,
4010  Register scratch2,
4011  Label* gc_required) {
4012  // Allocate an ASCII cons string object in new space.
4013  AllocateInNewSpace(ConsString::kSize,
4014  result,
4015  scratch1,
4016  scratch2,
4017  gc_required,
4018  TAG_OBJECT);
4019 
4020  // Set the map. The other fields are left uninitialized.
4021  LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
4022  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4023 }
4024 
4025 
4026 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
4027  Register scratch1,
4028  Register scratch2,
4029  Label* gc_required) {
4030  // Allocate a two-byte sliced string object in new space.
4031  AllocateInNewSpace(SlicedString::kSize,
4032  result,
4033  scratch1,
4034  scratch2,
4035  gc_required,
4036  TAG_OBJECT);
4037 
4038  // Set the map. The other fields are left uninitialized.
4039  LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
4040  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4041 }
4042 
4043 
4044 void MacroAssembler::AllocateAsciiSlicedString(Register result,
4045  Register scratch1,
4046  Register scratch2,
4047  Label* gc_required) {
4048  // Allocate an ASCII sliced string object in new space.
4049  AllocateInNewSpace(SlicedString::kSize,
4050  result,
4051  scratch1,
4052  scratch2,
4053  gc_required,
4054  TAG_OBJECT);
4055 
4056  // Set the map. The other fields are left uninitialized.
4057  LoadRoot(kScratchRegister, Heap::kSlicedAsciiStringMapRootIndex);
4058  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4059 }
4060 
4061 
4062 // Copy memory, byte-by-byte, from source to destination. Not optimized for
4063 // long or aligned copies. The contents of scratch and length are destroyed.
4064 // Destination is incremented by length, source, length and scratch are
4065 // clobbered.
4066 // A simpler loop is faster on small copies, but slower on large ones.
4067  // The cld() instruction must have been emitted, to set the direction flag,
4068 // before calling this function.
4069 void MacroAssembler::CopyBytes(Register destination,
4070  Register source,
4071  Register length,
4072  int min_length,
4073  Register scratch) {
4074  ASSERT(min_length >= 0);
4075  if (emit_debug_code()) {
4076  cmpl(length, Immediate(min_length));
4077  Assert(greater_equal, "Invalid min_length");
4078  }
4079  Label loop, done, short_string, short_loop;
4080 
4081  const int kLongStringLimit = 20;
4082  if (min_length <= kLongStringLimit) {
4083  cmpl(length, Immediate(kLongStringLimit));
4084  j(less_equal, &short_string);
4085  }
4086 
4087  ASSERT(source.is(rsi));
4088  ASSERT(destination.is(rdi));
4089  ASSERT(length.is(rcx));
4090 
4091  // Because source is 8-byte aligned in our uses of this function,
4092  // we keep source aligned for the rep movs operation by copying the odd bytes
4093  // at the end of the ranges.
4094  movq(scratch, length);
4095  shrl(length, Immediate(3));
4096  repmovsq();
4097  // Move remaining bytes of length.
4098  andl(scratch, Immediate(0x7));
4099  movq(length, Operand(source, scratch, times_1, -8));
4100  movq(Operand(destination, scratch, times_1, -8), length);
4101  addq(destination, scratch);
4102 
4103  if (min_length <= kLongStringLimit) {
4104  jmp(&done);
4105 
4106  bind(&short_string);
4107  if (min_length == 0) {
4108  testl(length, length);
4109  j(zero, &done);
4110  }
4111  lea(scratch, Operand(destination, length, times_1, 0));
4112 
4113  bind(&short_loop);
4114  movb(length, Operand(source, 0));
4115  movb(Operand(destination, 0), length);
4116  incq(source);
4117  incq(destination);
4118  cmpq(destination, scratch);
4119  j(not_equal, &short_loop);
4120 
4121  bind(&done);
4122  }
4123 }
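For copies longer than kLongStringLimit the generated code moves whole 8-byte words with rep movsq and then finishes the 0..7 trailing bytes with a single unaligned 8-byte load/store that overlaps the words already copied. A standalone sketch of that strategy, assuming non-overlapping buffers and length >= 8 (CopyBytesSketch is an illustrative name):

#include <cstddef>
#include <cstdint>
#include <cstring>

// Illustrative sketch (not V8 code) of the long-copy strategy above.
void CopyBytesSketch(uint8_t* dst, const uint8_t* src, std::size_t length) {
  std::size_t words = length >> 3;          // shrl(length, Immediate(3))
  std::memcpy(dst, src, words * 8);         // rep movsq
  // Copy the final 8 bytes ending exactly at src + length; for a remainder
  // of 0..7 bytes this overlaps data the word copy already moved.
  uint64_t last;
  std::memcpy(&last, src + length - 8, 8);  // movq(..., Operand(source, scratch, times_1, -8))
  std::memcpy(dst + length - 8, &last, 8);
}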
4124 
4125 
4126 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
4127  Register end_offset,
4128  Register filler) {
4129  Label loop, entry;
4130  jmp(&entry);
4131  bind(&loop);
4132  movq(Operand(start_offset, 0), filler);
4133  addq(start_offset, Immediate(kPointerSize));
4134  bind(&entry);
4135  cmpq(start_offset, end_offset);
4136  j(less, &loop);
4137 }
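The loop above is the assembly form of a straightforward fill of every pointer-sized slot in [start_offset, end_offset). For reference (FillFields is an illustrative name):

#include <cstdint>

// Illustrative sketch of the fill loop above.
void FillFields(intptr_t* start, intptr_t* end, intptr_t filler) {
  for (intptr_t* p = start; p < end; ++p) *p = filler;  // one kPointerSize slot per store
}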
4138 
4139 
4140 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4141  if (context_chain_length > 0) {
4142  // Move up the chain of contexts to the context containing the slot.
4143  movq(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4144  for (int i = 1; i < context_chain_length; i++) {
4145  movq(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4146  }
4147  } else {
4148  // Slot is in the current function context. Move it into the
4149  // destination register in case we store into it (the write barrier
4150  // cannot be allowed to destroy the context in rsi).
4151  movq(dst, rsi);
4152  }
4153 
4154  // We should not have found a with context by walking the context
4155  // chain (i.e., the static scope chain and runtime context chain do
4156  // not agree). A variable occurring in such a scope should have
4157  // slot type LOOKUP and not CONTEXT.
4158  if (emit_debug_code()) {
4159  CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
4160  Heap::kWithContextMapRootIndex);
4161  Check(not_equal, "Variable resolved to with context.");
4162  }
4163 }
4164 
4165 
4166 void MacroAssembler::LoadTransitionedArrayMapConditional(
4167  ElementsKind expected_kind,
4168  ElementsKind transitioned_kind,
4169  Register map_in_out,
4170  Register scratch,
4171  Label* no_map_match) {
4172  // Load the global or builtins object from the current context.
4173  movq(scratch,
4174  Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4175  movq(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
4176 
4177  // Check that the function's map is the same as the expected cached map.
4178  movq(scratch, Operand(scratch,
4179  Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4180 
4181  int offset = expected_kind * kPointerSize +
4182  FixedArrayBase::kHeaderSize;
4183  cmpq(map_in_out, FieldOperand(scratch, offset));
4184  j(not_equal, no_map_match);
4185 
4186  // Use the transitioned cached map.
4187  offset = transitioned_kind * kPointerSize +
4188  FixedArrayBase::kHeaderSize;
4189  movq(map_in_out, FieldOperand(scratch, offset));
4190 }
4191 
4192 
4193 void MacroAssembler::LoadInitialArrayMap(
4194  Register function_in, Register scratch,
4195  Register map_out, bool can_have_holes) {
4196  ASSERT(!function_in.is(map_out));
4197  Label done;
4198  movq(map_out, FieldOperand(function_in,
4199  JSFunction::kPrototypeOrInitialMapOffset));
4200  if (!FLAG_smi_only_arrays) {
4201  ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
4202  LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
4203  kind,
4204  map_out,
4205  scratch,
4206  &done);
4207  } else if (can_have_holes) {
4208  LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
4210  map_out,
4211  scratch,
4212  &done);
4213  }
4214  bind(&done);
4215 }
4216 
4217 #ifdef _WIN64
4218 static const int kRegisterPassedArguments = 4;
4219 #else
4220 static const int kRegisterPassedArguments = 6;
4221 #endif
4222 
4223 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4224  // Load the global or builtins object from the current context.
4225  movq(function,
4226  Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4227  // Load the native context from the global or builtins object.
4228  movq(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
4229  // Load the function from the native context.
4230  movq(function, Operand(function, Context::SlotOffset(index)));
4231 }
4232 
4233 
4234 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4235  Register map) {
4236  // Load the initial map. The global functions all have initial maps.
4237  movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4238  if (emit_debug_code()) {
4239  Label ok, fail;
4240  CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
4241  jmp(&ok);
4242  bind(&fail);
4243  Abort("Global functions must have initial map");
4244  bind(&ok);
4245  }
4246 }
4247 
4248 
4249 int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
4250  // On Windows 64 stack slots are reserved by the caller for all arguments
4251  // including the ones passed in registers, and space is always allocated for
4252  // the four register arguments even if the function takes fewer than four
4253  // arguments.
4254  // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
4255  // and the caller does not reserve stack slots for them.
4256  ASSERT(num_arguments >= 0);
4257 #ifdef _WIN64
4258  const int kMinimumStackSlots = kRegisterPassedArguments;
4259  if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
4260  return num_arguments;
4261 #else
4262  if (num_arguments < kRegisterPassedArguments) return 0;
4263  return num_arguments - kRegisterPassedArguments;
4264 #endif
4265 }
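A few worked examples of the slot counts, restating the rules from the comment above rather than calling the real MacroAssembler (StackSlotsWin64 and StackSlotsSysV are illustrative names):

#include <cassert>

// Restatement of the two ABI rules for illustration only.
int StackSlotsWin64(int args) { return args < 4 ? 4 : args; }
int StackSlotsSysV(int args) { return args < 6 ? 0 : args - 6; }

void StackSlotExamples() {
  assert(StackSlotsWin64(2) == 4);  // shadow space always covers four arguments
  assert(StackSlotsWin64(7) == 7);  // one slot per argument, including register ones
  assert(StackSlotsSysV(5) == 0);   // the first six arguments travel in registers
  assert(StackSlotsSysV(8) == 2);   // only the overflow arguments need stack slots
}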
4266 
4267 
4268 void MacroAssembler::PrepareCallCFunction(int num_arguments) {
4269  int frame_alignment = OS::ActivationFrameAlignment();
4270  ASSERT(frame_alignment != 0);
4271  ASSERT(num_arguments >= 0);
4272 
4273  // Make stack end at alignment and allocate space for arguments and old rsp.
4274  movq(kScratchRegister, rsp);
4275  ASSERT(IsPowerOf2(frame_alignment));
4276  int argument_slots_on_stack =
4277  ArgumentStackSlotsForCFunctionCall(num_arguments);
4278  subq(rsp, Immediate((argument_slots_on_stack + 1) * kPointerSize));
4279  and_(rsp, Immediate(-frame_alignment));
4280  movq(Operand(rsp, argument_slots_on_stack * kPointerSize), kScratchRegister);
4281 }
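PrepareCallCFunction reserves the argument slots plus one extra slot, aligns rsp downward, and stores the old rsp in that extra slot so CallCFunction can restore it with a single load. A sketch of the pointer arithmetic, assuming 8-byte pointers (AlignedStackTop is an illustrative name):

#include <cstdint>

// Illustrative sketch of the stack adjustment, assuming 8-byte pointers.
uint64_t AlignedStackTop(uint64_t rsp, int argument_slots, int frame_alignment,
                         uint64_t* saved_rsp_slot) {
  uint64_t old_rsp = rsp;                                // movq(kScratchRegister, rsp)
  rsp -= static_cast<uint64_t>(argument_slots + 1) * 8;  // subq(rsp, ...)
  rsp &= ~static_cast<uint64_t>(frame_alignment - 1);    // and_(rsp, -frame_alignment)
  *saved_rsp_slot = old_rsp;  // movq(Operand(rsp, argument_slots * 8), old rsp)
  return rsp;
}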
4282 
4283 
4284 void MacroAssembler::CallCFunction(ExternalReference function,
4285  int num_arguments) {
4286  LoadAddress(rax, function);
4287  CallCFunction(rax, num_arguments);
4288 }
4289 
4290 
4291 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
4292  ASSERT(has_frame());
4293  // Check stack alignment.
4294  if (emit_debug_code()) {
4295  CheckStackAlignment();
4296  }
4297 
4298  call(function);
4299  ASSERT(OS::ActivationFrameAlignment() != 0);
4300  ASSERT(num_arguments >= 0);
4301  int argument_slots_on_stack =
4302  ArgumentStackSlotsForCFunctionCall(num_arguments);
4303  movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize));
4304 }
4305 
4306 
4307 bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
4308  if (r1.is(r2)) return true;
4309  if (r1.is(r3)) return true;
4310  if (r1.is(r4)) return true;
4311  if (r2.is(r3)) return true;
4312  if (r2.is(r4)) return true;
4313  if (r3.is(r4)) return true;
4314  return false;
4315 }
4316 
4317 
4318 CodePatcher::CodePatcher(byte* address, int size)
4319  : address_(address),
4320  size_(size),
4321  masm_(NULL, address, size + Assembler::kGap) {
4322  // Create a new macro assembler pointing to the address of the code to patch.
4323  // The size is adjusted with kGap in order for the assembler to generate size
4324  // bytes of instructions without failing with buffer size constraints.
4325  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4326 }
4327 
4328 
4329 CodePatcher::~CodePatcher() {
4330  // Indicate that code has changed.
4331  CPU::FlushICache(address_, size_);
4332 
4333  // Check that the code was patched as expected.
4334  ASSERT(masm_.pc_ == address_ + size_);
4335  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4336 }
4337 
4338 
4339 void MacroAssembler::CheckPageFlag(
4340  Register object,
4341  Register scratch,
4342  int mask,
4343  Condition cc,
4344  Label* condition_met,
4345  Label::Distance condition_met_distance) {
4346  ASSERT(cc == zero || cc == not_zero);
4347  if (scratch.is(object)) {
4348  and_(scratch, Immediate(~Page::kPageAlignmentMask));
4349  } else {
4350  movq(scratch, Immediate(~Page::kPageAlignmentMask));
4351  and_(scratch, object);
4352  }
4353  if (mask < (1 << kBitsPerByte)) {
4354  testb(Operand(scratch, MemoryChunk::kFlagsOffset),
4355  Immediate(static_cast<uint8_t>(mask)));
4356  } else {
4357  testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
4358  }
4359  j(cc, condition_met, condition_met_distance);
4360 }
4361 
4362 
4363 void MacroAssembler::JumpIfBlack(Register object,
4364  Register bitmap_scratch,
4365  Register mask_scratch,
4366  Label* on_black,
4367  Label::Distance on_black_distance) {
4368  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
4369  GetMarkBits(object, bitmap_scratch, mask_scratch);
4370 
4371  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
4372  // The mask_scratch register contains a 1 at the position of the first bit
4373  // and a 0 at all other positions, including the position of the second bit.
4374  movq(rcx, mask_scratch);
4375  // Make rcx into a mask that covers both marking bits using the operation
4376  // rcx = mask | (mask << 1).
4377  lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
4378  // Note that we are using a 4-byte aligned 8-byte load.
4379  and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
4380  cmpq(mask_scratch, rcx);
4381  j(equal, on_black, on_black_distance);
4382 }
4383 
4384 
4385 // Detect some, but not all, common pointer-free objects. This is used by the
4386 // incremental write barrier which doesn't care about oddballs (they are always
4387 // marked black immediately so this code is not hit).
4388 void MacroAssembler::JumpIfDataObject(
4389  Register value,
4390  Register scratch,
4391  Label* not_data_object,
4392  Label::Distance not_data_object_distance) {
4393  Label is_data_object;
4394  movq(scratch, FieldOperand(value, HeapObject::kMapOffset));
4395  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
4396  j(equal, &is_data_object, Label::kNear);
4397  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
4398  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
4399  // If it's a string and it's not a cons string then it's an object containing
4400  // no GC pointers.
4401  testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
4402  Immediate(kIsIndirectStringMask | kIsNotStringMask));
4403  j(not_zero, not_data_object, not_data_object_distance);
4404  bind(&is_data_object);
4405 }
4406 
4407 
4408 void MacroAssembler::GetMarkBits(Register addr_reg,
4409  Register bitmap_reg,
4410  Register mask_reg) {
4411  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
4412  movq(bitmap_reg, addr_reg);
4413  // Sign extended 32 bit immediate.
4414  and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
4415  movq(rcx, addr_reg);
4416  int shift =
4417  Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
4418  shrl(rcx, Immediate(shift));
4419  and_(rcx,
4420  Immediate((Page::kPageAlignmentMask >> shift) &
4421  ~(Bitmap::kBytesPerCell - 1)));
4422 
4423  addq(bitmap_reg, rcx);
4424  movq(rcx, addr_reg);
4425  shrl(rcx, Immediate(kPointerSizeLog2));
4426  and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
4427  movl(mask_reg, Immediate(1));
4428  shl_cl(mask_reg);
4429 }
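GetMarkBits turns an object address into the address of its 32-bit mark-bitmap cell plus a one-bit mask within that cell. A sketch of the same arithmetic, with the cell and shift constants stated as assumptions (GetMarkBitsSketch is an illustrative name; the MemoryChunk header offset is applied by the callers when they dereference the cell):

#include <cstdint>

// Illustrative sketch; the cell geometry (32-bit cells, 8-byte pointers) and
// the page mask are stated as assumptions, not read from the V8 headers.
void GetMarkBitsSketch(uintptr_t addr, uintptr_t page_alignment_mask,
                       uintptr_t* cell_base, uint32_t* mask) {
  const int kBitsPerCellLog2 = 5;   // 32 mark bits per cell
  const int kBytesPerCellLog2 = 2;  // 4 bytes per cell
  const int kPointerSizeLog2 = 3;   // 8-byte pointers
  uintptr_t page = addr & ~page_alignment_mask;
  int shift = kBitsPerCellLog2 + kPointerSizeLog2 - kBytesPerCellLog2;
  uintptr_t cell_offset = (addr >> shift) &
      ((page_alignment_mask >> shift) &
       ~static_cast<uintptr_t>((1 << kBytesPerCellLog2) - 1));
  *cell_base = page + cell_offset;  // callers add MemoryChunk::kHeaderSize
  uint32_t bit = static_cast<uint32_t>(addr >> kPointerSizeLog2) &
                 ((1u << kBitsPerCellLog2) - 1);
  *mask = 1u << bit;                // shl_cl(mask_reg)
}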
4430 
4431 
4432 void MacroAssembler::EnsureNotWhite(
4433  Register value,
4434  Register bitmap_scratch,
4435  Register mask_scratch,
4436  Label* value_is_white_and_not_data,
4437  Label::Distance distance) {
4438  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
4439  GetMarkBits(value, bitmap_scratch, mask_scratch);
4440 
4441  // If the value is black or grey we don't need to do anything.
4442  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
4443  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
4444  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
4445  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
4446 
4447  Label done;
4448 
4449  // Since both black and grey have a 1 in the first position and white does
4450  // not have a 1 there we only need to check one bit.
4451  testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
4452  j(not_zero, &done, Label::kNear);
4453 
4454  if (emit_debug_code()) {
4455  // Check for impossible bit pattern.
4456  Label ok;
4457  push(mask_scratch);
4458  // shl. May overflow making the check conservative.
4459  addq(mask_scratch, mask_scratch);
4460  testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
4461  j(zero, &ok, Label::kNear);
4462  int3();
4463  bind(&ok);
4464  pop(mask_scratch);
4465  }
4466 
4467  // Value is white. We check whether it is data that doesn't need scanning.
4468  // Currently only checks for HeapNumber and non-cons strings.
4469  Register map = rcx; // Holds map while checking type.
4470  Register length = rcx; // Holds length of object after checking type.
4471  Label not_heap_number;
4472  Label is_data_object;
4473 
4474  // Check for a heap number.
4475  movq(map, FieldOperand(value, HeapObject::kMapOffset));
4476  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
4477  j(not_equal, &not_heap_number, Label::kNear);
4478  movq(length, Immediate(HeapNumber::kSize));
4479  jmp(&is_data_object, Label::kNear);
4480 
4481  bind(&not_heap_number);
4482  // Check for strings.
4483  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
4484  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
4485  // If it's a string and it's not a cons string then it's an object containing
4486  // no GC pointers.
4487  Register instance_type = rcx;
4488  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
4489  testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
4490  j(not_zero, value_is_white_and_not_data);
4491  // It's a non-indirect (non-cons and non-slice) string.
4492  // If it's external, the length is just ExternalString::kSize.
4493  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
4494  Label not_external;
4495  // External strings are the only ones with the kExternalStringTag bit
4496  // set.
4497  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
4498  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
4499  testb(instance_type, Immediate(kExternalStringTag));
4500  j(zero, &not_external, Label::kNear);
4501  movq(length, Immediate(ExternalString::kSize));
4502  jmp(&is_data_object, Label::kNear);
4503 
4504  bind(&not_external);
4505  // Sequential string, either ASCII or UC16.
4506  ASSERT(kAsciiStringTag == 0x04);
4507  and_(length, Immediate(kStringEncodingMask));
4508  xor_(length, Immediate(kStringEncodingMask));
4509  addq(length, Immediate(0x04));
4510  // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
4511  imul(length, FieldOperand(value, String::kLengthOffset));
4512  shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
4513  addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
4514  and_(length, Immediate(~kObjectAlignmentMask));
4515 
4516  bind(&is_data_object);
4517  // Value is a data object, and it is white. Mark it black. Since we know
4518  // that the object is white we can make it black by flipping one bit.
4519  or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
4520 
4521  and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
4522  addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
4523 
4524  bind(&done);
4525 }
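For a sequential string the live-byte count computed above is the header size plus one or two bytes per character, rounded up to object alignment; the 4-or-8 trick encodes the character size shifted left by two. A sketch with the encoding constants assumed to match the ASSERT above (SequentialStringSize is an illustrative name):

// Illustrative sketch; assumes kStringEncodingMask == kAsciiStringTag == 0x04
// as asserted above, and takes the untagged character count directly.
int SequentialStringSize(int instance_type, int char_count, int header_size,
                         int alignment_mask) {
  // 4 for ASCII, 8 for two-byte: the character size shifted left by two.
  int scaled_char_size = ((instance_type & 0x04) ^ 0x04) + 0x04;
  int byte_count = (scaled_char_size * char_count) >> 2;  // undo the shift
  return (byte_count + header_size + alignment_mask) & ~alignment_mask;
}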
4526 
4527 
4528 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
4529  Label next, start;
4530  Register empty_fixed_array_value = r8;
4531  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
4532  movq(rcx, rax);
4533 
4534  // Check if the enum length field is properly initialized, indicating that
4535  // there is an enum cache.
4536  movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
4537 
4538  EnumLength(rdx, rbx);
4539  Cmp(rdx, Smi::FromInt(Map::kInvalidEnumCache));
4540  j(equal, call_runtime);
4541 
4542  jmp(&start);
4543 
4544  bind(&next);
4545 
4546  movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
4547 
4548  // For all objects but the receiver, check that the cache is empty.
4549  EnumLength(rdx, rbx);
4550  Cmp(rdx, Smi::FromInt(0));
4551  j(not_equal, call_runtime);
4552 
4553  bind(&start);
4554 
4555  // Check that there are no elements. Register rcx contains the current JS
4556  // object we've reached through the prototype chain.
4557  cmpq(empty_fixed_array_value,
4558  FieldOperand(rcx, JSObject::kElementsOffset));
4559  j(not_equal, call_runtime);
4560 
4561  movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
4562  cmpq(rcx, null_value);
4563  j(not_equal, &next);
4564 }
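The walk above starts at the receiver, requires its enum cache to be valid, and then demands an empty enum cache and empty elements on every object up the prototype chain before the fast for-in path may be used. A sketch with hypothetical stand-in types (JSObjectSketch and CanUseEnumCache are illustrative names):

#include <cstddef>

// Illustrative stand-in types; the real checks read maps and FixedArrays.
struct JSObjectSketch {
  int enum_length;            // EnumLength of the object's map
  bool has_elements;          // elements != empty_fixed_array
  JSObjectSketch* prototype;  // NULL stands in for the null value sentinel
};

bool CanUseEnumCache(JSObjectSketch* receiver, int kInvalidEnumCache) {
  if (receiver->enum_length == kInvalidEnumCache) return false;
  for (JSObjectSketch* o = receiver; o != NULL; o = o->prototype) {
    // Every prototype must have an empty enum cache; the receiver may not.
    if (o != receiver && o->enum_length != 0) return false;
    if (o->has_elements) return false;  // elements defeat the fast path
  }
  return true;
}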
4565 
4566 
4567 } } // namespace v8::internal
4568 
4569 #endif // V8_TARGET_ARCH_X64