v8 3.25.30 (node 0.11.13)
macro-assembler-x64.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include "v8.h"
29 
30 #if V8_TARGET_ARCH_X64
31 
32 #include "bootstrapper.h"
33 #include "codegen.h"
34 #include "cpu-profiler.h"
35 #include "assembler-x64.h"
36 #include "macro-assembler-x64.h"
37 #include "serialize.h"
38 #include "debug.h"
39 #include "heap.h"
40 #include "isolate-inl.h"
41 
42 namespace v8 {
43 namespace internal {
44 
45 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
46  : Assembler(arg_isolate, buffer, size),
47  generating_stub_(false),
48  has_frame_(false),
49  root_array_available_(true) {
50  if (isolate() != NULL) {
51  code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
52  isolate());
53  }
54 }
55 
56 
57 static const int kInvalidRootRegisterDelta = -1;
58 
59 
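// Returns the displacement of |other| from the value held in kRootRegister
// (the isolate's root array start plus kRootRegisterBias), or
// kInvalidRootRegisterDelta when code size must be predictable and the
// reference lies outside the isolate.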
60 intptr_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
61  if (predictable_code_size() &&
62  (other.address() < reinterpret_cast<Address>(isolate()) ||
63  other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
64  return kInvalidRootRegisterDelta;
65  }
66  Address roots_register_value = kRootRegisterBias +
67  reinterpret_cast<Address>(isolate()->heap()->roots_array_start());
68  intptr_t delta = other.address() - roots_register_value;
69  return delta;
70 }
71 
72 
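// Returns an operand addressing |target|: a kRootRegister-relative
// displacement when the root array is available, serialization is off and the
// delta fits in 32 bits; otherwise the address is materialized into |scratch|
// and dereferenced at offset 0.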
73 Operand MacroAssembler::ExternalOperand(ExternalReference target,
74  Register scratch) {
75  if (root_array_available_ && !Serializer::enabled()) {
76  intptr_t delta = RootRegisterDelta(target);
77  if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
78  Serializer::TooLateToEnableNow();
79  return Operand(kRootRegister, static_cast<int32_t>(delta));
80  }
81  }
82  Move(scratch, target);
83  return Operand(scratch, 0);
84 }
85 
86 
87 void MacroAssembler::Load(Register destination, ExternalReference source) {
88  if (root_array_available_ && !Serializer::enabled()) {
89  intptr_t delta = RootRegisterDelta(source);
90  if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
91  Serializer::TooLateToEnableNow();
92  movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
93  return;
94  }
95  }
96  // Safe code.
97  if (destination.is(rax)) {
98  load_rax(source);
99  } else {
100  Move(kScratchRegister, source);
101  movp(destination, Operand(kScratchRegister, 0));
102  }
103 }
104 
105 
106 void MacroAssembler::Store(ExternalReference destination, Register source) {
107  if (root_array_available_ && !Serializer::enabled()) {
108  intptr_t delta = RootRegisterDelta(destination);
109  if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
110  Serializer::TooLateToEnableNow();
111  movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
112  return;
113  }
114  }
115  // Safe code.
116  if (source.is(rax)) {
117  store_rax(destination);
118  } else {
119  Move(kScratchRegister, destination);
120  movp(Operand(kScratchRegister, 0), source);
121  }
122 }
123 
124 
125 void MacroAssembler::LoadAddress(Register destination,
126  ExternalReference source) {
127  if (root_array_available_ && !Serializer::enabled()) {
128  intptr_t delta = RootRegisterDelta(source);
129  if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
130  Serializer::TooLateToEnableNow();
131  leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
132  return;
133  }
134  }
135  // Safe code.
136  Move(destination, source);
137 }
138 
139 
140 int MacroAssembler::LoadAddressSize(ExternalReference source) {
141  if (root_array_available_ && !Serializer::enabled()) {
142  // This calculation depends on the internals of LoadAddress.
143 // Its correctness is ensured by the asserts in the Call
144  // instruction below.
145  intptr_t delta = RootRegisterDelta(source);
146  if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
147  Serializer::TooLateToEnableNow();
148  // Operand is leap(scratch, Operand(kRootRegister, delta));
149  // Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
150  int size = 4;
151  if (!is_int8(static_cast<int32_t>(delta))) {
152  size += 3; // Need full four-byte displacement in lea.
153  }
154  return size;
155  }
156  }
157  // Size of movp(destination, src);
158  return Assembler::kMoveAddressIntoScratchRegisterInstructionLength;
159 }
160 
161 
162 void MacroAssembler::PushAddress(ExternalReference source) {
163  int64_t address = reinterpret_cast<int64_t>(source.address());
164  if (is_int32(address) && !Serializer::enabled()) {
165  if (emit_debug_code()) {
166  Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
167  }
168  Push(Immediate(static_cast<int32_t>(address)));
169  return;
170  }
171  LoadAddress(kScratchRegister, source);
172  Push(kScratchRegister);
173 }
174 
175 
176 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
177  ASSERT(root_array_available_);
178  movp(destination, Operand(kRootRegister,
179  (index << kPointerSizeLog2) - kRootRegisterBias));
180 }
181 
182 
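// Loads the root at index (variable_offset + fixed_offset) from the root
// array into |destination|.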
183 void MacroAssembler::LoadRootIndexed(Register destination,
184  Register variable_offset,
185  int fixed_offset) {
186  ASSERT(root_array_available_);
187  movp(destination,
188  Operand(kRootRegister,
189  variable_offset, times_pointer_size,
190  (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
191 }
192 
193 
194 void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
195  ASSERT(root_array_available_);
196  movp(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
197  source);
198 }
199 
200 
201 void MacroAssembler::PushRoot(Heap::RootListIndex index) {
202  ASSERT(root_array_available_);
203  Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
204 }
205 
206 
207 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
208  ASSERT(root_array_available_);
209  cmpp(with, Operand(kRootRegister,
210  (index << kPointerSizeLog2) - kRootRegisterBias));
211 }
212 
213 
214 void MacroAssembler::CompareRoot(const Operand& with,
215  Heap::RootListIndex index) {
216  ASSERT(root_array_available_);
217  ASSERT(!with.AddressUsesRegister(kScratchRegister));
218  LoadRoot(kScratchRegister, index);
219  cmpp(with, kScratchRegister);
220 }
221 
222 
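// Records |addr| in the store buffer: writes it at the current buffer top,
// bumps the top, and calls StoreBufferOverflowStub when the overflow bit is
// set. Depending on |and_then|, the code either returns or falls through.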
223 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
224  Register addr,
225  Register scratch,
226  SaveFPRegsMode save_fp,
227  RememberedSetFinalAction and_then) {
228  if (emit_debug_code()) {
229  Label ok;
230  JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
231  int3();
232  bind(&ok);
233  }
234  // Load store buffer top.
235  LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
236  // Store pointer to buffer.
237  movp(Operand(scratch, 0), addr);
238  // Increment buffer top.
239  addp(scratch, Immediate(kPointerSize));
240  // Write back new top of buffer.
241  StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
242  // Call stub on end of buffer.
243  Label done;
244  // Check for end of buffer.
245  testp(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
246  if (and_then == kReturnAtEnd) {
247  Label buffer_overflowed;
248  j(not_equal, &buffer_overflowed, Label::kNear);
249  ret(0);
250  bind(&buffer_overflowed);
251  } else {
252  ASSERT(and_then == kFallThroughAtEnd);
253  j(equal, &done, Label::kNear);
254  }
255  StoreBufferOverflowStub store_buffer_overflow =
256  StoreBufferOverflowStub(save_fp);
257  CallStub(&store_buffer_overflow);
258  if (and_then == kReturnAtEnd) {
259  ret(0);
260  } else {
261  ASSERT(and_then == kFallThroughAtEnd);
262  bind(&done);
263  }
264 }
265 
266 
267 void MacroAssembler::InNewSpace(Register object,
268  Register scratch,
269  Condition cc,
270  Label* branch,
271  Label::Distance distance) {
272  if (Serializer::enabled()) {
273  // Can't do arithmetic on external references if it might get serialized.
274  // The mask isn't really an address. We load it as an external reference in
275  // case the size of the new space is different between the snapshot maker
276  // and the running system.
277  if (scratch.is(object)) {
278  Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
279  andp(scratch, kScratchRegister);
280  } else {
281  Move(scratch, ExternalReference::new_space_mask(isolate()));
282  andp(scratch, object);
283  }
284  Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
285  cmpp(scratch, kScratchRegister);
286  j(cc, branch, distance);
287  } else {
288  ASSERT(is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask())));
289  intptr_t new_space_start =
290  reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
291  Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
292  Assembler::RelocInfoNone());
293  if (scratch.is(object)) {
294  addp(scratch, kScratchRegister);
295  } else {
296  leap(scratch, Operand(object, kScratchRegister, times_1, 0));
297  }
298  andp(scratch,
299  Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
300  j(cc, branch, distance);
301  }
302 }
303 
304 
305 void MacroAssembler::RecordWriteField(
306  Register object,
307  int offset,
308  Register value,
309  Register dst,
310  SaveFPRegsMode save_fp,
311  RememberedSetAction remembered_set_action,
312  SmiCheck smi_check) {
313  // First, check if a write barrier is even needed. The tests below
314  // catch stores of Smis.
315  Label done;
316 
317  // Skip barrier if writing a smi.
318  if (smi_check == INLINE_SMI_CHECK) {
319  JumpIfSmi(value, &done);
320  }
321 
322  // Although the object register is tagged, the offset is relative to the start
323 // of the object, so the offset must be a multiple of kPointerSize.
324  ASSERT(IsAligned(offset, kPointerSize));
325 
326  leap(dst, FieldOperand(object, offset));
327  if (emit_debug_code()) {
328  Label ok;
329  testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
330  j(zero, &ok, Label::kNear);
331  int3();
332  bind(&ok);
333  }
334 
335  RecordWrite(
336  object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
337 
338  bind(&done);
339 
340  // Clobber clobbered input registers when running with the debug-code flag
341  // turned on to provoke errors.
342  if (emit_debug_code()) {
343  Move(value, kZapValue, Assembler::RelocInfoNone());
344  Move(dst, kZapValue, Assembler::RelocInfoNone());
345  }
346 }
347 
348 
349 void MacroAssembler::RecordWriteArray(Register object,
350  Register value,
351  Register index,
352  SaveFPRegsMode save_fp,
353  RememberedSetAction remembered_set_action,
354  SmiCheck smi_check) {
355  // First, check if a write barrier is even needed. The tests below
356  // catch stores of Smis.
357  Label done;
358 
359  // Skip barrier if writing a smi.
360  if (smi_check == INLINE_SMI_CHECK) {
361  JumpIfSmi(value, &done);
362  }
363 
364  // Array access: calculate the destination address. Index is not a smi.
365  Register dst = index;
366  leap(dst, Operand(object, index, times_pointer_size,
367  FixedArray::kHeaderSize - kHeapObjectTag));
368 
369  RecordWrite(
370  object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
371 
372  bind(&done);
373 
374  // Clobber clobbered input registers when running with the debug-code flag
375  // turned on to provoke errors.
376  if (emit_debug_code()) {
377  Move(value, kZapValue, Assembler::RelocInfoNone());
378  Move(index, kZapValue, Assembler::RelocInfoNone());
379  }
380 }
381 
382 
383 void MacroAssembler::RecordWrite(Register object,
384  Register address,
385  Register value,
386  SaveFPRegsMode fp_mode,
387  RememberedSetAction remembered_set_action,
388  SmiCheck smi_check) {
389  ASSERT(!object.is(value));
390  ASSERT(!object.is(address));
391  ASSERT(!value.is(address));
392  AssertNotSmi(object);
393 
394  if (remembered_set_action == OMIT_REMEMBERED_SET &&
395  !FLAG_incremental_marking) {
396  return;
397  }
398 
399  if (emit_debug_code()) {
400  Label ok;
401  cmpp(value, Operand(address, 0));
402  j(equal, &ok, Label::kNear);
403  int3();
404  bind(&ok);
405  }
406 
407  // Count number of write barriers in generated code.
408  isolate()->counters()->write_barriers_static()->Increment();
409  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
410 
411  // First, check if a write barrier is even needed. The tests below
412  // catch stores of smis and stores into the young generation.
413  Label done;
414 
415  if (smi_check == INLINE_SMI_CHECK) {
416  // Skip barrier if writing a smi.
417  JumpIfSmi(value, &done);
418  }
419 
420  CheckPageFlag(value,
421  value, // Used as scratch.
422  MemoryChunk::kPointersToHereAreInterestingMask,
423  zero,
424  &done,
425  Label::kNear);
426 
427  CheckPageFlag(object,
428  value, // Used as scratch.
429  MemoryChunk::kPointersFromHereAreInterestingMask,
430  zero,
431  &done,
432  Label::kNear);
433 
434  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
435  CallStub(&stub);
436 
437  bind(&done);
438 
439  // Clobber clobbered registers when running with the debug-code flag
440  // turned on to provoke errors.
441  if (emit_debug_code()) {
442  Move(address, kZapValue, Assembler::RelocInfoNone());
443  Move(value, kZapValue, Assembler::RelocInfoNone());
444  }
445 }
446 
447 
448 void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
449  if (emit_debug_code()) Check(cc, reason);
450 }
451 
452 
453 void MacroAssembler::AssertFastElements(Register elements) {
454  if (emit_debug_code()) {
455  Label ok;
456  CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
457  Heap::kFixedArrayMapRootIndex);
458  j(equal, &ok, Label::kNear);
459  CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
460  Heap::kFixedDoubleArrayMapRootIndex);
461  j(equal, &ok, Label::kNear);
462  CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
463  Heap::kFixedCOWArrayMapRootIndex);
464  j(equal, &ok, Label::kNear);
465  Abort(kJSObjectWithFastElementsMapHasSlowElements);
466  bind(&ok);
467  }
468 }
469 
470 
471 void MacroAssembler::Check(Condition cc, BailoutReason reason) {
472  Label L;
473  j(cc, &L, Label::kNear);
474  Abort(reason);
475  // Control will not return here.
476  bind(&L);
477 }
478 
479 
480 void MacroAssembler::CheckStackAlignment() {
481  int frame_alignment = OS::ActivationFrameAlignment();
482  int frame_alignment_mask = frame_alignment - 1;
483  if (frame_alignment > kPointerSize) {
484  ASSERT(IsPowerOf2(frame_alignment));
485  Label alignment_as_expected;
486  testp(rsp, Immediate(frame_alignment_mask));
487  j(zero, &alignment_as_expected, Label::kNear);
488  // Abort if stack is not aligned.
489  int3();
490  bind(&alignment_as_expected);
491  }
492 }
493 
494 
495 void MacroAssembler::NegativeZeroTest(Register result,
496  Register op,
497  Label* then_label) {
498  Label ok;
499  testl(result, result);
500  j(not_zero, &ok, Label::kNear);
501  testl(op, op);
502  j(sign, then_label);
503  bind(&ok);
504 }
505 
506 
507 void MacroAssembler::Abort(BailoutReason reason) {
508 #ifdef DEBUG
509  const char* msg = GetBailoutReason(reason);
510  if (msg != NULL) {
511  RecordComment("Abort message: ");
512  RecordComment(msg);
513  }
514 
515  if (FLAG_trap_on_abort) {
516  int3();
517  return;
518  }
519 #endif
520 
521  Push(rax);
522  Move(kScratchRegister, Smi::FromInt(static_cast<int>(reason)),
523  Assembler::RelocInfoNone());
524  Push(kScratchRegister);
525 
526  if (!has_frame_) {
527  // We don't actually want to generate a pile of code for this, so just
528  // claim there is a stack frame, without generating one.
529  FrameScope scope(this, StackFrame::NONE);
530  CallRuntime(Runtime::kAbort, 1);
531  } else {
532  CallRuntime(Runtime::kAbort, 1);
533  }
534  // Control will not return here.
535  int3();
536 }
537 
538 
539 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
540  ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
541  Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id);
542 }
543 
544 
545 void MacroAssembler::TailCallStub(CodeStub* stub) {
546  Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
547 }
548 
549 
550 void MacroAssembler::StubReturn(int argc) {
551  ASSERT(argc >= 1 && generating_stub());
552  ret((argc - 1) * kPointerSize);
553 }
554 
555 
556 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
557  return has_frame_ || !stub->SometimesSetsUpAFrame();
558 }
559 
560 
561 void MacroAssembler::IllegalOperation(int num_arguments) {
562  if (num_arguments > 0) {
563  addp(rsp, Immediate(num_arguments * kPointerSize));
564  }
565  LoadRoot(rax, Heap::kUndefinedValueRootIndex);
566 }
567 
568 
569 void MacroAssembler::IndexFromHash(Register hash, Register index) {
570  // The assert checks that the constants for the maximum number of digits
571  // for an array index cached in the hash field and the number of bits
572 // reserved for it do not conflict.
573  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
574  (1 << String::kArrayIndexValueBits));
575  // We want the smi-tagged index in key. Even if we subsequently go to
576  // the slow case, converting the key to a smi is always valid.
577  // key: string key
578  // hash: key's hash field, including its array index value.
579  andp(hash, Immediate(String::kArrayIndexValueMask));
580  shr(hash, Immediate(String::kHashShift));
581  // Here we actually clobber the key which will be used if calling into
582  // runtime later. However as the new key is the numeric value of a string key
583  // there is no difference in using either key.
584  Integer32ToSmi(index, hash);
585 }
586 
587 
588 void MacroAssembler::CallRuntime(const Runtime::Function* f,
589  int num_arguments,
590  SaveFPRegsMode save_doubles) {
591  // If the expected number of arguments of the runtime function is
592  // constant, we check that the actual number of arguments match the
593  // expectation.
594  if (f->nargs >= 0 && f->nargs != num_arguments) {
595  IllegalOperation(num_arguments);
596  return;
597  }
598 
599  // TODO(1236192): Most runtime routines don't need the number of
600  // arguments passed in because it is constant. At some point we
601  // should remove this need and make the runtime routine entry code
602  // smarter.
603  Set(rax, num_arguments);
604  LoadAddress(rbx, ExternalReference(f, isolate()));
605  CEntryStub ces(f->result_size, save_doubles);
606  CallStub(&ces);
607 }
608 
609 
610 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
611  int num_arguments) {
612  Set(rax, num_arguments);
613  LoadAddress(rbx, ext);
614 
615  CEntryStub stub(1);
616  CallStub(&stub);
617 }
618 
619 
620 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
621  int num_arguments,
622  int result_size) {
623  // ----------- S t a t e -------------
624  // -- rsp[0] : return address
625  // -- rsp[8] : argument num_arguments - 1
626  // ...
627  // -- rsp[8 * num_arguments] : argument 0 (receiver)
628  // -----------------------------------
629 
630  // TODO(1236192): Most runtime routines don't need the number of
631  // arguments passed in because it is constant. At some point we
632  // should remove this need and make the runtime routine entry code
633  // smarter.
634  Set(rax, num_arguments);
635  JumpToExternalReference(ext, result_size);
636 }
637 
638 
639 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
640  int num_arguments,
641  int result_size) {
642  TailCallExternalReference(ExternalReference(fid, isolate()),
643  num_arguments,
644  result_size);
645 }
646 
647 
648 static int Offset(ExternalReference ref0, ExternalReference ref1) {
649  int64_t offset = (ref0.address() - ref1.address());
650 // Check that the offset fits into an int.
651  ASSERT(static_cast<int>(offset) == offset);
652  return static_cast<int>(offset);
653 }
654 
655 
656 void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
657  EnterApiExitFrame(arg_stack_space);
658 }
659 
660 
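// Calls an API function (or the profiler thunk when profiling is enabled),
// maintaining the HandleScope bookkeeping around the call, checking for a
// scheduled exception afterwards, and returning with |stack_space| slots
// popped. Slow paths promote scheduled exceptions and delete handle scope
// extensions when the limit has changed.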
661 void MacroAssembler::CallApiFunctionAndReturn(
662  Register function_address,
663  Address thunk_address,
664  Register thunk_last_arg,
665  int stack_space,
666  Operand return_value_operand,
667  Operand* context_restore_operand) {
668  Label prologue;
669  Label promote_scheduled_exception;
670  Label exception_handled;
671  Label delete_allocated_handles;
672  Label leave_exit_frame;
673  Label write_back;
674 
675  Factory* factory = isolate()->factory();
676  ExternalReference next_address =
677  ExternalReference::handle_scope_next_address(isolate());
678  const int kNextOffset = 0;
679  const int kLimitOffset = Offset(
680  ExternalReference::handle_scope_limit_address(isolate()),
681  next_address);
682  const int kLevelOffset = Offset(
683  ExternalReference::handle_scope_level_address(isolate()),
684  next_address);
685  ExternalReference scheduled_exception_address =
686  ExternalReference::scheduled_exception_address(isolate());
687 
688  ASSERT(rdx.is(function_address) || r8.is(function_address));
689  // Allocate HandleScope in callee-save registers.
690  Register prev_next_address_reg = r14;
691  Register prev_limit_reg = rbx;
692  Register base_reg = r15;
693  Move(base_reg, next_address);
694  movp(prev_next_address_reg, Operand(base_reg, kNextOffset));
695  movp(prev_limit_reg, Operand(base_reg, kLimitOffset));
696  addl(Operand(base_reg, kLevelOffset), Immediate(1));
697 
698  if (FLAG_log_timer_events) {
699  FrameScope frame(this, StackFrame::MANUAL);
700  PushSafepointRegisters();
701  PrepareCallCFunction(1);
702  LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
703  CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
704  PopSafepointRegisters();
705  }
706 
707 
708  Label profiler_disabled;
709  Label end_profiler_check;
710  bool* is_profiling_flag =
711  isolate()->cpu_profiler()->is_profiling_address();
712  STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
713  Move(rax, is_profiling_flag, RelocInfo::EXTERNAL_REFERENCE);
714  cmpb(Operand(rax, 0), Immediate(0));
715  j(zero, &profiler_disabled);
716 
717  // Third parameter is the address of the actual getter function.
718  Move(thunk_last_arg, function_address);
719  Move(rax, thunk_address, RelocInfo::EXTERNAL_REFERENCE);
720  jmp(&end_profiler_check);
721 
722  bind(&profiler_disabled);
723  // Call the api function!
724  Move(rax, function_address);
725 
726  bind(&end_profiler_check);
727 
728  // Call the api function!
729  call(rax);
730 
731  if (FLAG_log_timer_events) {
732  FrameScope frame(this, StackFrame::MANUAL);
733  PushSafepointRegisters();
734  PrepareCallCFunction(1);
735  LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
736  CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
737  PopSafepointRegisters();
738  }
739 
740  // Load the value from ReturnValue
741  movp(rax, return_value_operand);
742  bind(&prologue);
743 
744  // No more valid handles (the result handle was the last one). Restore
745  // previous handle scope.
746  subl(Operand(base_reg, kLevelOffset), Immediate(1));
747  movp(Operand(base_reg, kNextOffset), prev_next_address_reg);
748  cmpp(prev_limit_reg, Operand(base_reg, kLimitOffset));
749  j(not_equal, &delete_allocated_handles);
750  bind(&leave_exit_frame);
751 
752  // Check if the function scheduled an exception.
753  Move(rsi, scheduled_exception_address);
754  Cmp(Operand(rsi, 0), factory->the_hole_value());
755  j(not_equal, &promote_scheduled_exception);
756  bind(&exception_handled);
757 
758 #if ENABLE_EXTRA_CHECKS
759  // Check if the function returned a valid JavaScript value.
760  Label ok;
761  Register return_value = rax;
762  Register map = rcx;
763 
764  JumpIfSmi(return_value, &ok, Label::kNear);
765  movp(map, FieldOperand(return_value, HeapObject::kMapOffset));
766 
767  CmpInstanceType(map, FIRST_NONSTRING_TYPE);
768  j(below, &ok, Label::kNear);
769 
770  CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
771  j(above_equal, &ok, Label::kNear);
772 
773  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
774  j(equal, &ok, Label::kNear);
775 
776  CompareRoot(return_value, Heap::kUndefinedValueRootIndex);
777  j(equal, &ok, Label::kNear);
778 
779  CompareRoot(return_value, Heap::kTrueValueRootIndex);
780  j(equal, &ok, Label::kNear);
781 
782  CompareRoot(return_value, Heap::kFalseValueRootIndex);
783  j(equal, &ok, Label::kNear);
784 
785  CompareRoot(return_value, Heap::kNullValueRootIndex);
786  j(equal, &ok, Label::kNear);
787 
788  Abort(kAPICallReturnedInvalidObject);
789 
790  bind(&ok);
791 #endif
792 
793  bool restore_context = context_restore_operand != NULL;
794  if (restore_context) {
795  movp(rsi, *context_restore_operand);
796  }
797  LeaveApiExitFrame(!restore_context);
798  ret(stack_space * kPointerSize);
799 
800  bind(&promote_scheduled_exception);
801  {
802  FrameScope frame(this, StackFrame::INTERNAL);
803  CallRuntime(Runtime::kHiddenPromoteScheduledException, 0);
804  }
805  jmp(&exception_handled);
806 
807  // HandleScope limit has changed. Delete allocated extensions.
808  bind(&delete_allocated_handles);
809  movp(Operand(base_reg, kLimitOffset), prev_limit_reg);
810  movp(prev_limit_reg, rax);
811  LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
812  LoadAddress(rax,
813  ExternalReference::delete_handle_scope_extensions(isolate()));
814  call(rax);
815  movp(rax, prev_limit_reg);
816  jmp(&leave_exit_frame);
817 }
818 
819 
820 void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
821  int result_size) {
822  // Set the entry point and jump to the C entry runtime stub.
823  LoadAddress(rbx, ext);
824  CEntryStub ces(result_size);
825  jmp(ces.GetCode(isolate()), RelocInfo::CODE_TARGET);
826 }
827 
828 
829 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
830  InvokeFlag flag,
831  const CallWrapper& call_wrapper) {
832  // You can't call a builtin without a valid frame.
833  ASSERT(flag == JUMP_FUNCTION || has_frame());
834 
835  // Rely on the assertion to check that the number of provided
836  // arguments match the expected number of arguments. Fake a
837  // parameter count to avoid emitting code to do the check.
838  ParameterCount expected(0);
839  GetBuiltinEntry(rdx, id);
840  InvokeCode(rdx, expected, expected, flag, call_wrapper);
841 }
842 
843 
844 void MacroAssembler::GetBuiltinFunction(Register target,
845  Builtins::JavaScript id) {
846  // Load the builtins object into target register.
847  movp(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
848  movp(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
849  movp(target, FieldOperand(target,
850  JSBuiltinsObject::OffsetOfFunctionWithId(id)));
851 }
852 
853 
854 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
855  ASSERT(!target.is(rdi));
856  // Load the JavaScript builtin function from the builtins object.
857  GetBuiltinFunction(rdi, id);
858  movp(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
859 }
860 
861 
862 #define REG(Name) { kRegister_ ## Name ## _Code }
863 
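// Caller-saved general purpose registers, i.e. the registers that
// PushCallerSaved/PopCallerSaved preserve around a call.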
864 static const Register saved_regs[] = {
865  REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
866  REG(r9), REG(r10), REG(r11)
867 };
868 
869 #undef REG
870 
871 static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
872 
873 
874 void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
875  Register exclusion1,
876  Register exclusion2,
877  Register exclusion3) {
878  // We don't allow a GC during a store buffer overflow so there is no need to
879  // store the registers in any particular way, but we do have to store and
880  // restore them.
881  for (int i = 0; i < kNumberOfSavedRegs; i++) {
882  Register reg = saved_regs[i];
883  if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
884  pushq(reg);
885  }
886  }
887  // r12 to r15 are callee-saved on all platforms.
888  if (fp_mode == kSaveFPRegs) {
889  subp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
890  for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
891  XMMRegister reg = XMMRegister::from_code(i);
892  movsd(Operand(rsp, i * kDoubleSize), reg);
893  }
894  }
895 }
896 
897 
898 void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
899  Register exclusion1,
900  Register exclusion2,
901  Register exclusion3) {
902  if (fp_mode == kSaveFPRegs) {
903  for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
904  XMMRegister reg = XMMRegister::from_code(i);
905  movsd(reg, Operand(rsp, i * kDoubleSize));
906  }
907  addp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
908  }
909  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
910  Register reg = saved_regs[i];
911  if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
912  popq(reg);
913  }
914  }
915 }
916 
917 
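// cvtlsi2sd leaves the upper bits of the destination unchanged, so clear dst
// first to avoid a false dependence on its previous value.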
918 void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
919  xorps(dst, dst);
920  cvtlsi2sd(dst, src);
921 }
922 
923 
924 void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
925  xorps(dst, dst);
926  cvtlsi2sd(dst, src);
927 }
928 
929 
930 void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
931  ASSERT(!r.IsDouble());
932  if (r.IsInteger8()) {
933  movsxbq(dst, src);
934  } else if (r.IsUInteger8()) {
935  movzxbl(dst, src);
936  } else if (r.IsInteger16()) {
937  movsxwq(dst, src);
938  } else if (r.IsUInteger16()) {
939  movzxwl(dst, src);
940  } else if (r.IsInteger32()) {
941  movl(dst, src);
942  } else {
943  movp(dst, src);
944  }
945 }
946 
947 
948 void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
949  ASSERT(!r.IsDouble());
950  if (r.IsInteger8() || r.IsUInteger8()) {
951  movb(dst, src);
952  } else if (r.IsInteger16() || r.IsUInteger16()) {
953  movw(dst, src);
954  } else if (r.IsInteger32()) {
955  movl(dst, src);
956  } else {
957  movp(dst, src);
958  }
959 }
960 
961 
962 void MacroAssembler::Set(Register dst, int64_t x) {
963  if (x == 0) {
964  xorl(dst, dst);
965  } else if (is_uint32(x)) {
966  movl(dst, Immediate(static_cast<uint32_t>(x)));
967  } else if (is_int32(x)) {
968  movq(dst, Immediate(static_cast<int32_t>(x)));
969  } else {
970  movq(dst, x);
971  }
972 }
973 
974 
975 void MacroAssembler::Set(const Operand& dst, intptr_t x) {
976  if (kPointerSize == kInt64Size) {
977  if (is_int32(x)) {
978  movp(dst, Immediate(static_cast<int32_t>(x)));
979  } else {
980  Set(kScratchRegister, x);
981  movp(dst, kScratchRegister);
982  }
983  } else {
985  movp(dst, Immediate(static_cast<int32_t>(x)));
986  }
987 }
988 
989 
990 // ----------------------------------------------------------------------------
991 // Smi tagging, untagging and tag detection.
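// With 32-bit smis (kSmiShift == 32, kSmiTag == 0), a tagged smi is the value
// stored in the upper half of a 64-bit word, e.g. 5 is encoded as
// 0x0000000500000000.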
992 
993 bool MacroAssembler::IsUnsafeInt(const int32_t x) {
994  static const int kMaxBits = 17;
995  return !is_intn(x, kMaxBits);
996 }
997 
998 
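// Moves a smi constant without embedding attacker-controllable immediates:
// when a JIT cookie is set, values that do not fit in 17 bits are emitted
// XOR-ed with the cookie and decoded with an extra xor at run time.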
999 void MacroAssembler::SafeMove(Register dst, Smi* src) {
1000  ASSERT(!dst.is(kScratchRegister));
1001  ASSERT(SmiValuesAre32Bits()); // JIT cookie can be converted to Smi.
1002  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
1003  Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
1004  Move(kScratchRegister, Smi::FromInt(jit_cookie()));
1005  xorq(dst, kScratchRegister);
1006  } else {
1007  Move(dst, src);
1008  }
1009 }
1010 
1011 
1012 void MacroAssembler::SafePush(Smi* src) {
1013  ASSERT(SmiValuesAre32Bits()); // JIT cookie can be converted to Smi.
1014  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
1015  Push(Smi::FromInt(src->value() ^ jit_cookie()));
1016  Move(kScratchRegister, Smi::FromInt(jit_cookie()));
1017  xorq(Operand(rsp, 0), kScratchRegister);
1018  } else {
1019  Push(src);
1020  }
1021 }
1022 
1023 
1024 Register MacroAssembler::GetSmiConstant(Smi* source) {
1025  int value = source->value();
1026  if (value == 0) {
1027  xorl(kScratchRegister, kScratchRegister);
1028  return kScratchRegister;
1029  }
1030  if (value == 1) {
1031  return kSmiConstantRegister;
1032  }
1033  LoadSmiConstant(kScratchRegister, source);
1034  return kScratchRegister;
1035 }
1036 
1037 
1038 void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
1039  if (emit_debug_code()) {
1040  Move(dst, Smi::FromInt(kSmiConstantRegisterValue),
1041  Assembler::RelocInfoNone());
1042  cmpq(dst, kSmiConstantRegister);
1043  Assert(equal, kUninitializedKSmiConstantRegister);
1044  }
1045  int value = source->value();
1046  if (value == 0) {
1047  xorl(dst, dst);
1048  return;
1049  }
1050  bool negative = value < 0;
1051  unsigned int uvalue = negative ? -value : value;
1052 
1053  switch (uvalue) {
1054  case 9:
1055  leap(dst,
1056  Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
1057  break;
1058  case 8:
1059  xorl(dst, dst);
1060  leap(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
1061  break;
1062  case 4:
1063  xorl(dst, dst);
1064  leap(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
1065  break;
1066  case 5:
1067  leap(dst,
1068  Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
1069  break;
1070  case 3:
1071  leap(dst,
1072  Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
1073  break;
1074  case 2:
1075  leap(dst,
1076  Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
1077  break;
1078  case 1:
1079  movp(dst, kSmiConstantRegister);
1080  break;
1081  case 0:
1082  UNREACHABLE();
1083  return;
1084  default:
1085  Move(dst, source, Assembler::RelocInfoNone());
1086  return;
1087  }
1088  if (negative) {
1089  negp(dst);
1090  }
1091 }
1092 
1093 
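// Tags a 32-bit integer as a smi by shifting it left by kSmiShift.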
1094 void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
1095  STATIC_ASSERT(kSmiTag == 0);
1096  if (!dst.is(src)) {
1097  movl(dst, src);
1098  }
1099  shl(dst, Immediate(kSmiShift));
1100 }
1101 
1102 
1103 void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
1104  if (emit_debug_code()) {
1105  testb(dst, Immediate(0x01));
1106  Label ok;
1107  j(zero, &ok, Label::kNear);
1108  Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
1109  bind(&ok);
1110  }
1111  ASSERT(kSmiShift % kBitsPerByte == 0);
1112  movl(Operand(dst, kSmiShift / kBitsPerByte), src);
1113 }
1114 
1115 
1116 void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
1117  Register src,
1118  int constant) {
1119  if (dst.is(src)) {
1120  addl(dst, Immediate(constant));
1121  } else {
1122  leal(dst, Operand(src, constant));
1123  }
1124  shl(dst, Immediate(kSmiShift));
1125 }
1126 
1127 
1128 void MacroAssembler::SmiToInteger32(Register dst, Register src) {
1129  STATIC_ASSERT(kSmiTag == 0);
1130  if (!dst.is(src)) {
1131  movp(dst, src);
1132  }
1133  shr(dst, Immediate(kSmiShift));
1134 }
1135 
1136 
1137 void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
1138  movl(dst, Operand(src, kSmiShift / kBitsPerByte));
1139 }
1140 
1141 
1142 void MacroAssembler::SmiToInteger64(Register dst, Register src) {
1143  STATIC_ASSERT(kSmiTag == 0);
1144  if (!dst.is(src)) {
1145  movp(dst, src);
1146  }
1147  sar(dst, Immediate(kSmiShift));
1148 }
1149 
1150 
1151 void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
1152  movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
1153 }
1154 
1155 
1156 void MacroAssembler::SmiTest(Register src) {
1157  AssertSmi(src);
1158  testp(src, src);
1159 }
1160 
1161 
1162 void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
1163  AssertSmi(smi1);
1164  AssertSmi(smi2);
1165  cmpp(smi1, smi2);
1166 }
1167 
1168 
1169 void MacroAssembler::SmiCompare(Register dst, Smi* src) {
1170  AssertSmi(dst);
1171  Cmp(dst, src);
1172 }
1173 
1174 
1175 void MacroAssembler::Cmp(Register dst, Smi* src) {
1176  ASSERT(!dst.is(kScratchRegister));
1177  if (src->value() == 0) {
1178  testp(dst, dst);
1179  } else {
1180  Register constant_reg = GetSmiConstant(src);
1181  cmpp(dst, constant_reg);
1182  }
1183 }
1184 
1185 
1186 void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
1187  AssertSmi(dst);
1188  AssertSmi(src);
1189  cmpp(dst, src);
1190 }
1191 
1192 
1193 void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
1194  AssertSmi(dst);
1195  AssertSmi(src);
1196  cmpp(dst, src);
1197 }
1198 
1199 
1200 void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
1201  AssertSmi(dst);
1202  cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
1203 }
1204 
1205 
1206 void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
1207  // The Operand cannot use the smi register.
1208  Register smi_reg = GetSmiConstant(src);
1209  ASSERT(!dst.AddressUsesRegister(smi_reg));
1210  cmpp(dst, smi_reg);
1211 }
1212 
1213 
1214 void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
1215  cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
1216 }
1217 
1218 
1219 void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
1220  Register src,
1221  int power) {
1222  ASSERT(power >= 0);
1223  ASSERT(power < 64);
1224  if (power == 0) {
1225  SmiToInteger64(dst, src);
1226  return;
1227  }
1228  if (!dst.is(src)) {
1229  movp(dst, src);
1230  }
1231  if (power < kSmiShift) {
1232  sar(dst, Immediate(kSmiShift - power));
1233  } else if (power > kSmiShift) {
1234  shl(dst, Immediate(power - kSmiShift));
1235  }
1236 }
1237 
1238 
1239 void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
1240  Register src,
1241  int power) {
1242  ASSERT((0 <= power) && (power < 32));
1243  if (dst.is(src)) {
1244  shr(dst, Immediate(power + kSmiShift));
1245  } else {
1246  UNIMPLEMENTED(); // Not used.
1247  }
1248 }
1249 
1250 
1251 void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
1252  Label* on_not_smis,
1253  Label::Distance near_jump) {
1254  if (dst.is(src1) || dst.is(src2)) {
1255  ASSERT(!src1.is(kScratchRegister));
1256  ASSERT(!src2.is(kScratchRegister));
1257  movp(kScratchRegister, src1);
1258  orp(kScratchRegister, src2);
1259  JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
1260  movp(dst, kScratchRegister);
1261  } else {
1262  movp(dst, src1);
1263  orp(dst, src2);
1264  JumpIfNotSmi(dst, on_not_smis, near_jump);
1265  }
1266 }
1267 
1268 
1269 Condition MacroAssembler::CheckSmi(Register src) {
1270  STATIC_ASSERT(kSmiTag == 0);
1271  testb(src, Immediate(kSmiTagMask));
1272  return zero;
1273 }
1274 
1275 
1276 Condition MacroAssembler::CheckSmi(const Operand& src) {
1277  STATIC_ASSERT(kSmiTag == 0);
1278  testb(src, Immediate(kSmiTagMask));
1279  return zero;
1280 }
1281 
1282 
1283 Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
1284  STATIC_ASSERT(kSmiTag == 0);
1285  // Test that both bits of the mask 0x8000000000000001 are zero.
1286  movp(kScratchRegister, src);
1287  rol(kScratchRegister, Immediate(1));
1288  testb(kScratchRegister, Immediate(3));
1289  return zero;
1290 }
1291 
1292 
1293 Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
1294  if (first.is(second)) {
1295  return CheckSmi(first);
1296  }
1297  STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
1298  leal(kScratchRegister, Operand(first, second, times_1, 0));
1299  testb(kScratchRegister, Immediate(0x03));
1300  return zero;
1301 }
1302 
1303 
1304 Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
1305  Register second) {
1306  if (first.is(second)) {
1307  return CheckNonNegativeSmi(first);
1308  }
1309  movp(kScratchRegister, first);
1310  orp(kScratchRegister, second);
1311  rol(kScratchRegister, Immediate(1));
1312  testl(kScratchRegister, Immediate(3));
1313  return zero;
1314 }
1315 
1316 
1317 Condition MacroAssembler::CheckEitherSmi(Register first,
1318  Register second,
1319  Register scratch) {
1320  if (first.is(second)) {
1321  return CheckSmi(first);
1322  }
1323  if (scratch.is(second)) {
1324  andl(scratch, first);
1325  } else {
1326  if (!scratch.is(first)) {
1327  movl(scratch, first);
1328  }
1329  andl(scratch, second);
1330  }
1331  testb(scratch, Immediate(kSmiTagMask));
1332  return zero;
1333 }
1334 
1335 
1336 Condition MacroAssembler::CheckIsMinSmi(Register src) {
1337  ASSERT(!src.is(kScratchRegister));
1338  // If we overflow by subtracting one, it's the minimal smi value.
1339  cmpp(src, kSmiConstantRegister);
1340  return overflow;
1341 }
1342 
1343 
1344 Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
1345  // A 32-bit integer value can always be converted to a smi.
1346  return always;
1347 }
1348 
1349 
1350 Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
1351  // An unsigned 32-bit integer value is valid as long as the high bit
1352  // is not set.
1353  testl(src, src);
1354  return positive;
1355 }
1356 
1357 
1358 void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
1359  if (dst.is(src)) {
1360  andl(dst, Immediate(kSmiTagMask));
1361  } else {
1362  movl(dst, Immediate(kSmiTagMask));
1363  andl(dst, src);
1364  }
1365 }
1366 
1367 
1368 void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
1369  if (!(src.AddressUsesRegister(dst))) {
1370  movl(dst, Immediate(kSmiTagMask));
1371  andl(dst, src);
1372  } else {
1373  movl(dst, src);
1374  andl(dst, Immediate(kSmiTagMask));
1375  }
1376 }
1377 
1378 
1379 void MacroAssembler::JumpIfNotValidSmiValue(Register src,
1380  Label* on_invalid,
1381  Label::Distance near_jump) {
1382  Condition is_valid = CheckInteger32ValidSmiValue(src);
1383  j(NegateCondition(is_valid), on_invalid, near_jump);
1384 }
1385 
1386 
1387 void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
1388  Label* on_invalid,
1389  Label::Distance near_jump) {
1390  Condition is_valid = CheckUInteger32ValidSmiValue(src);
1391  j(NegateCondition(is_valid), on_invalid, near_jump);
1392 }
1393 
1394 
1395 void MacroAssembler::JumpIfSmi(Register src,
1396  Label* on_smi,
1397  Label::Distance near_jump) {
1398  Condition smi = CheckSmi(src);
1399  j(smi, on_smi, near_jump);
1400 }
1401 
1402 
1403 void MacroAssembler::JumpIfNotSmi(Register src,
1404  Label* on_not_smi,
1405  Label::Distance near_jump) {
1406  Condition smi = CheckSmi(src);
1407  j(NegateCondition(smi), on_not_smi, near_jump);
1408 }
1409 
1410 
1411 void MacroAssembler::JumpUnlessNonNegativeSmi(
1412  Register src, Label* on_not_smi_or_negative,
1413  Label::Distance near_jump) {
1414  Condition non_negative_smi = CheckNonNegativeSmi(src);
1415  j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
1416 }
1417 
1418 
1419 void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
1420  Smi* constant,
1421  Label* on_equals,
1422  Label::Distance near_jump) {
1423  SmiCompare(src, constant);
1424  j(equal, on_equals, near_jump);
1425 }
1426 
1427 
1428 void MacroAssembler::JumpIfNotBothSmi(Register src1,
1429  Register src2,
1430  Label* on_not_both_smi,
1431  Label::Distance near_jump) {
1432  Condition both_smi = CheckBothSmi(src1, src2);
1433  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
1434 }
1435 
1436 
1437 void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
1438  Register src2,
1439  Label* on_not_both_smi,
1440  Label::Distance near_jump) {
1441  Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
1442  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
1443 }
1444 
1445 
1446 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
1447  if (constant->value() == 0) {
1448  if (!dst.is(src)) {
1449  movp(dst, src);
1450  }
1451  return;
1452  } else if (dst.is(src)) {
1453  ASSERT(!dst.is(kScratchRegister));
1454  switch (constant->value()) {
1455  case 1:
1456  addp(dst, kSmiConstantRegister);
1457  return;
1458  case 2:
1459  leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
1460  return;
1461  case 4:
1462  leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
1463  return;
1464  case 8:
1465  leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
1466  return;
1467  default:
1468  Register constant_reg = GetSmiConstant(constant);
1469  addp(dst, constant_reg);
1470  return;
1471  }
1472  } else {
1473  switch (constant->value()) {
1474  case 1:
1475  leap(dst, Operand(src, kSmiConstantRegister, times_1, 0));
1476  return;
1477  case 2:
1478  leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
1479  return;
1480  case 4:
1481  leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
1482  return;
1483  case 8:
1484  leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
1485  return;
1486  default:
1487  LoadSmiConstant(dst, constant);
1488  addp(dst, src);
1489  return;
1490  }
1491  }
1492 }
1493 
1494 
1495 void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
1496  if (constant->value() != 0) {
1497  addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
1498  }
1499 }
1500 
1501 
1502 void MacroAssembler::SmiAddConstant(Register dst,
1503  Register src,
1504  Smi* constant,
1505  SmiOperationExecutionMode mode,
1506  Label* bailout_label,
1507  Label::Distance near_jump) {
1508  if (constant->value() == 0) {
1509  if (!dst.is(src)) {
1510  movp(dst, src);
1511  }
1512  } else if (dst.is(src)) {
1513  ASSERT(!dst.is(kScratchRegister));
1514  LoadSmiConstant(kScratchRegister, constant);
1515  addp(dst, kScratchRegister);
1516  if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
1517  j(no_overflow, bailout_label, near_jump);
1518  ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
1519  subp(dst, kScratchRegister);
1520  } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
1521  if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
1522  Label done;
1523  j(no_overflow, &done, Label::kNear);
1524  subp(dst, kScratchRegister);
1525  jmp(bailout_label, near_jump);
1526  bind(&done);
1527  } else {
1528  // Bail out on overflow without preserving src.
1529  j(overflow, bailout_label, near_jump);
1530  }
1531  } else {
1532  CHECK(mode.IsEmpty());
1533  }
1534  } else {
1535  ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
1536  ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
1537  LoadSmiConstant(dst, constant);
1538  addp(dst, src);
1539  j(overflow, bailout_label, near_jump);
1540  }
1541 }
1542 
1543 
1544 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
1545  if (constant->value() == 0) {
1546  if (!dst.is(src)) {
1547  movp(dst, src);
1548  }
1549  } else if (dst.is(src)) {
1550  ASSERT(!dst.is(kScratchRegister));
1551  Register constant_reg = GetSmiConstant(constant);
1552  subp(dst, constant_reg);
1553  } else {
1554  if (constant->value() == Smi::kMinValue) {
1555  LoadSmiConstant(dst, constant);
1556  // Adding and subtracting the min-value gives the same result; it only
1557  // differs on the overflow bit, which we don't check here.
1558  addp(dst, src);
1559  } else {
1560  // Subtract by adding the negation.
1561  LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
1562  addp(dst, src);
1563  }
1564  }
1565 }
1566 
1567 
1568 void MacroAssembler::SmiSubConstant(Register dst,
1569  Register src,
1570  Smi* constant,
1571  SmiOperationExecutionMode mode,
1572  Label* bailout_label,
1573  Label::Distance near_jump) {
1574  if (constant->value() == 0) {
1575  if (!dst.is(src)) {
1576  movp(dst, src);
1577  }
1578  } else if (dst.is(src)) {
1579  ASSERT(!dst.is(kScratchRegister));
1580  LoadSmiConstant(kScratchRegister, constant);
1581  subp(dst, kScratchRegister);
1582  if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
1583  j(no_overflow, bailout_label, near_jump);
1584  ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
1585  addp(dst, kScratchRegister);
1586  } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
1587  if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
1588  Label done;
1589  j(no_overflow, &done, Label::kNear);
1590  addp(dst, kScratchRegister);
1591  jmp(bailout_label, near_jump);
1592  bind(&done);
1593  } else {
1594  // Bail out on overflow without preserving src.
1595  j(overflow, bailout_label, near_jump);
1596  }
1597  } else {
1598  CHECK(mode.IsEmpty());
1599  }
1600  } else {
1601  ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
1602  ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
1603  if (constant->value() == Smi::kMinValue) {
1604  ASSERT(!dst.is(kScratchRegister));
1605  movp(dst, src);
1606  LoadSmiConstant(kScratchRegister, constant);
1607  subp(dst, kScratchRegister);
1608  j(overflow, bailout_label, near_jump);
1609  } else {
1610  // Subtract by adding the negation.
1611  LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
1612  addp(dst, src);
1613  j(overflow, bailout_label, near_jump);
1614  }
1615  }
1616 }
1617 
1618 
1619 void MacroAssembler::SmiNeg(Register dst,
1620  Register src,
1621  Label* on_smi_result,
1622  Label::Distance near_jump) {
1623  if (dst.is(src)) {
1624  ASSERT(!dst.is(kScratchRegister));
1625  movp(kScratchRegister, src);
1626  negp(dst); // Low 32 bits are retained as zero by negation.
1627  // Test if result is zero or Smi::kMinValue.
1628  cmpp(dst, kScratchRegister);
1629  j(not_equal, on_smi_result, near_jump);
1630  movp(src, kScratchRegister);
1631  } else {
1632  movp(dst, src);
1633  negp(dst);
1634  cmpp(dst, src);
1635  // If the result is zero or Smi::kMinValue, negation failed to create a smi.
1636  j(not_equal, on_smi_result, near_jump);
1637  }
1638 }
1639 
1640 
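// Adds src2 to src1 into dst, branching to on_not_smi_result on overflow.
// When dst aliases src1, the addition is undone before branching so that src1
// keeps its original value.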
1641 template<class T>
1642 static void SmiAddHelper(MacroAssembler* masm,
1643  Register dst,
1644  Register src1,
1645  T src2,
1646  Label* on_not_smi_result,
1647  Label::Distance near_jump) {
1648  if (dst.is(src1)) {
1649  Label done;
1650  masm->addp(dst, src2);
1651  masm->j(no_overflow, &done, Label::kNear);
1652  // Restore src1.
1653  masm->subp(dst, src2);
1654  masm->jmp(on_not_smi_result, near_jump);
1655  masm->bind(&done);
1656  } else {
1657  masm->movp(dst, src1);
1658  masm->addp(dst, src2);
1659  masm->j(overflow, on_not_smi_result, near_jump);
1660  }
1661 }
1662 
1663 
1664 void MacroAssembler::SmiAdd(Register dst,
1665  Register src1,
1666  Register src2,
1667  Label* on_not_smi_result,
1668  Label::Distance near_jump) {
1669  ASSERT_NOT_NULL(on_not_smi_result);
1670  ASSERT(!dst.is(src2));
1671  SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
1672 }
1673 
1674 
1675 void MacroAssembler::SmiAdd(Register dst,
1676  Register src1,
1677  const Operand& src2,
1678  Label* on_not_smi_result,
1679  Label::Distance near_jump) {
1680  ASSERT_NOT_NULL(on_not_smi_result);
1681  ASSERT(!src2.AddressUsesRegister(dst));
1682  SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
1683 }
1684 
1685 
1686 void MacroAssembler::SmiAdd(Register dst,
1687  Register src1,
1688  Register src2) {
1689  // No overflow checking. Use only when it's known that
1690  // overflowing is impossible.
1691  if (!dst.is(src1)) {
1692  if (emit_debug_code()) {
1693  movp(kScratchRegister, src1);
1694  addp(kScratchRegister, src2);
1695  Check(no_overflow, kSmiAdditionOverflow);
1696  }
1697  leap(dst, Operand(src1, src2, times_1, 0));
1698  } else {
1699  addp(dst, src2);
1700  Assert(no_overflow, kSmiAdditionOverflow);
1701  }
1702 }
1703 
1704 
1705 template<class T>
1706 static void SmiSubHelper(MacroAssembler* masm,
1707  Register dst,
1708  Register src1,
1709  T src2,
1710  Label* on_not_smi_result,
1711  Label::Distance near_jump) {
1712  if (dst.is(src1)) {
1713  Label done;
1714  masm->subp(dst, src2);
1715  masm->j(no_overflow, &done, Label::kNear);
1716  // Restore src1.
1717  masm->addp(dst, src2);
1718  masm->jmp(on_not_smi_result, near_jump);
1719  masm->bind(&done);
1720  } else {
1721  masm->movp(dst, src1);
1722  masm->subp(dst, src2);
1723  masm->j(overflow, on_not_smi_result, near_jump);
1724  }
1725 }
1726 
1727 
1728 void MacroAssembler::SmiSub(Register dst,
1729  Register src1,
1730  Register src2,
1731  Label* on_not_smi_result,
1732  Label::Distance near_jump) {
1733  ASSERT_NOT_NULL(on_not_smi_result);
1734  ASSERT(!dst.is(src2));
1735  SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
1736 }
1737 
1738 
1739 void MacroAssembler::SmiSub(Register dst,
1740  Register src1,
1741  const Operand& src2,
1742  Label* on_not_smi_result,
1743  Label::Distance near_jump) {
1744  ASSERT_NOT_NULL(on_not_smi_result);
1745  ASSERT(!src2.AddressUsesRegister(dst));
1746  SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
1747 }
1748 
1749 
1750 template<class T>
1751 static void SmiSubNoOverflowHelper(MacroAssembler* masm,
1752  Register dst,
1753  Register src1,
1754  T src2) {
1755  // No overflow checking. Use only when it's known that
1756  // overflowing is impossible (e.g., subtracting two positive smis).
1757  if (!dst.is(src1)) {
1758  masm->movp(dst, src1);
1759  }
1760  masm->subp(dst, src2);
1761  masm->Assert(no_overflow, kSmiSubtractionOverflow);
1762 }
1763 
1764 
1765 void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
1766  ASSERT(!dst.is(src2));
1767  SmiSubNoOverflowHelper<Register>(this, dst, src1, src2);
1768 }
1769 
1770 
1771 void MacroAssembler::SmiSub(Register dst,
1772  Register src1,
1773  const Operand& src2) {
1774  SmiSubNoOverflowHelper<Operand>(this, dst, src1, src2);
1775 }
1776 
1777 
1778 void MacroAssembler::SmiMul(Register dst,
1779  Register src1,
1780  Register src2,
1781  Label* on_not_smi_result,
1782  Label::Distance near_jump) {
1783  ASSERT(!dst.is(src2));
1784  ASSERT(!dst.is(kScratchRegister));
1785  ASSERT(!src1.is(kScratchRegister));
1786  ASSERT(!src2.is(kScratchRegister));
1787 
1788  if (dst.is(src1)) {
1789  Label failure, zero_correct_result;
1790  movp(kScratchRegister, src1); // Create backup for later testing.
1791  SmiToInteger64(dst, src1);
1792  imulp(dst, src2);
1793  j(overflow, &failure, Label::kNear);
1794 
1795  // Check for negative zero result. If product is zero, and one
1796  // argument is negative, go to slow case.
1797  Label correct_result;
1798  testp(dst, dst);
1799  j(not_zero, &correct_result, Label::kNear);
1800 
1801  movp(dst, kScratchRegister);
1802  xorp(dst, src2);
1803  // Result was positive zero.
1804  j(positive, &zero_correct_result, Label::kNear);
1805 
1806  bind(&failure); // Reused failure exit, restores src1.
1807  movp(src1, kScratchRegister);
1808  jmp(on_not_smi_result, near_jump);
1809 
1810  bind(&zero_correct_result);
1811  Set(dst, 0);
1812 
1813  bind(&correct_result);
1814  } else {
1815  SmiToInteger64(dst, src1);
1816  imulp(dst, src2);
1817  j(overflow, on_not_smi_result, near_jump);
1818  // Check for negative zero result. If product is zero, and one
1819  // argument is negative, go to slow case.
1820  Label correct_result;
1821  testp(dst, dst);
1822  j(not_zero, &correct_result, Label::kNear);
1823  // One of src1 and src2 is zero, so check whether the other is
1824  // negative.
1825  movp(kScratchRegister, src1);
1826  xorp(kScratchRegister, src2);
1827  j(negative, on_not_smi_result, near_jump);
1828  bind(&correct_result);
1829  }
1830 }
1831 
1832 
1833 void MacroAssembler::SmiDiv(Register dst,
1834  Register src1,
1835  Register src2,
1836  Label* on_not_smi_result,
1837  Label::Distance near_jump) {
1838  ASSERT(!src1.is(kScratchRegister));
1839  ASSERT(!src2.is(kScratchRegister));
1840  ASSERT(!dst.is(kScratchRegister));
1841  ASSERT(!src2.is(rax));
1842  ASSERT(!src2.is(rdx));
1843  ASSERT(!src1.is(rdx));
1844 
1845  // Check for 0 divisor (result is +/-Infinity).
1846  testp(src2, src2);
1847  j(zero, on_not_smi_result, near_jump);
1848 
1849  if (src1.is(rax)) {
1850  movp(kScratchRegister, src1);
1851  }
1852  SmiToInteger32(rax, src1);
1853  // We need to rule out dividing Smi::kMinValue by -1, since that would
1854  // overflow in idiv and raise an exception.
1855  // We combine this with negative zero test (negative zero only happens
1856  // when dividing zero by a negative number).
1857 
1858  // We overshoot a little and go to slow case if we divide min-value
1859  // by any negative value, not just -1.
1860  Label safe_div;
1861  testl(rax, Immediate(0x7fffffff));
1862  j(not_zero, &safe_div, Label::kNear);
1863  testp(src2, src2);
1864  if (src1.is(rax)) {
1865  j(positive, &safe_div, Label::kNear);
1866  movp(src1, kScratchRegister);
1867  jmp(on_not_smi_result, near_jump);
1868  } else {
1869  j(negative, on_not_smi_result, near_jump);
1870  }
1871  bind(&safe_div);
1872 
1873  SmiToInteger32(src2, src2);
1874  // Sign extend src1 into edx:eax.
1875  cdq();
1876  idivl(src2);
1877  Integer32ToSmi(src2, src2);
1878  // Check that the remainder is zero.
1879  testl(rdx, rdx);
1880  if (src1.is(rax)) {
1881  Label smi_result;
1882  j(zero, &smi_result, Label::kNear);
1883  movp(src1, kScratchRegister);
1884  jmp(on_not_smi_result, near_jump);
1885  bind(&smi_result);
1886  } else {
1887  j(not_zero, on_not_smi_result, near_jump);
1888  }
1889  if (!dst.is(src1) && src1.is(rax)) {
1890  movp(src1, kScratchRegister);
1891  }
1892  Integer32ToSmi(dst, rax);
1893 }
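// Why the extra guard before idivl: dividing Smi::kMinValue (INT32_MIN on
// x64) by -1 would produce 2^31, which does not fit in a signed 32-bit
// quotient, so idivl raises #DE. The testl against 0x7fffffff routes both
// rax == 0 (possible -0 result) and rax == Smi::kMinValue through the slow
// path whenever the divisor is negative, overshooting as noted above.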
1894 
1895 
1896 void MacroAssembler::SmiMod(Register dst,
1897  Register src1,
1898  Register src2,
1899  Label* on_not_smi_result,
1900  Label::Distance near_jump) {
1901  ASSERT(!dst.is(kScratchRegister));
1902  ASSERT(!src1.is(kScratchRegister));
1903  ASSERT(!src2.is(kScratchRegister));
1904  ASSERT(!src2.is(rax));
1905  ASSERT(!src2.is(rdx));
1906  ASSERT(!src1.is(rdx));
1907  ASSERT(!src1.is(src2));
1908 
1909  testp(src2, src2);
1910  j(zero, on_not_smi_result, near_jump);
1911 
1912  if (src1.is(rax)) {
1913  movp(kScratchRegister, src1);
1914  }
1915  SmiToInteger32(rax, src1);
1916  SmiToInteger32(src2, src2);
1917 
1918  // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
1919  Label safe_div;
1920  cmpl(rax, Immediate(Smi::kMinValue));
1921  j(not_equal, &safe_div, Label::kNear);
1922  cmpl(src2, Immediate(-1));
1923  j(not_equal, &safe_div, Label::kNear);
1924  // Retag inputs and go slow case.
1925  Integer32ToSmi(src2, src2);
1926  if (src1.is(rax)) {
1927  movp(src1, kScratchRegister);
1928  }
1929  jmp(on_not_smi_result, near_jump);
1930  bind(&safe_div);
1931 
1932  // Sign extend eax into edx:eax.
1933  cdq();
1934  idivl(src2);
1935  // Restore smi tags on inputs.
1936  Integer32ToSmi(src2, src2);
1937  if (src1.is(rax)) {
1938  movp(src1, kScratchRegister);
1939  }
1940  // Check for a negative zero result. If the result is zero, and the
1941  // dividend is negative, go slow to return a floating point negative zero.
1942  Label smi_result;
1943  testl(rdx, rdx);
1944  j(not_zero, &smi_result, Label::kNear);
1945  testp(src1, src1);
1946  j(negative, on_not_smi_result, near_jump);
1947  bind(&smi_result);
1948  Integer32ToSmi(dst, rdx);
1949 }
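// Note on the remainder sign check above: in JavaScript the sign of x % y
// follows the dividend, so e.g. -10 % 5 is -0. A zero remainder from a
// negative dividend therefore cannot be returned as the smi 0 and is sent to
// the slow path, which can produce a heap-number -0.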
1950 
1951 
1952 void MacroAssembler::SmiNot(Register dst, Register src) {
1953  ASSERT(!dst.is(kScratchRegister));
1954  ASSERT(!src.is(kScratchRegister));
1955  // Set tag and padding bits before negating, so that they are zero afterwards.
1956  movl(kScratchRegister, Immediate(~0));
1957  if (dst.is(src)) {
1958  xorp(dst, kScratchRegister);
1959  } else {
1960  leap(dst, Operand(src, kScratchRegister, times_1, 0));
1961  }
1962  notp(dst);
1963 }
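// How the ~0 trick works (assuming the x64 smi layout, kSmiShift == 32, with
// the 32-bit payload in the upper half and zero tag/padding bits below):
// movl zero-extends, so kScratchRegister == 0x00000000FFFFFFFF. Adding (or
// xoring) it into a valid smi sets the low 32 bits to all ones without
// touching the payload; notp then yields ~payload in the upper half and
// zeros below, i.e. the correctly tagged smi of the bitwise complement.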
1964 
1965 
1966 void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
1967  ASSERT(!dst.is(src2));
1968  if (!dst.is(src1)) {
1969  movp(dst, src1);
1970  }
1971  andp(dst, src2);
1972 }
1973 
1974 
1975 void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
1976  if (constant->value() == 0) {
1977  Set(dst, 0);
1978  } else if (dst.is(src)) {
1979  ASSERT(!dst.is(kScratchRegister));
1980  Register constant_reg = GetSmiConstant(constant);
1981  andp(dst, constant_reg);
1982  } else {
1983  LoadSmiConstant(dst, constant);
1984  andp(dst, src);
1985  }
1986 }
1987 
1988 
1989 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
1990  if (!dst.is(src1)) {
1991  ASSERT(!src1.is(src2));
1992  movp(dst, src1);
1993  }
1994  orp(dst, src2);
1995 }
1996 
1997 
1998 void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
1999  if (dst.is(src)) {
2000  ASSERT(!dst.is(kScratchRegister));
2001  Register constant_reg = GetSmiConstant(constant);
2002  orp(dst, constant_reg);
2003  } else {
2004  LoadSmiConstant(dst, constant);
2005  orp(dst, src);
2006  }
2007 }
2008 
2009 
2010 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
2011  if (!dst.is(src1)) {
2012  ASSERT(!src1.is(src2));
2013  movp(dst, src1);
2014  }
2015  xorp(dst, src2);
2016 }
2017 
2018 
2019 void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
2020  if (dst.is(src)) {
2021  ASSERT(!dst.is(kScratchRegister));
2022  Register constant_reg = GetSmiConstant(constant);
2023  xorp(dst, constant_reg);
2024  } else {
2025  LoadSmiConstant(dst, constant);
2026  xorp(dst, src);
2027  }
2028 }
2029 
2030 
2031 void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
2032  Register src,
2033  int shift_value) {
2034  ASSERT(is_uint5(shift_value));
2035  if (shift_value > 0) {
2036  if (dst.is(src)) {
2037  sar(dst, Immediate(shift_value + kSmiShift));
2038  shl(dst, Immediate(kSmiShift));
2039  } else {
2040  UNIMPLEMENTED(); // Not used.
2041  }
2042  }
2043 }
2044 
2045 
2046 void MacroAssembler::SmiShiftLeftConstant(Register dst,
2047  Register src,
2048  int shift_value) {
2049  if (!dst.is(src)) {
2050  movp(dst, src);
2051  }
2052  if (shift_value > 0) {
2053  shl(dst, Immediate(shift_value));
2054  }
2055 }
2056 
2057 
2058 void MacroAssembler::SmiShiftLogicalRightConstant(
2059  Register dst, Register src, int shift_value,
2060  Label* on_not_smi_result, Label::Distance near_jump) {
2061  // Logical right shift interprets its result as an *unsigned* number.
2062  if (dst.is(src)) {
2063  UNIMPLEMENTED(); // Not used.
2064  } else {
2065  movp(dst, src);
2066  if (shift_value == 0) {
2067  testp(dst, dst);
2068  j(negative, on_not_smi_result, near_jump);
2069  }
2070  shr(dst, Immediate(shift_value + kSmiShift));
2071  shl(dst, Immediate(kSmiShift));
2072  }
2073 }
2074 
2075 
2076 void MacroAssembler::SmiShiftLeft(Register dst,
2077  Register src1,
2078  Register src2) {
2079  ASSERT(!dst.is(rcx));
2080  // Untag shift amount.
2081  if (!dst.is(src1)) {
2082  movq(dst, src1);
2083  }
2084  SmiToInteger32(rcx, src2);
2085  // The shift amount is specified by the lower 5 bits, not six as the 64-bit shl would use.
2086  andq(rcx, Immediate(0x1f));
2087  shl_cl(dst);
2088 }
2089 
2090 
2091 void MacroAssembler::SmiShiftLogicalRight(Register dst,
2092  Register src1,
2093  Register src2,
2094  Label* on_not_smi_result,
2095  Label::Distance near_jump) {
2096  ASSERT(!dst.is(kScratchRegister));
2097  ASSERT(!src1.is(kScratchRegister));
2098  ASSERT(!src2.is(kScratchRegister));
2099  ASSERT(!dst.is(rcx));
2100  // dst and src1 can be the same, because the one case that bails out
2101  // is a shift by 0, which leaves dst, and therefore src1, unchanged.
2102  if (src1.is(rcx) || src2.is(rcx)) {
2103  movq(kScratchRegister, rcx);
2104  }
2105  if (!dst.is(src1)) {
2106  movq(dst, src1);
2107  }
2108  SmiToInteger32(rcx, src2);
2109  orl(rcx, Immediate(kSmiShift));
2110  shr_cl(dst); // Shift is 32 + (original rcx & 0x1f).
2111  shl(dst, Immediate(kSmiShift));
2112  testq(dst, dst);
2113  if (src1.is(rcx) || src2.is(rcx)) {
2114  Label positive_result;
2115  j(positive, &positive_result, Label::kNear);
2116  if (src1.is(rcx)) {
2117  movq(src1, kScratchRegister);
2118  } else {
2119  movq(src2, kScratchRegister);
2120  }
2121  jmp(on_not_smi_result, near_jump);
2122  bind(&positive_result);
2123  } else {
2124  // src2 was zero and src1 negative.
2125  j(negative, on_not_smi_result, near_jump);
2126  }
2127 }
2128 
2129 
2130 void MacroAssembler::SmiShiftArithmeticRight(Register dst,
2131  Register src1,
2132  Register src2) {
2133  ASSERT(!dst.is(kScratchRegister));
2134  ASSERT(!src1.is(kScratchRegister));
2135  ASSERT(!src2.is(kScratchRegister));
2136  ASSERT(!dst.is(rcx));
2137  if (src1.is(rcx)) {
2138  movp(kScratchRegister, src1);
2139  } else if (src2.is(rcx)) {
2140  movp(kScratchRegister, src2);
2141  }
2142  if (!dst.is(src1)) {
2143  movp(dst, src1);
2144  }
2145  SmiToInteger32(rcx, src2);
2146  orl(rcx, Immediate(kSmiShift));
2147  sar_cl(dst); // Shift 32 + original rcx & 0x1f.
2148  shl(dst, Immediate(kSmiShift));
2149  if (src1.is(rcx)) {
2150  movp(src1, kScratchRegister);
2151  } else if (src2.is(rcx)) {
2152  movp(src2, kScratchRegister);
2153  }
2154 }
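// Shared trick in SmiShiftLogicalRight and SmiShiftArithmeticRight above
// (assuming kSmiShift == 32): for a shift count in 0..31, or'ing the untagged
// count with 32 makes the single shr_cl/sar_cl both drop the 32 low
// tag/padding bits and apply the requested shift in one instruction; the
// trailing shl by kSmiShift then re-tags the result as a smi.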
2155 
2156 
2157 void MacroAssembler::SelectNonSmi(Register dst,
2158  Register src1,
2159  Register src2,
2160  Label* on_not_smis,
2161  Label::Distance near_jump) {
2162  ASSERT(!dst.is(kScratchRegister));
2163  ASSERT(!src1.is(kScratchRegister));
2164  ASSERT(!src2.is(kScratchRegister));
2165  ASSERT(!dst.is(src1));
2166  ASSERT(!dst.is(src2));
2167  // The operands must not both be smis.
2168 #ifdef DEBUG
2169  Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
2170  Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
2171 #endif
2172  STATIC_ASSERT(kSmiTag == 0);
2173  ASSERT_EQ(0, Smi::FromInt(0));
2174  movl(kScratchRegister, Immediate(kSmiTagMask));
2175  andp(kScratchRegister, src1);
2176  testl(kScratchRegister, src2);
2177  // If non-zero, then neither operand is a smi.
2178  j(not_zero, on_not_smis, near_jump);
2179 
2180  // Exactly one operand is a smi.
2181  ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
2182  // kScratchRegister still holds src1 & kSmiTagMask, which is either zero or one.
2183  subp(kScratchRegister, Immediate(1));
2184  // If src1 is a smi, the scratch register is now all 1s, else it is all 0s.
2185  movp(dst, src1);
2186  xorp(dst, src2);
2187  andp(dst, kScratchRegister);
2188  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
2189  xorp(dst, src1);
2190  // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
2191 }
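// Summary of the branch-free selection above: scratch = (src1 & 1) - 1 is all
// ones when src1 is a smi (tag bit 0) and all zeros otherwise, so
// dst = src1 ^ ((src1 ^ src2) & scratch) evaluates to src2 when src1 is the
// smi and to src1 when src2 is the smi, i.e. always the non-smi operand.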
2192 
2193 
2194 SmiIndex MacroAssembler::SmiToIndex(Register dst,
2195  Register src,
2196  int shift) {
2197  ASSERT(is_uint6(shift));
2198  // There is a possible optimization if shift is in the range 60-63, but that
2199  // will (and must) never happen.
2200  if (!dst.is(src)) {
2201  movq(dst, src);
2202  }
2203  if (shift < kSmiShift) {
2204  sar(dst, Immediate(kSmiShift - shift));
2205  } else {
2206  shl(dst, Immediate(shift - kSmiShift));
2207  }
2208  return SmiIndex(dst, times_1);
2209 }
2210 
2211 SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
2212  Register src,
2213  int shift) {
2214  // Register src holds a positive smi.
2215  ASSERT(is_uint6(shift));
2216  if (!dst.is(src)) {
2217  movq(dst, src);
2218  }
2219  negq(dst);
2220  if (shift < kSmiShift) {
2221  sar(dst, Immediate(kSmiShift - shift));
2222  } else {
2223  shl(dst, Immediate(shift - kSmiShift));
2224  }
2225  return SmiIndex(dst, times_1);
2226 }
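// The index math in SmiToIndex/SmiToNegativeIndex, as a minimal C++ sketch
// (assuming kSmiShift == 32 as on x64; illustrative only, not part of V8):
//
//   int64_t SmiToScaledIndex(int64_t tagged, int shift) {
//     // tagged == value << 32; wanted: value << shift.
//     return shift < 32 ? tagged >> (32 - shift)   // arithmetic shift
//                       : tagged << (shift - 32);
//   }
//
// SmiToNegativeIndex negates the tagged word first, which is safe because the
// low 32 bits of a smi are zero, so -(value << 32) == (-value) << 32.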
2227 
2228 
2229 void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
2230  ASSERT_EQ(kSmiShift % kBitsPerByte, 0);
2231  addl(dst, Operand(src, kSmiShift / kBitsPerByte));
2232 }
2233 
2234 
2235 void MacroAssembler::Push(Smi* source) {
2236  intptr_t smi = reinterpret_cast<intptr_t>(source);
2237  if (is_int32(smi)) {
2238  Push(Immediate(static_cast<int32_t>(smi)));
2239  } else {
2240  Register constant = GetSmiConstant(source);
2241  Push(constant);
2242  }
2243 }
2244 
2245 
2246 void MacroAssembler::PushInt64AsTwoSmis(Register src, Register scratch) {
2247  movp(scratch, src);
2248  // High bits.
2249  shr(src, Immediate(64 - kSmiShift));
2250  shl(src, Immediate(kSmiShift));
2251  Push(src);
2252  // Low bits.
2253  shl(scratch, Immediate(kSmiShift));
2254  Push(scratch);
2255 }
2256 
2257 
2258 void MacroAssembler::PopInt64AsTwoSmis(Register dst, Register scratch) {
2259  Pop(scratch);
2260  // Low bits.
2261  shr(scratch, Immediate(kSmiShift));
2262  Pop(dst);
2263  shr(dst, Immediate(kSmiShift));
2264  // High bits.
2265  shl(dst, Immediate(64 - kSmiShift));
2266  orp(dst, scratch);
2267 }
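// PushInt64AsTwoSmis/PopInt64AsTwoSmis split a raw 64-bit word into its high
// and low 32-bit halves and leave each half in the upper 32 bits of a stack
// slot (the smi payload position, assuming kSmiShift == 32), so each slot
// holds a valid-looking smi while the value is spilled on the stack; Pop
// reassembles the original word in the reverse order.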
2268 
2269 
2270 void MacroAssembler::Test(const Operand& src, Smi* source) {
2271  testl(Operand(src, kIntSize), Immediate(source->value()));
2272 }
2273 
2274 
2275 // ----------------------------------------------------------------------------
2276 
2277 
2278 void MacroAssembler::LookupNumberStringCache(Register object,
2279  Register result,
2280  Register scratch1,
2281  Register scratch2,
2282  Label* not_found) {
2283  // Use of registers. Register result is used as a temporary.
2284  Register number_string_cache = result;
2285  Register mask = scratch1;
2286  Register scratch = scratch2;
2287 
2288  // Load the number string cache.
2289  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
2290 
2291  // Make the hash mask from the length of the number string cache. It
2292  // contains two elements (number and string) for each cache entry.
2293  SmiToInteger32(
2294  mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
2295  shrl(mask, Immediate(1));
2296  subp(mask, Immediate(1)); // Make mask.
2297 
2298  // Calculate the entry in the number string cache. The hash value in the
2299  // number string cache for smis is just the smi value, and the hash for
2300  // doubles is the xor of the upper and lower words. See
2301  // Heap::GetNumberStringCache.
2302  Label is_smi;
2303  Label load_result_from_cache;
2304  JumpIfSmi(object, &is_smi);
2305  CheckMap(object,
2306  isolate()->factory()->heap_number_map(),
2307  not_found,
2308  DONT_DO_SMI_CHECK);
2309 
2310  STATIC_ASSERT(8 == kDoubleSize);
2311  movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
2312  xorp(scratch, FieldOperand(object, HeapNumber::kValueOffset));
2313  andp(scratch, mask);
2314  // Each entry in string cache consists of two pointer sized fields,
2315  // but times_twice_pointer_size (multiplication by 16) scale factor
2316  // is not supported by addrmode on x64 platform.
2317  // So we have to premultiply entry index before lookup.
2318  shl(scratch, Immediate(kPointerSizeLog2 + 1));
2319 
2320  Register index = scratch;
2321  Register probe = mask;
2322  movp(probe,
2323  FieldOperand(number_string_cache,
2324  index,
2325  times_1,
2326  FixedArray::kHeaderSize));
2327  JumpIfSmi(probe, not_found);
2328  movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
2329  ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
2330  j(parity_even, not_found); // Bail out if NaN is involved.
2331  j(not_equal, not_found); // The cache did not contain this value.
2332  jmp(&load_result_from_cache);
2333 
2334  bind(&is_smi);
2335  SmiToInteger32(scratch, object);
2336  andp(scratch, mask);
2337  // Each entry in string cache consists of two pointer sized fields,
2338  // but times_twice_pointer_size (multiplication by 16) scale factor
2339  // is not supported by addrmode on x64 platform.
2340  // So we have to premultiply entry index before lookup.
2341  shl(scratch, Immediate(kPointerSizeLog2 + 1));
2342 
2343  // Check if the entry is the smi we are looking for.
2344  cmpp(object,
2345  FieldOperand(number_string_cache,
2346  index,
2347  times_1,
2348  FixedArray::kHeaderSize));
2349  j(not_equal, not_found);
2350 
2351  // Get the result from the cache.
2352  bind(&load_result_from_cache);
2353  movp(result,
2354  FieldOperand(number_string_cache,
2355  index,
2356  times_1,
2357  FixedArray::kHeaderSize + kPointerSize));
2358  IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
2359 }
2360 
2361 
2362 void MacroAssembler::JumpIfNotString(Register object,
2363  Register object_map,
2364  Label* not_string,
2365  Label::Distance near_jump) {
2366  Condition is_smi = CheckSmi(object);
2367  j(is_smi, not_string, near_jump);
2368  CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
2369  j(above_equal, not_string, near_jump);
2370 }
2371 
2372 
2373 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
2374  Register first_object,
2375  Register second_object,
2376  Register scratch1,
2377  Register scratch2,
2378  Label* on_fail,
2379  Label::Distance near_jump) {
2380  // Check that both objects are not smis.
2381  Condition either_smi = CheckEitherSmi(first_object, second_object);
2382  j(either_smi, on_fail, near_jump);
2383 
2384  // Load instance type for both strings.
2385  movp(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
2386  movp(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
2387  movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
2388  movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
2389 
2390  // Check that both are flat ASCII strings.
2391  ASSERT(kNotStringTag != 0);
2392  const int kFlatAsciiStringMask =
2393  kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2394  const int kFlatAsciiStringTag =
2395  kStringTag | kSeqStringTag | kOneByteStringTag;
2396 
2397  andl(scratch1, Immediate(kFlatAsciiStringMask));
2398  andl(scratch2, Immediate(kFlatAsciiStringMask));
2399  // Interleave the bits to check both scratch1 and scratch2 in one test.
2400  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
2401  leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
2402  cmpl(scratch1,
2403  Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
2404  j(not_equal, on_fail, near_jump);
2405 }
2406 
2407 
2408 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
2409  Register instance_type,
2410  Register scratch,
2411  Label* failure,
2412  Label::Distance near_jump) {
2413  if (!scratch.is(instance_type)) {
2414  movl(scratch, instance_type);
2415  }
2416 
2417  const int kFlatAsciiStringMask =
2418  kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2419 
2420  andl(scratch, Immediate(kFlatAsciiStringMask));
2421  cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag));
2422  j(not_equal, failure, near_jump);
2423 }
2424 
2425 
2426 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
2427  Register first_object_instance_type,
2428  Register second_object_instance_type,
2429  Register scratch1,
2430  Register scratch2,
2431  Label* on_fail,
2432  Label::Distance near_jump) {
2433  // Load instance type for both strings.
2434  movp(scratch1, first_object_instance_type);
2435  movp(scratch2, second_object_instance_type);
2436 
2437  // Check that both are flat ASCII strings.
2438  ASSERT(kNotStringTag != 0);
2439  const int kFlatAsciiStringMask =
2440  kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2441  const int kFlatAsciiStringTag =
2442  kStringTag | kSeqStringTag | kOneByteStringTag;
2443 
2444  andl(scratch1, Immediate(kFlatAsciiStringMask));
2445  andl(scratch2, Immediate(kFlatAsciiStringMask));
2446  // Interleave the bits to check both scratch1 and scratch2 in one test.
2447  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
2448  leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
2449  cmpl(scratch1,
2450  Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
2451  j(not_equal, on_fail, near_jump);
2452 }
2453 
2454 
2455 template<class T>
2456 static void JumpIfNotUniqueNameHelper(MacroAssembler* masm,
2457  T operand_or_register,
2458  Label* not_unique_name,
2459  Label::Distance distance) {
2460  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2461  Label succeed;
2462  masm->testb(operand_or_register,
2463  Immediate(kIsNotStringMask | kIsNotInternalizedMask));
2464  masm->j(zero, &succeed, Label::kNear);
2465  masm->cmpb(operand_or_register, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
2466  masm->j(not_equal, not_unique_name, distance);
2467 
2468  masm->bind(&succeed);
2469 }
2470 
2471 
2472 void MacroAssembler::JumpIfNotUniqueName(Operand operand,
2473  Label* not_unique_name,
2474  Label::Distance distance) {
2475  JumpIfNotUniqueNameHelper<Operand>(this, operand, not_unique_name, distance);
2476 }
2477 
2478 
2479 void MacroAssembler::JumpIfNotUniqueName(Register reg,
2480  Label* not_unique_name,
2481  Label::Distance distance) {
2482  JumpIfNotUniqueNameHelper<Register>(this, reg, not_unique_name, distance);
2483 }
2484 
2485 
2486 void MacroAssembler::Move(Register dst, Register src) {
2487  if (!dst.is(src)) {
2488  movp(dst, src);
2489  }
2490 }
2491 
2492 
2493 void MacroAssembler::Move(Register dst, Handle<Object> source) {
2494  AllowDeferredHandleDereference smi_check;
2495  if (source->IsSmi()) {
2496  Move(dst, Smi::cast(*source));
2497  } else {
2498  MoveHeapObject(dst, source);
2499  }
2500 }
2501 
2502 
2503 void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
2504  AllowDeferredHandleDereference smi_check;
2505  if (source->IsSmi()) {
2506  Move(dst, Smi::cast(*source));
2507  } else {
2508  MoveHeapObject(kScratchRegister, source);
2509  movp(dst, kScratchRegister);
2510  }
2511 }
2512 
2513 
2514 void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
2515  AllowDeferredHandleDereference smi_check;
2516  if (source->IsSmi()) {
2517  Cmp(dst, Smi::cast(*source));
2518  } else {
2519  MoveHeapObject(kScratchRegister, source);
2520  cmpp(dst, kScratchRegister);
2521  }
2522 }
2523 
2524 
2525 void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
2526  AllowDeferredHandleDereference smi_check;
2527  if (source->IsSmi()) {
2528  Cmp(dst, Smi::cast(*source));
2529  } else {
2530  MoveHeapObject(kScratchRegister, source);
2531  cmpp(dst, kScratchRegister);
2532  }
2533 }
2534 
2535 
2536 void MacroAssembler::Push(Handle<Object> source) {
2537  AllowDeferredHandleDereference smi_check;
2538  if (source->IsSmi()) {
2539  Push(Smi::cast(*source));
2540  } else {
2541  MoveHeapObject(kScratchRegister, source);
2542  Push(kScratchRegister);
2543  }
2544 }
2545 
2546 
2547 void MacroAssembler::MoveHeapObject(Register result,
2548  Handle<Object> object) {
2549  AllowDeferredHandleDereference using_raw_address;
2550  ASSERT(object->IsHeapObject());
2551  if (isolate()->heap()->InNewSpace(*object)) {
2552  Handle<Cell> cell = isolate()->factory()->NewCell(object);
2553  Move(result, cell, RelocInfo::CELL);
2554  movp(result, Operand(result, 0));
2555  } else {
2556  Move(result, object, RelocInfo::EMBEDDED_OBJECT);
2557  }
2558 }
2559 
2560 
2561 void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
2562  if (dst.is(rax)) {
2563  AllowDeferredHandleDereference embedding_raw_address;
2564  load_rax(cell.location(), RelocInfo::CELL);
2565  } else {
2566  Move(dst, cell, RelocInfo::CELL);
2567  movp(dst, Operand(dst, 0));
2568  }
2569 }
2570 
2571 
2572 void MacroAssembler::Drop(int stack_elements) {
2573  if (stack_elements > 0) {
2574  addp(rsp, Immediate(stack_elements * kPointerSize));
2575  }
2576 }
2577 
2578 
2579 void MacroAssembler::Push(Register src) {
2580  if (kPointerSize == kInt64Size) {
2581  pushq(src);
2582  } else {
2583  ASSERT(kPointerSize == kInt32Size);
2584  // x32 uses 64-bit push for rbp in the prologue.
2585  ASSERT(src.code() != rbp.code());
2586  leal(rsp, Operand(rsp, -4));
2587  movp(Operand(rsp, 0), src);
2588  }
2589 }
2590 
2591 
2592 void MacroAssembler::Push(const Operand& src) {
2593  if (kPointerSize == kInt64Size) {
2594  pushq(src);
2595  } else {
2596  ASSERT(kPointerSize == kInt32Size);
2597  movp(kScratchRegister, src);
2598  leal(rsp, Operand(rsp, -4));
2599  movp(Operand(rsp, 0), kScratchRegister);
2600  }
2601 }
2602 
2603 
2604 void MacroAssembler::Push(Immediate value) {
2605  if (kPointerSize == kInt64Size) {
2606  pushq(value);
2607  } else {
2608  ASSERT(kPointerSize == kInt32Size);
2609  leal(rsp, Operand(rsp, -4));
2610  movp(Operand(rsp, 0), value);
2611  }
2612 }
2613 
2614 
2615 void MacroAssembler::PushImm32(int32_t imm32) {
2616  if (kPointerSize == kInt64Size) {
2617  pushq_imm32(imm32);
2618  } else {
2619  ASSERT(kPointerSize == kInt32Size);
2620  leal(rsp, Operand(rsp, -4));
2621  movp(Operand(rsp, 0), Immediate(imm32));
2622  }
2623 }
2624 
2625 
2626 void MacroAssembler::Pop(Register dst) {
2627  if (kPointerSize == kInt64Size) {
2628  popq(dst);
2629  } else {
2630  ASSERT(kPointerSize == kInt32Size);
2631  // x32 uses 64-bit pop for rbp in the epilogue.
2632  ASSERT(dst.code() != rbp.code());
2633  movp(dst, Operand(rsp, 0));
2634  leal(rsp, Operand(rsp, 4));
2635  }
2636 }
2637 
2638 
2639 void MacroAssembler::Pop(const Operand& dst) {
2640  if (kPointerSize == kInt64Size) {
2641  popq(dst);
2642  } else {
2643  ASSERT(kPointerSize == kInt32Size);
2644  Register scratch = dst.AddressUsesRegister(kScratchRegister)
2645  ? kSmiConstantRegister : kScratchRegister;
2646  movp(scratch, Operand(rsp, 0));
2647  movp(dst, scratch);
2648  leal(rsp, Operand(rsp, 4));
2649  if (scratch.is(kSmiConstantRegister)) {
2650  // Restore kSmiConstantRegister.
2651  movp(kSmiConstantRegister,
2652  reinterpret_cast<void*>(Smi::FromInt(kSmiConstantRegisterValue)),
2653  Assembler::RelocInfoNone());
2654  }
2655  }
2656 }
2657 
2658 
2659 void MacroAssembler::TestBit(const Operand& src, int bits) {
2660  int byte_offset = bits / kBitsPerByte;
2661  int bit_in_byte = bits & (kBitsPerByte - 1);
2662  testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte));
2663 }
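// Example of the addressing above: TestBit(op, 11) tests bit 11 of the field,
// i.e. byte_offset = 11 / 8 = 1 and bit_in_byte = 11 % 8 = 3, so it emits a
// testb with mask 1 << 3 against the byte at offset 1.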
2664 
2665 
2666 void MacroAssembler::Jump(ExternalReference ext) {
2667  LoadAddress(kScratchRegister, ext);
2668  jmp(kScratchRegister);
2669 }
2670 
2671 
2672 void MacroAssembler::Jump(const Operand& op) {
2673  if (kPointerSize == kInt64Size) {
2674  jmp(op);
2675  } else {
2676  ASSERT(kPointerSize == kInt32Size);
2677  movp(kScratchRegister, op);
2678  jmp(kScratchRegister);
2679  }
2680 }
2681 
2682 
2683 void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
2684  Move(kScratchRegister, destination, rmode);
2685  jmp(kScratchRegister);
2686 }
2687 
2688 
2689 void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
2690  // TODO(X64): Inline this
2691  jmp(code_object, rmode);
2692 }
2693 
2694 
2695 int MacroAssembler::CallSize(ExternalReference ext) {
2696  // Opcode for call kScratchRegister is: Rex.B FF D4 (three bytes).
2697  return LoadAddressSize(ext) +
2698  Assembler::kCallScratchRegisterInstructionLength;
2699 }
2700 
2701 
2702 void MacroAssembler::Call(ExternalReference ext) {
2703 #ifdef DEBUG
2704  int end_position = pc_offset() + CallSize(ext);
2705 #endif
2706  LoadAddress(kScratchRegister, ext);
2707  call(kScratchRegister);
2708 #ifdef DEBUG
2709  CHECK_EQ(end_position, pc_offset());
2710 #endif
2711 }
2712 
2713 
2714 void MacroAssembler::Call(const Operand& op) {
2715  if (kPointerSize == kInt64Size) {
2716  call(op);
2717  } else {
2718  ASSERT(kPointerSize == kInt32Size);
2719  movp(kScratchRegister, op);
2720  call(kScratchRegister);
2721  }
2722 }
2723 
2724 
2725 void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
2726 #ifdef DEBUG
2727  int end_position = pc_offset() + CallSize(destination);
2728 #endif
2729  Move(kScratchRegister, destination, rmode);
2730  call(kScratchRegister);
2731 #ifdef DEBUG
2732  CHECK_EQ(pc_offset(), end_position);
2733 #endif
2734 }
2735 
2736 
2737 void MacroAssembler::Call(Handle<Code> code_object,
2738  RelocInfo::Mode rmode,
2739  TypeFeedbackId ast_id) {
2740 #ifdef DEBUG
2741  int end_position = pc_offset() + CallSize(code_object);
2742 #endif
2743  ASSERT(RelocInfo::IsCodeTarget(rmode) ||
2744  rmode == RelocInfo::CODE_AGE_SEQUENCE);
2745  call(code_object, rmode, ast_id);
2746 #ifdef DEBUG
2747  CHECK_EQ(end_position, pc_offset());
2748 #endif
2749 }
2750 
2751 
2752 void MacroAssembler::Pushad() {
2753  Push(rax);
2754  Push(rcx);
2755  Push(rdx);
2756  Push(rbx);
2757  // Not pushing rsp or rbp.
2758  Push(rsi);
2759  Push(rdi);
2760  Push(r8);
2761  Push(r9);
2762  // r10 is kScratchRegister.
2763  Push(r11);
2764  // r12 is kSmiConstantRegister.
2765  // r13 is kRootRegister.
2766  Push(r14);
2767  Push(r15);
2768  STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
2769  // Use lea for symmetry with Popad.
2770  int sp_delta =
2771  (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
2772  leap(rsp, Operand(rsp, -sp_delta));
2773 }
2774 
2775 
2776 void MacroAssembler::Popad() {
2777  // Popad must not change the flags, so use lea instead of addq.
2778  int sp_delta =
2779  (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
2780  leap(rsp, Operand(rsp, sp_delta));
2781  Pop(r15);
2782  Pop(r14);
2783  Pop(r11);
2784  Pop(r9);
2785  Pop(r8);
2786  Pop(rdi);
2787  Pop(rsi);
2788  Pop(rbx);
2789  Pop(rdx);
2790  Pop(rcx);
2791  Pop(rax);
2792 }
2793 
2794 
2795 void MacroAssembler::Dropad() {
2796  addp(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
2797 }
2798 
2799 
2800 // Order general registers are pushed by Pushad:
2801 // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
2802 const int
2803 MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
2804  0,
2805  1,
2806  2,
2807  3,
2808  -1,
2809  -1,
2810  4,
2811  5,
2812  6,
2813  7,
2814  -1,
2815  8,
2816  -1,
2817  -1,
2818  9,
2819  10
2820 };
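// The table above is indexed by register code (rax == 0 ... r15 == 15) and
// gives each register's slot in the Pushad push order; -1 marks registers
// that Pushad does not save (rsp, rbp, r10/kScratchRegister,
// r12/kSmiConstantRegister and r13/kRootRegister).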
2821 
2822 
2823 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst,
2824  const Immediate& imm) {
2825  movp(SafepointRegisterSlot(dst), imm);
2826 }
2827 
2828 
2829 void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
2830  movp(SafepointRegisterSlot(dst), src);
2831 }
2832 
2833 
2834 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
2835  movp(dst, SafepointRegisterSlot(src));
2836 }
2837 
2838 
2839 Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
2840  return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
2841 }
2842 
2843 
2844 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
2845  int handler_index) {
2846  // Adjust this code if not the case.
2847  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
2848  kFPOnStackSize);
2849  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2850  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2851  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2852  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2853  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2854 
2855  // We will build up the handler from the bottom by pushing on the stack.
2856  // First push the frame pointer and context.
2857  if (kind == StackHandler::JS_ENTRY) {
2858  // The frame pointer does not point to a JS frame so we save NULL for
2859  // rbp. We expect the code throwing an exception to check rbp before
2860  // dereferencing it to restore the context.
2861  pushq(Immediate(0)); // NULL frame pointer.
2862  Push(Smi::FromInt(0)); // No context.
2863  } else {
2864  pushq(rbp);
2865  Push(rsi);
2866  }
2867 
2868  // Push the state and the code object.
2869  unsigned state =
2870  StackHandler::IndexField::encode(handler_index) |
2871  StackHandler::KindField::encode(kind);
2872  Push(Immediate(state));
2873  Push(CodeObject());
2874 
2875  // Link the current handler as the next handler.
2876  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
2877  Push(ExternalOperand(handler_address));
2878  // Set this new handler as the current one.
2879  movp(ExternalOperand(handler_address), rsp);
2880 }
2881 
2882 
2883 void MacroAssembler::PopTryHandler() {
2884  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2885  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
2886  Pop(ExternalOperand(handler_address));
2887  addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
2888 }
2889 
2890 
2891 void MacroAssembler::JumpToHandlerEntry() {
2892  // Compute the handler entry address and jump to it. The handler table is
2893  // a fixed array of (smi-tagged) code offsets.
2894  // rax = exception, rdi = code object, rdx = state.
2895  movp(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
2896  shr(rdx, Immediate(StackHandler::kKindWidth));
2897  movp(rdx,
2898  FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
2899  SmiToInteger64(rdx, rdx);
2900  leap(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
2901  jmp(rdi);
2902 }
2903 
2904 
2905 void MacroAssembler::Throw(Register value) {
2906  // Adjust this code if not the case.
2907  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
2908  kFPOnStackSize);
2909  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2910  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2911  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2912  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2913  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2914 
2915  // The exception is expected in rax.
2916  if (!value.is(rax)) {
2917  movp(rax, value);
2918  }
2919  // Drop the stack pointer to the top of the top handler.
2920  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
2921  movp(rsp, ExternalOperand(handler_address));
2922  // Restore the next handler.
2923  Pop(ExternalOperand(handler_address));
2924 
2925  // Remove the code object and state, compute the handler address in rdi.
2926  Pop(rdi); // Code object.
2927  Pop(rdx); // Offset and state.
2928 
2929  // Restore the context and frame pointer.
2930  Pop(rsi); // Context.
2931  popq(rbp); // Frame pointer.
2932 
2933  // If the handler is a JS frame, restore the context to the frame.
2934  // (kind == ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
2935  // rbp or rsi.
2936  Label skip;
2937  testp(rsi, rsi);
2938  j(zero, &skip, Label::kNear);
2939  movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
2940  bind(&skip);
2941 
2942  JumpToHandlerEntry();
2943 }
2944 
2945 
2946 void MacroAssembler::ThrowUncatchable(Register value) {
2947  // Adjust this code if not the case.
2948  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
2949  kFPOnStackSize);
2950  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2951  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2952  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2953  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2954  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2955 
2956  // The exception is expected in rax.
2957  if (!value.is(rax)) {
2958  movp(rax, value);
2959  }
2960  // Drop the stack pointer to the top of the top stack handler.
2961  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
2962  Load(rsp, handler_address);
2963 
2964  // Unwind the handlers until the top ENTRY handler is found.
2965  Label fetch_next, check_kind;
2966  jmp(&check_kind, Label::kNear);
2967  bind(&fetch_next);
2968  movp(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
2969 
2970  bind(&check_kind);
2971  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
2972  testl(Operand(rsp, StackHandlerConstants::kStateOffset),
2973  Immediate(StackHandler::KindField::kMask));
2974  j(not_zero, &fetch_next);
2975 
2976  // Set the top handler address to next handler past the top ENTRY handler.
2977  Pop(ExternalOperand(handler_address));
2978 
2979  // Remove the code object and state, compute the handler address in rdi.
2980  Pop(rdi); // Code object.
2981  Pop(rdx); // Offset and state.
2982 
2983  // Clear the context pointer and frame pointer (0 was saved in the handler).
2984  Pop(rsi);
2985  popq(rbp);
2986 
2987  JumpToHandlerEntry();
2988 }
2989 
2990 
2991 void MacroAssembler::Ret() {
2992  ret(0);
2993 }
2994 
2995 
2996 void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
2997  if (is_uint16(bytes_dropped)) {
2998  ret(bytes_dropped);
2999  } else {
3000  PopReturnAddressTo(scratch);
3001  addp(rsp, Immediate(bytes_dropped));
3002  PushReturnAddressFrom(scratch);
3003  ret(0);
3004  }
3005 }
3006 
3007 
3008 void MacroAssembler::FCmp() {
3009  fucomip();
3010  fstp(0);
3011 }
3012 
3013 
3014 void MacroAssembler::CmpObjectType(Register heap_object,
3015  InstanceType type,
3016  Register map) {
3017  movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3018  CmpInstanceType(map, type);
3019 }
3020 
3021 
3022 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
3023  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
3024  Immediate(static_cast<int8_t>(type)));
3025 }
3026 
3027 
3028 void MacroAssembler::CheckFastElements(Register map,
3029  Label* fail,
3030  Label::Distance distance) {
3031  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3032  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3033  STATIC_ASSERT(FAST_ELEMENTS == 2);
3034  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3035  cmpb(FieldOperand(map, Map::kBitField2Offset),
3036  Immediate(Map::kMaximumBitField2FastHoleyElementValue));
3037  j(above, fail, distance);
3038 }
3039 
3040 
3041 void MacroAssembler::CheckFastObjectElements(Register map,
3042  Label* fail,
3043  Label::Distance distance) {
3044  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3045  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3046  STATIC_ASSERT(FAST_ELEMENTS == 2);
3047  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3048  cmpb(FieldOperand(map, Map::kBitField2Offset),
3049  Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
3050  j(below_equal, fail, distance);
3051  cmpb(FieldOperand(map, Map::kBitField2Offset),
3052  Immediate(Map::kMaximumBitField2FastHoleyElementValue));
3053  j(above, fail, distance);
3054 }
3055 
3056 
3057 void MacroAssembler::CheckFastSmiElements(Register map,
3058  Label* fail,
3059  Label::Distance distance) {
3060  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3061  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3062  cmpb(FieldOperand(map, Map::kBitField2Offset),
3063  Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
3064  j(above, fail, distance);
3065 }
3066 
3067 
3068 void MacroAssembler::StoreNumberToDoubleElements(
3069  Register maybe_number,
3070  Register elements,
3071  Register index,
3072  XMMRegister xmm_scratch,
3073  Label* fail,
3074  int elements_offset) {
3075  Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
3076 
3077  JumpIfSmi(maybe_number, &smi_value, Label::kNear);
3078 
3079  CheckMap(maybe_number,
3080  isolate()->factory()->heap_number_map(),
3081  fail,
3082  DONT_DO_SMI_CHECK);
3083 
3084  // Double value, canonicalize NaN.
3085  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
3086  cmpl(FieldOperand(maybe_number, offset),
3087  Immediate(kNaNOrInfinityLowerBoundUpper32));
3088  j(greater_equal, &maybe_nan, Label::kNear);
3089 
3090  bind(&not_nan);
3091  movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
3092  bind(&have_double_value);
3093  movsd(FieldOperand(elements, index, times_8,
3094  FixedDoubleArray::kHeaderSize - elements_offset),
3095  xmm_scratch);
3096  jmp(&done);
3097 
3098  bind(&maybe_nan);
3099  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
3100  // it's an Infinity, and the non-NaN code path applies.
3101  j(greater, &is_nan, Label::kNear);
3102  cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
3103  j(zero, &not_nan);
3104  bind(&is_nan);
3105  // Convert all NaNs to the same canonical NaN value when they are stored in
3106  // the double array.
3107  Set(kScratchRegister, BitCast<uint64_t>(
3108  FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
3109  movq(xmm_scratch, kScratchRegister);
3110  jmp(&have_double_value, Label::kNear);
3111 
3112  bind(&smi_value);
3113  // Value is a smi. Convert to a double and store.
3114  // Preserve original value.
3115  SmiToInteger32(kScratchRegister, maybe_number);
3116  Cvtlsi2sd(xmm_scratch, kScratchRegister);
3117  movsd(FieldOperand(elements, index, times_8,
3118  FixedDoubleArray::kHeaderSize - elements_offset),
3119  xmm_scratch);
3120  bind(&done);
3121 }
3122 
3123 
3124 void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
3125  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
3126 }
3127 
3128 
3129 void MacroAssembler::CheckMap(Register obj,
3130  Handle<Map> map,
3131  Label* fail,
3132  SmiCheckType smi_check_type) {
3133  if (smi_check_type == DO_SMI_CHECK) {
3134  JumpIfSmi(obj, fail);
3135  }
3136 
3137  CompareMap(obj, map);
3138  j(not_equal, fail);
3139 }
3140 
3141 
3142 void MacroAssembler::ClampUint8(Register reg) {
3143  Label done;
3144  testl(reg, Immediate(0xFFFFFF00));
3145  j(zero, &done, Label::kNear);
3146  setcc(negative, reg); // 1 if negative, 0 if positive.
3147  decb(reg); // 0 if negative, 255 if positive.
3148  bind(&done);
3149 }
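// How the clamp above works: the testl only falls through when some of bits
// 8..31 are set, i.e. the value is outside 0..255. setcc(negative) then
// yields 1 for a negative input and 0 for a too-large positive one; decb
// turns that into 0x00 (clamp to 0) or 0xFF (clamp to 255) respectively.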
3150 
3151 
3152 void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
3153  XMMRegister temp_xmm_reg,
3154  Register result_reg) {
3155  Label done;
3156  Label conv_failure;
3157  xorps(temp_xmm_reg, temp_xmm_reg);
3158  cvtsd2si(result_reg, input_reg);
3159  testl(result_reg, Immediate(0xFFFFFF00));
3160  j(zero, &done, Label::kNear);
3161  cmpl(result_reg, Immediate(1));
3162  j(overflow, &conv_failure, Label::kNear);
3163  movl(result_reg, Immediate(0));
3164  setcc(sign, result_reg);
3165  subl(result_reg, Immediate(1));
3166  andl(result_reg, Immediate(255));
3167  jmp(&done, Label::kNear);
3168  bind(&conv_failure);
3169  Set(result_reg, 0);
3170  ucomisd(input_reg, temp_xmm_reg);
3171  j(below, &done, Label::kNear);
3172  Set(result_reg, 255);
3173  bind(&done);
3174 }
3175 
3176 
3177 void MacroAssembler::LoadUint32(XMMRegister dst,
3178  Register src,
3179  XMMRegister scratch) {
3180  if (FLAG_debug_code) {
3181  cmpq(src, Immediate(0xffffffff));
3182  Assert(below_equal, kInputGPRIsExpectedToHaveUpper32Cleared);
3183  }
3184  cvtqsi2sd(dst, src);
3185 }
3186 
3187 
3188 void MacroAssembler::SlowTruncateToI(Register result_reg,
3189  Register input_reg,
3190  int offset) {
3191  DoubleToIStub stub(input_reg, result_reg, offset, true);
3192  call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
3193 }
3194 
3195 
3196 void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
3197  Register input_reg) {
3198  Label done;
3199  movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
3200  cvttsd2siq(result_reg, xmm0);
3201  cmpq(result_reg, Immediate(1));
3202  j(no_overflow, &done, Label::kNear);
3203 
3204  // Slow case.
3205  if (input_reg.is(result_reg)) {
3206  subp(rsp, Immediate(kDoubleSize));
3207  movsd(MemOperand(rsp, 0), xmm0);
3208  SlowTruncateToI(result_reg, rsp, 0);
3209  addp(rsp, Immediate(kDoubleSize));
3210  } else {
3211  SlowTruncateToI(result_reg, input_reg);
3212  }
3213 
3214  bind(&done);
3215 }
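// Overflow detection used here and in TruncateDoubleToI below: when the
// double is NaN or out of int64 range, cvttsd2siq returns the "integer
// indefinite" value 0x8000000000000000. cmpq(result, 1) computes result - 1,
// which overflows (OF set) only for that value, so j(no_overflow) skips the
// slow path exactly when the fast conversion succeeded.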
3216 
3217 
3218 void MacroAssembler::TruncateDoubleToI(Register result_reg,
3219  XMMRegister input_reg) {
3220  Label done;
3221  cvttsd2siq(result_reg, input_reg);
3222  cmpq(result_reg, Immediate(1));
3223  j(no_overflow, &done, Label::kNear);
3224 
3225  subp(rsp, Immediate(kDoubleSize));
3226  movsd(MemOperand(rsp, 0), input_reg);
3227  SlowTruncateToI(result_reg, rsp, 0);
3228  addp(rsp, Immediate(kDoubleSize));
3229 
3230  bind(&done);
3231 }
3232 
3233 
3234 void MacroAssembler::DoubleToI(Register result_reg,
3235  XMMRegister input_reg,
3236  XMMRegister scratch,
3237  MinusZeroMode minus_zero_mode,
3238  Label* conversion_failed,
3239  Label::Distance dst) {
3240  cvttsd2si(result_reg, input_reg);
3241  Cvtlsi2sd(xmm0, result_reg);
3242  ucomisd(xmm0, input_reg);
3243  j(not_equal, conversion_failed, dst);
3244  j(parity_even, conversion_failed, dst); // NaN.
3245  if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
3246  Label done;
3247  // The integer converted back is equal to the original. We
3248  // only have to test if we got -0 as an input.
3249  testl(result_reg, result_reg);
3250  j(not_zero, &done, Label::kNear);
3251  movmskpd(result_reg, input_reg);
3252  // Bit 0 contains the sign of the double in input_reg.
3253  // If input was positive, we are ok and return 0, otherwise
3254  // jump to conversion_failed.
3255  andl(result_reg, Immediate(1));
3256  j(not_zero, conversion_failed, dst);
3257  bind(&done);
3258  }
3259 }
3260 
3261 
3262 void MacroAssembler::TaggedToI(Register result_reg,
3263  Register input_reg,
3264  XMMRegister temp,
3265  MinusZeroMode minus_zero_mode,
3266  Label* lost_precision,
3267  Label::Distance dst) {
3268  Label done;
3269  ASSERT(!temp.is(xmm0));
3270 
3271  // Heap number map check.
3272  CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
3273  Heap::kHeapNumberMapRootIndex);
3274  j(not_equal, lost_precision, dst);
3275 
3276  movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
3277  cvttsd2si(result_reg, xmm0);
3278  Cvtlsi2sd(temp, result_reg);
3279  ucomisd(xmm0, temp);
3280  RecordComment("Deferred TaggedToI: lost precision");
3281  j(not_equal, lost_precision, dst);
3282  RecordComment("Deferred TaggedToI: NaN");
3283  j(parity_even, lost_precision, dst); // NaN.
3284  if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
3285  testl(result_reg, result_reg);
3286  j(not_zero, &done, Label::kNear);
3287  movmskpd(result_reg, xmm0);
3288  andl(result_reg, Immediate(1));
3289  j(not_zero, lost_precision, dst);
3290  }
3291  bind(&done);
3292 }
3293 
3294 
3295 void MacroAssembler::Throw(BailoutReason reason) {
3296 #ifdef DEBUG
3297  const char* msg = GetBailoutReason(reason);
3298  if (msg != NULL) {
3299  RecordComment("Throw message: ");
3300  RecordComment(msg);
3301  }
3302 #endif
3303 
3304  Push(rax);
3305  Push(Smi::FromInt(reason));
3306  if (!has_frame_) {
3307  // We don't actually want to generate a pile of code for this, so just
3308  // claim there is a stack frame, without generating one.
3309  FrameScope scope(this, StackFrame::NONE);
3310  CallRuntime(Runtime::kHiddenThrowMessage, 1);
3311  } else {
3312  CallRuntime(Runtime::kHiddenThrowMessage, 1);
3313  }
3314  // Control will not return here.
3315  int3();
3316 }
3317 
3318 
3319 void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
3320  Label L;
3321  j(NegateCondition(cc), &L);
3322  Throw(reason);
3323  // will not return here
3324  bind(&L);
3325 }
3326 
3327 
3328 void MacroAssembler::LoadInstanceDescriptors(Register map,
3329  Register descriptors) {
3330  movp(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
3331 }
3332 
3333 
3334 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3335  movp(dst, FieldOperand(map, Map::kBitField3Offset));
3336  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3337 }
3338 
3339 
3340 void MacroAssembler::EnumLength(Register dst, Register map) {
3341  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3342  movp(dst, FieldOperand(map, Map::kBitField3Offset));
3343  Move(kScratchRegister, Smi::FromInt(Map::EnumLengthBits::kMask));
3344  andp(dst, kScratchRegister);
3345 }
3346 
3347 
3348 void MacroAssembler::DispatchMap(Register obj,
3349  Register unused,
3350  Handle<Map> map,
3351  Handle<Code> success,
3352  SmiCheckType smi_check_type) {
3353  Label fail;
3354  if (smi_check_type == DO_SMI_CHECK) {
3355  JumpIfSmi(obj, &fail);
3356  }
3357  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
3358  j(equal, success, RelocInfo::CODE_TARGET);
3359 
3360  bind(&fail);
3361 }
3362 
3363 
3364 void MacroAssembler::AssertNumber(Register object) {
3365  if (emit_debug_code()) {
3366  Label ok;
3367  Condition is_smi = CheckSmi(object);
3368  j(is_smi, &ok, Label::kNear);
3369  Cmp(FieldOperand(object, HeapObject::kMapOffset),
3370  isolate()->factory()->heap_number_map());
3371  Check(equal, kOperandIsNotANumber);
3372  bind(&ok);
3373  }
3374 }
3375 
3376 
3377 void MacroAssembler::AssertNotSmi(Register object) {
3378  if (emit_debug_code()) {
3379  Condition is_smi = CheckSmi(object);
3380  Check(NegateCondition(is_smi), kOperandIsASmi);
3381  }
3382 }
3383 
3384 
3385 void MacroAssembler::AssertSmi(Register object) {
3386  if (emit_debug_code()) {
3387  Condition is_smi = CheckSmi(object);
3388  Check(is_smi, kOperandIsNotASmi);
3389  }
3390 }
3391 
3392 
3393 void MacroAssembler::AssertSmi(const Operand& object) {
3394  if (emit_debug_code()) {
3395  Condition is_smi = CheckSmi(object);
3396  Check(is_smi, kOperandIsNotASmi);
3397  }
3398 }
3399 
3400 
3401 void MacroAssembler::AssertZeroExtended(Register int32_register) {
3402  if (emit_debug_code()) {
3403  ASSERT(!int32_register.is(kScratchRegister));
3404  movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
3405  cmpq(kScratchRegister, int32_register);
3406  Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
3407  }
3408 }
3409 
3410 
3411 void MacroAssembler::AssertString(Register object) {
3412  if (emit_debug_code()) {
3413  testb(object, Immediate(kSmiTagMask));
3414  Check(not_equal, kOperandIsASmiAndNotAString);
3415  Push(object);
3416  movp(object, FieldOperand(object, HeapObject::kMapOffset));
3417  CmpInstanceType(object, FIRST_NONSTRING_TYPE);
3418  Pop(object);
3419  Check(below, kOperandIsNotAString);
3420  }
3421 }
3422 
3423 
3424 void MacroAssembler::AssertName(Register object) {
3425  if (emit_debug_code()) {
3426  testb(object, Immediate(kSmiTagMask));
3427  Check(not_equal, kOperandIsASmiAndNotAName);
3428  Push(object);
3429  movp(object, FieldOperand(object, HeapObject::kMapOffset));
3430  CmpInstanceType(object, LAST_NAME_TYPE);
3431  Pop(object);
3432  Check(below_equal, kOperandIsNotAName);
3433  }
3434 }
3435 
3436 
3437 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
3438  if (emit_debug_code()) {
3439  Label done_checking;
3440  AssertNotSmi(object);
3441  Cmp(object, isolate()->factory()->undefined_value());
3442  j(equal, &done_checking);
3443  Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
3444  Assert(equal, kExpectedUndefinedOrCell);
3445  bind(&done_checking);
3446  }
3447 }
3448 
3449 
3450 void MacroAssembler::AssertRootValue(Register src,
3451  Heap::RootListIndex root_value_index,
3452  BailoutReason reason) {
3453  if (emit_debug_code()) {
3454  ASSERT(!src.is(kScratchRegister));
3455  LoadRoot(kScratchRegister, root_value_index);
3456  cmpp(src, kScratchRegister);
3457  Check(equal, reason);
3458  }
3459 }
3460 
3461 
3462 
3463 Condition MacroAssembler::IsObjectStringType(Register heap_object,
3464  Register map,
3465  Register instance_type) {
3466  movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3467  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3468  STATIC_ASSERT(kNotStringTag != 0);
3469  testb(instance_type, Immediate(kIsNotStringMask));
3470  return zero;
3471 }
3472 
3473 
3474 Condition MacroAssembler::IsObjectNameType(Register heap_object,
3475  Register map,
3476  Register instance_type) {
3477  movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3478  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3479  cmpb(instance_type, Immediate(static_cast<uint8_t>(LAST_NAME_TYPE)));
3480  return below_equal;
3481 }
3482 
3483 
3484 void MacroAssembler::TryGetFunctionPrototype(Register function,
3485  Register result,
3486  Label* miss,
3487  bool miss_on_bound_function) {
3488  // Check that the receiver isn't a smi.
3489  testl(function, Immediate(kSmiTagMask));
3490  j(zero, miss);
3491 
3492  // Check that the function really is a function.
3493  CmpObjectType(function, JS_FUNCTION_TYPE, result);
3494  j(not_equal, miss);
3495 
3496  if (miss_on_bound_function) {
3497  movp(kScratchRegister,
3498  FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3499  // It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
3500  // field).
3501  TestBit(FieldOperand(kScratchRegister,
3502  SharedFunctionInfo::kCompilerHintsOffset),
3503  SharedFunctionInfo::kBoundFunction);
3504  j(not_zero, miss);
3505  }
3506 
3507  // Make sure that the function has an instance prototype.
3508  Label non_instance;
3509  testb(FieldOperand(result, Map::kBitFieldOffset),
3510  Immediate(1 << Map::kHasNonInstancePrototype));
3511  j(not_zero, &non_instance, Label::kNear);
3512 
3513  // Get the prototype or initial map from the function.
3514  movp(result,
3515  FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3516 
3517  // If the prototype or initial map is the hole, don't return it and
3518  // simply miss the cache instead. This will allow us to allocate a
3519  // prototype object on-demand in the runtime system.
3520  CompareRoot(result, Heap::kTheHoleValueRootIndex);
3521  j(equal, miss);
3522 
3523  // If the function does not have an initial map, we're done.
3524  Label done;
3525  CmpObjectType(result, MAP_TYPE, kScratchRegister);
3526  j(not_equal, &done, Label::kNear);
3527 
3528  // Get the prototype from the initial map.
3529  movp(result, FieldOperand(result, Map::kPrototypeOffset));
3530  jmp(&done, Label::kNear);
3531 
3532  // Non-instance prototype: Fetch prototype from constructor field
3533  // in initial map.
3534  bind(&non_instance);
3535  movp(result, FieldOperand(result, Map::kConstructorOffset));
3536 
3537  // All done.
3538  bind(&done);
3539 }
3540 
3541 
3542 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
3543  if (FLAG_native_code_counters && counter->Enabled()) {
3544  Operand counter_operand = ExternalOperand(ExternalReference(counter));
3545  movl(counter_operand, Immediate(value));
3546  }
3547 }
3548 
3549 
3550 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
3551  ASSERT(value > 0);
3552  if (FLAG_native_code_counters && counter->Enabled()) {
3553  Operand counter_operand = ExternalOperand(ExternalReference(counter));
3554  if (value == 1) {
3555  incl(counter_operand);
3556  } else {
3557  addl(counter_operand, Immediate(value));
3558  }
3559  }
3560 }
3561 
3562 
3563 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
3564  ASSERT(value > 0);
3565  if (FLAG_native_code_counters && counter->Enabled()) {
3566  Operand counter_operand = ExternalOperand(ExternalReference(counter));
3567  if (value == 1) {
3568  decl(counter_operand);
3569  } else {
3570  subl(counter_operand, Immediate(value));
3571  }
3572  }
3573 }
3574 
3575 
3576 #ifdef ENABLE_DEBUGGER_SUPPORT
3577 void MacroAssembler::DebugBreak() {
3578  Set(rax, 0); // No arguments.
3579  LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
3580  CEntryStub ces(1);
3581  ASSERT(AllowThisStubCall(&ces));
3582  Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
3583 }
3584 #endif // ENABLE_DEBUGGER_SUPPORT
3585 
3586 
3587 void MacroAssembler::InvokeCode(Register code,
3588  const ParameterCount& expected,
3589  const ParameterCount& actual,
3590  InvokeFlag flag,
3591  const CallWrapper& call_wrapper) {
3592  // You can't call a function without a valid frame.
3593  ASSERT(flag == JUMP_FUNCTION || has_frame());
3594 
3595  Label done;
3596  bool definitely_mismatches = false;
3597  InvokePrologue(expected,
3598  actual,
3599  Handle<Code>::null(),
3600  code,
3601  &done,
3602  &definitely_mismatches,
3603  flag,
3604  Label::kNear,
3605  call_wrapper);
3606  if (!definitely_mismatches) {
3607  if (flag == CALL_FUNCTION) {
3608  call_wrapper.BeforeCall(CallSize(code));
3609  call(code);
3610  call_wrapper.AfterCall();
3611  } else {
3612  ASSERT(flag == JUMP_FUNCTION);
3613  jmp(code);
3614  }
3615  bind(&done);
3616  }
3617 }
3618 
3619 
3620 void MacroAssembler::InvokeFunction(Register function,
3621  const ParameterCount& actual,
3622  InvokeFlag flag,
3623  const CallWrapper& call_wrapper) {
3624  // You can't call a function without a valid frame.
3625  ASSERT(flag == JUMP_FUNCTION || has_frame());
3626 
3627  ASSERT(function.is(rdi));
3628  movp(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3629  movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
3630  movsxlq(rbx,
3631  FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
3632  // Advances rdx to the end of the Code object header, to the start of
3633  // the executable code.
3634  movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3635 
3636  ParameterCount expected(rbx);
3637  InvokeCode(rdx, expected, actual, flag, call_wrapper);
3638 }
3639 
3640 
3641 void MacroAssembler::InvokeFunction(Register function,
3642  const ParameterCount& expected,
3643  const ParameterCount& actual,
3644  InvokeFlag flag,
3645  const CallWrapper& call_wrapper) {
3646  // You can't call a function without a valid frame.
3647  ASSERT(flag == JUMP_FUNCTION || has_frame());
3648 
3649  ASSERT(function.is(rdi));
3650  movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
3651  // Advances rdx to the end of the Code object header, to the start of
3652  // the executable code.
3653  movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3654 
3655  InvokeCode(rdx, expected, actual, flag, call_wrapper);
3656 }
3657 
3658 
3659 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
3660  const ParameterCount& expected,
3661  const ParameterCount& actual,
3662  InvokeFlag flag,
3663  const CallWrapper& call_wrapper) {
3664  Move(rdi, function);
3665  InvokeFunction(rdi, expected, actual, flag, call_wrapper);
3666 }
3667 
3668 
3669 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3670  const ParameterCount& actual,
3671  Handle<Code> code_constant,
3672  Register code_register,
3673  Label* done,
3674  bool* definitely_mismatches,
3675  InvokeFlag flag,
3676  Label::Distance near_jump,
3677  const CallWrapper& call_wrapper) {
3678  bool definitely_matches = false;
3679  *definitely_mismatches = false;
3680  Label invoke;
3681  if (expected.is_immediate()) {
3682  ASSERT(actual.is_immediate());
3683  if (expected.immediate() == actual.immediate()) {
3684  definitely_matches = true;
3685  } else {
3686  Set(rax, actual.immediate());
3687  if (expected.immediate() ==
3688  SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
3689  // Don't worry about adapting arguments for built-ins that
3690  // don't want that done. Skip adaptation code by making it look
3691  // like we have a match between expected and actual number of
3692  // arguments.
3693  definitely_matches = true;
3694  } else {
3695  *definitely_mismatches = true;
3696  Set(rbx, expected.immediate());
3697  }
3698  }
3699  } else {
3700  if (actual.is_immediate()) {
3701  // Expected is in register, actual is immediate. This is the
3702  // case when we invoke function values without going through the
3703  // IC mechanism.
3704  cmpp(expected.reg(), Immediate(actual.immediate()));
3705  j(equal, &invoke, Label::kNear);
3706  ASSERT(expected.reg().is(rbx));
3707  Set(rax, actual.immediate());
3708  } else if (!expected.reg().is(actual.reg())) {
3709  // Both expected and actual are in (different) registers. This
3710  // is the case when we invoke functions using call and apply.
3711  cmpp(expected.reg(), actual.reg());
3712  j(equal, &invoke, Label::kNear);
3713  ASSERT(actual.reg().is(rax));
3714  ASSERT(expected.reg().is(rbx));
3715  }
3716  }
3717 
3718  if (!definitely_matches) {
3719  Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
3720  if (!code_constant.is_null()) {
3721  Move(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
3722  addp(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
3723  } else if (!code_register.is(rdx)) {
3724  movp(rdx, code_register);
3725  }
3726 
3727  if (flag == CALL_FUNCTION) {
3728  call_wrapper.BeforeCall(CallSize(adaptor));
3729  Call(adaptor, RelocInfo::CODE_TARGET);
3730  call_wrapper.AfterCall();
3731  if (!*definitely_mismatches) {
3732  jmp(done, near_jump);
3733  }
3734  } else {
3735  Jump(adaptor, RelocInfo::CODE_TARGET);
3736  }
3737  bind(&invoke);
3738  }
3739 }
3740 
3741 
3742 void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
3743  if (frame_mode == BUILD_STUB_FRAME) {
3744  pushq(rbp); // Caller's frame pointer.
3745  movp(rbp, rsp);
3746  Push(rsi); // Callee's context.
3747  Push(Smi::FromInt(StackFrame::STUB));
3748  } else {
3749  PredictableCodeSizeScope predictable_code_size_scope(this,
3750  kNoCodeAgeSequenceLength);
3751  if (isolate()->IsCodePreAgingActive()) {
3752  // Pre-age the code.
3753  Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
3754  RelocInfo::CODE_AGE_SEQUENCE);
3755  Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
3756  } else {
3757  pushq(rbp); // Caller's frame pointer.
3758  movp(rbp, rsp);
3759  Push(rsi); // Callee's context.
3760  Push(rdi); // Callee's JS function.
3761  }
3762  }
3763 }
3764 
3765 
3766 void MacroAssembler::EnterFrame(StackFrame::Type type) {
3767  pushq(rbp);
3768  movp(rbp, rsp);
3769  Push(rsi); // Context.
3770  Push(Smi::FromInt(type));
3771  Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
3772  Push(kScratchRegister);
3773  if (emit_debug_code()) {
3774  Move(kScratchRegister,
3775  isolate()->factory()->undefined_value(),
3776  RelocInfo::EMBEDDED_OBJECT);
3777  cmpp(Operand(rsp, 0), kScratchRegister);
3778  Check(not_equal, kCodeObjectNotProperlyPatched);
3779  }
3780 }
3781 
3782 
3783 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
3784  if (emit_debug_code()) {
3785  Move(kScratchRegister, Smi::FromInt(type));
3786  cmpp(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
3787  Check(equal, kStackFrameTypesMustMatch);
3788  }
3789  movp(rsp, rbp);
3790  popq(rbp);
3791 }
3792 
3793 
3794 void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
3795  // Set up the frame structure on the stack.
3796  // All constants are relative to the frame pointer of the exit frame.
3797  ASSERT(ExitFrameConstants::kCallerSPDisplacement ==
3798  kFPOnStackSize + kPCOnStackSize);
3799  ASSERT(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
3800  ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
3801  pushq(rbp);
3802  movp(rbp, rsp);
3803 
3804  // Reserve room for entry stack pointer and push the code object.
3805  ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
3806  Push(Immediate(0)); // Saved entry sp, patched before call.
3807  Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
3808  Push(kScratchRegister); // Accessed from ExitFrame::code_slot.
3809 
3810  // Save the frame pointer and the context in top.
3811  if (save_rax) {
3812  movp(r14, rax); // Backup rax in callee-save register.
3813  }
3814 
3815  Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
3816  Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi);
3817 }
3818 
3819 
3820 void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
3821  bool save_doubles) {
3822 #ifdef _WIN64
3823  const int kShadowSpace = 4;
3824  arg_stack_space += kShadowSpace;
3825 #endif
3826  // Optionally save all XMM registers.
3827  if (save_doubles) {
3828  int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize +
3829  arg_stack_space * kRegisterSize;
3830  subp(rsp, Immediate(space));
3831  int offset = -2 * kPointerSize;
3832  for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
3833  XMMRegister reg = XMMRegister::FromAllocationIndex(i);
3834  movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
3835  }
3836  } else if (arg_stack_space > 0) {
3837  subp(rsp, Immediate(arg_stack_space * kRegisterSize));
3838  }
3839 
3840  // Get the required frame alignment for the OS.
3841  const int kFrameAlignment = OS::ActivationFrameAlignment();
3842  if (kFrameAlignment > 0) {
3843  ASSERT(IsPowerOf2(kFrameAlignment));
3844  ASSERT(is_int8(kFrameAlignment));
3845  andp(rsp, Immediate(-kFrameAlignment));
3846  }
3847 
3848  // Patch the saved entry sp.
3849  movp(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
3850 }
3851 
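The andp(rsp, Immediate(-kFrameAlignment)) step above relies on the activation-frame alignment being a power of two, as the ASSERTs check. A minimal sketch of the same rounding in plain C++ (the function name and parameters are illustrative, not V8 API):

#include <cstdint>

// Round a stack pointer down to a power-of-two alignment boundary.
// For powers of two, sp & -alignment is identical to sp & ~(alignment - 1).
inline uintptr_t AlignStackDown(uintptr_t sp, uintptr_t alignment) {
  return sp & ~(alignment - 1);
}

For example, AlignStackDown(0x7fff1238, 16) yields 0x7fff1230.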
3852 
3853 void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
3854  EnterExitFramePrologue(true);
3855 
3856  // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
3857  // so it must be retained across the C-call.
3858  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
3859  leap(r15, Operand(rbp, r14, times_pointer_size, offset));
3860 
3861  EnterExitFrameEpilogue(arg_stack_space, save_doubles);
3862 }
3863 
3864 
3865 void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
3866  EnterExitFramePrologue(false);
3867  EnterExitFrameEpilogue(arg_stack_space, false);
3868 }
3869 
3870 
3871 void MacroAssembler::LeaveExitFrame(bool save_doubles) {
3872  // Registers:
3873  // r15 : argv
3874  if (save_doubles) {
3875  int offset = -2 * kPointerSize;
3876  for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
3877  XMMRegister reg = XMMRegister::FromAllocationIndex(i);
3878  movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
3879  }
3880  }
3881  // Get the return address from the stack and restore the frame pointer.
3882  movp(rcx, Operand(rbp, kFPOnStackSize));
3883  movp(rbp, Operand(rbp, 0 * kPointerSize));
3884 
3885  // Drop everything up to and including the arguments and the receiver
3886  // from the caller stack.
3887  leap(rsp, Operand(r15, 1 * kPointerSize));
3888 
3889  PushReturnAddressFrom(rcx);
3890 
3891  LeaveExitFrameEpilogue(true);
3892 }
3893 
3894 
3895 void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
3896  movp(rsp, rbp);
3897  popq(rbp);
3898 
3899  LeaveExitFrameEpilogue(restore_context);
3900 }
3901 
3902 
3903 void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
3904  // Restore current context from top and clear it in debug mode.
3905  ExternalReference context_address(Isolate::kContextAddress, isolate());
3906  Operand context_operand = ExternalOperand(context_address);
3907  if (restore_context) {
3908  movp(rsi, context_operand);
3909  }
3910 #ifdef DEBUG
3911  movp(context_operand, Immediate(0));
3912 #endif
3913 
3914  // Clear the top frame.
3915  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
3916  isolate());
3917  Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
3918  movp(c_entry_fp_operand, Immediate(0));
3919 }
3920 
3921 
3922 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
3923  Register scratch,
3924  Label* miss) {
3925  Label same_contexts;
3926 
3927  ASSERT(!holder_reg.is(scratch));
3928  ASSERT(!scratch.is(kScratchRegister));
3929  // Load current lexical context from the stack frame.
3930  movp(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
3931 
3932  // When generating debug code, make sure the lexical context is set.
3933  if (emit_debug_code()) {
3934  cmpp(scratch, Immediate(0));
3935  Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
3936  }
3937  // Load the native context of the current context.
3938  int offset =
3939  Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
3940  movp(scratch, FieldOperand(scratch, offset));
3941  movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
3942 
3943  // Check the context is a native context.
3944  if (emit_debug_code()) {
3945  Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
3946  isolate()->factory()->native_context_map());
3947  Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
3948  }
3949 
3950  // Check if both contexts are the same.
3951  cmpp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
3952  j(equal, &same_contexts);
3953 
3954  // Compare security tokens.
3955  // Check that the security token in the calling global object is
3956  // compatible with the security token in the receiving global
3957  // object.
3958 
3959  // Check the context is a native context.
3960  if (emit_debug_code()) {
3961  // Preserve original value of holder_reg.
3962  Push(holder_reg);
3963  movp(holder_reg,
3964  FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
3965  CompareRoot(holder_reg, Heap::kNullValueRootIndex);
3966  Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
3967 
3968  // Read the first word and compare to native_context_map(),
3969  movp(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
3970  CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
3971  Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
3972  Pop(holder_reg);
3973  }
3974 
3975  movp(kScratchRegister,
3976  FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
3977  int token_offset =
3978  Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
3979  movp(scratch, FieldOperand(scratch, token_offset));
3980  cmpp(scratch, FieldOperand(kScratchRegister, token_offset));
3981  j(not_equal, miss);
3982 
3983  bind(&same_contexts);
3984 }
3985 
3986 
3987 // Compute the hash code from the untagged key. This must be kept in sync with
3988 // ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
3989 // code-stub-hydrogen.cc
3990 void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
3991  // First of all we assign the hash seed to scratch.
3992  LoadRoot(scratch, Heap::kHashSeedRootIndex);
3993  SmiToInteger32(scratch, scratch);
3994 
3995  // Xor original key with a seed.
3996  xorl(r0, scratch);
3997 
3998  // Compute the hash code from the untagged key. This must be kept in sync
3999  // with ComputeIntegerHash in utils.h.
4000  //
4001  // hash = ~hash + (hash << 15);
4002  movl(scratch, r0);
4003  notl(r0);
4004  shll(scratch, Immediate(15));
4005  addl(r0, scratch);
4006  // hash = hash ^ (hash >> 12);
4007  movl(scratch, r0);
4008  shrl(scratch, Immediate(12));
4009  xorl(r0, scratch);
4010  // hash = hash + (hash << 2);
4011  leal(r0, Operand(r0, r0, times_4, 0));
4012  // hash = hash ^ (hash >> 4);
4013  movl(scratch, r0);
4014  shrl(scratch, Immediate(4));
4015  xorl(r0, scratch);
4016  // hash = hash * 2057;
4017  imull(r0, r0, Immediate(2057));
4018  // hash = hash ^ (hash >> 16);
4019  movl(scratch, r0);
4020  shrl(scratch, Immediate(16));
4021  xorl(r0, scratch);
4022 }
4023 
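For reference, the hash sequence that GetNumberHash emits, written as plain C++. This is a sketch reconstructed from the comments above; the canonical definition is ComputeIntegerHash in utils.h, which is not reproduced here:

#include <cstdint>

uint32_t SeededIntegerHash(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;     // xorl(r0, scratch)
  hash = ~hash + (hash << 15);    // notl / shll / addl
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);      // leal(r0, Operand(r0, r0, times_4, 0))
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;             // imull(r0, r0, Immediate(2057))
  hash = hash ^ (hash >> 16);
  return hash;
}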
4024 
4025 
4026 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
4027  Register elements,
4028  Register key,
4029  Register r0,
4030  Register r1,
4031  Register r2,
4032  Register result) {
4033  // Register use:
4034  //
4035  // elements - holds the slow-case elements of the receiver on entry.
4036  // Unchanged unless 'result' is the same register.
4037  //
4038  // key - holds the smi key on entry.
4039  // Unchanged unless 'result' is the same register.
4040  //
4041  // Scratch registers:
4042  //
4043  // r0 - holds the untagged key on entry and holds the hash once computed.
4044  //
4045  // r1 - used to hold the capacity mask of the dictionary
4046  //
4047  // r2 - used for the index into the dictionary.
4048  //
4049  // result - holds the result on exit if the load succeeded.
4050  // Allowed to be the same as 'key' or 'result'.
4051  // Unchanged on bailout so 'key' or 'result' can be used
4052  // in further computation.
4053 
4054  Label done;
4055 
4056  GetNumberHash(r0, r1);
4057 
4058  // Compute capacity mask.
4059  SmiToInteger32(r1, FieldOperand(elements,
4060  SeededNumberDictionary::kCapacityOffset));
4061  decl(r1);
4062 
4063  // Generate an unrolled loop that performs a few probes before giving up.
4064  for (int i = 0; i < kNumberDictionaryProbes; i++) {
4065  // Use r2 for index calculations and keep the hash intact in r0.
4066  movp(r2, r0);
4067  // Compute the masked index: (hash + i + i * i) & mask.
4068  if (i > 0) {
4069  addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
4070  }
4071  andp(r2, r1);
4072 
4073  // Scale the index by multiplying by the entry size.
4074  ASSERT(SeededNumberDictionary::kEntrySize == 3);
4075  leap(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
4076 
4077  // Check if the key matches.
4078  cmpp(key, FieldOperand(elements,
4079  r2,
4080  times_pointer_size,
4081  SeededNumberDictionary::kElementsStartOffset));
4082  if (i != (kNumberDictionaryProbes - 1)) {
4083  j(equal, &done);
4084  } else {
4085  j(not_equal, miss);
4086  }
4087  }
4088 
4089  bind(&done);
4090  // Check that the value is a normal property.
4091  const int kDetailsOffset =
4092  SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
4093  ASSERT_EQ(NORMAL, 0);
4094  Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
4095  Smi::FromInt(PropertyDetails::TypeField::kMask));
4096  j(not_zero, miss);
4097 
4098  // Get the value at the masked, scaled index.
4099  const int kValueOffset =
4100  SeededNumberDictionary::kElementsStartOffset + kPointerSize;
4101  movp(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
4102 }
4103 
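A small sketch (not part of this file) of the probe-slot computation performed by the unrolled loop in LoadFromNumberDictionary. It assumes, per the loop comment, that probe i adds i + i*i to the hash, and uses the entry size of 3 asserted above; the real offset comes from SeededNumberDictionary::GetProbeOffset:

#include <cstdint>

// Returns the slot index (in pointer-sized units) probed on iteration i.
// 'capacity' must be a power of two, so capacity - 1 acts as a mask.
inline uint32_t NumberDictionaryProbeSlot(uint32_t hash, uint32_t capacity,
                                          int i) {
  uint32_t index = (hash + i + i * i) & (capacity - 1);  // masked index
  return index * 3;                                      // entry size is 3
}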
4104 
4105 void MacroAssembler::LoadAllocationTopHelper(Register result,
4106  Register scratch,
4107  AllocationFlags flags) {
4108  ExternalReference allocation_top =
4109  AllocationUtils::GetAllocationTopReference(isolate(), flags);
4110 
4111  // Just return if allocation top is already known.
4112  if ((flags & RESULT_CONTAINS_TOP) != 0) {
4113  // No use of scratch if allocation top is provided.
4114  ASSERT(!scratch.is_valid());
4115 #ifdef DEBUG
4116  // Assert that result actually contains top on entry.
4117  Operand top_operand = ExternalOperand(allocation_top);
4118  cmpp(result, top_operand);
4119  Check(equal, kUnexpectedAllocationTop);
4120 #endif
4121  return;
4122  }
4123 
4124  // Move address of new object to result. Use scratch register if available,
4125  // and keep address in scratch until call to UpdateAllocationTopHelper.
4126  if (scratch.is_valid()) {
4127  LoadAddress(scratch, allocation_top);
4128  movp(result, Operand(scratch, 0));
4129  } else {
4130  Load(result, allocation_top);
4131  }
4132 }
4133 
4134 
4135 void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
4136  Register scratch,
4137  AllocationFlags flags) {
4138  if (emit_debug_code()) {
4139  testp(result_end, Immediate(kObjectAlignmentMask));
4140  Check(zero, kUnalignedAllocationInNewSpace);
4141  }
4142 
4143  ExternalReference allocation_top =
4144  AllocationUtils::GetAllocationTopReference(isolate(), flags);
4145 
4146  // Update new top.
4147  if (scratch.is_valid()) {
4148  // Scratch already contains address of allocation top.
4149  movp(Operand(scratch, 0), result_end);
4150  } else {
4151  Store(allocation_top, result_end);
4152  }
4153 }
4154 
4155 
4156 void MacroAssembler::Allocate(int object_size,
4157  Register result,
4158  Register result_end,
4159  Register scratch,
4160  Label* gc_required,
4161  AllocationFlags flags) {
4162  ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
4163  ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
4164  if (!FLAG_inline_new) {
4165  if (emit_debug_code()) {
4166  // Trash the registers to simulate an allocation failure.
4167  movl(result, Immediate(0x7091));
4168  if (result_end.is_valid()) {
4169  movl(result_end, Immediate(0x7191));
4170  }
4171  if (scratch.is_valid()) {
4172  movl(scratch, Immediate(0x7291));
4173  }
4174  }
4175  jmp(gc_required);
4176  return;
4177  }
4178  ASSERT(!result.is(result_end));
4179 
4180  // Load address of new object into result.
4181  LoadAllocationTopHelper(result, scratch, flags);
4182 
4183  // Align the next allocation. Storing the filler map without checking top is
4184  // safe in new-space because the limit of the heap is aligned there.
4185  if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
4186  testq(result, Immediate(kDoubleAlignmentMask));
4187  Check(zero, kAllocationIsNotDoubleAligned);
4188  }
4189 
4190  // Calculate new top and bail out if new space is exhausted.
4191  ExternalReference allocation_limit =
4192  AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4193 
4194  Register top_reg = result_end.is_valid() ? result_end : result;
4195 
4196  if (!top_reg.is(result)) {
4197  movp(top_reg, result);
4198  }
4199  addp(top_reg, Immediate(object_size));
4200  j(carry, gc_required);
4201  Operand limit_operand = ExternalOperand(allocation_limit);
4202  cmpp(top_reg, limit_operand);
4203  j(above, gc_required);
4204 
4205  // Update allocation top.
4206  UpdateAllocationTopHelper(top_reg, scratch, flags);
4207 
4208  bool tag_result = (flags & TAG_OBJECT) != 0;
4209  if (top_reg.is(result)) {
4210  if (tag_result) {
4211  subp(result, Immediate(object_size - kHeapObjectTag));
4212  } else {
4213  subp(result, Immediate(object_size));
4214  }
4215  } else if (tag_result) {
4216  // Tag the result if requested.
4217  ASSERT(kHeapObjectTag == 1);
4218  incp(result);
4219  }
4220 }
4221 
4222 
4223 void MacroAssembler::Allocate(int header_size,
4224  ScaleFactor element_size,
4225  Register element_count,
4226  Register result,
4227  Register result_end,
4228  Register scratch,
4229  Label* gc_required,
4230  AllocationFlags flags) {
4231  ASSERT((flags & SIZE_IN_WORDS) == 0);
4232  leap(result_end, Operand(element_count, element_size, header_size));
4233  Allocate(result_end, result, result_end, scratch, gc_required, flags);
4234 }
4235 
4236 
4237 void MacroAssembler::Allocate(Register object_size,
4238  Register result,
4239  Register result_end,
4240  Register scratch,
4241  Label* gc_required,
4242  AllocationFlags flags) {
4243  ASSERT((flags & SIZE_IN_WORDS) == 0);
4244  if (!FLAG_inline_new) {
4245  if (emit_debug_code()) {
4246  // Trash the registers to simulate an allocation failure.
4247  movl(result, Immediate(0x7091));
4248  movl(result_end, Immediate(0x7191));
4249  if (scratch.is_valid()) {
4250  movl(scratch, Immediate(0x7291));
4251  }
4252  // object_size is left unchanged by this function.
4253  }
4254  jmp(gc_required);
4255  return;
4256  }
4257  ASSERT(!result.is(result_end));
4258 
4259  // Load address of new object into result.
4260  LoadAllocationTopHelper(result, scratch, flags);
4261 
4262  // Align the next allocation. Storing the filler map without checking top is
4263  // safe in new-space because the limit of the heap is aligned there.
4264  if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
4265  testq(result, Immediate(kDoubleAlignmentMask));
4266  Check(zero, kAllocationIsNotDoubleAligned);
4267  }
4268 
4269  // Calculate new top and bail out if new space is exhausted.
4270  ExternalReference allocation_limit =
4271  AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4272  if (!object_size.is(result_end)) {
4273  movp(result_end, object_size);
4274  }
4275  addp(result_end, result);
4276  j(carry, gc_required);
4277  Operand limit_operand = ExternalOperand(allocation_limit);
4278  cmpp(result_end, limit_operand);
4279  j(above, gc_required);
4280 
4281  // Update allocation top.
4282  UpdateAllocationTopHelper(result_end, scratch, flags);
4283 
4284  // Tag the result if requested.
4285  if ((flags & TAG_OBJECT) != 0) {
4286  addp(result, Immediate(kHeapObjectTag));
4287  }
4288 }
4289 
4290 
4291 void MacroAssembler::UndoAllocationInNewSpace(Register object) {
4292  ExternalReference new_space_allocation_top =
4293  ExternalReference::new_space_allocation_top_address(isolate());
4294 
4295  // Make sure the object has no tag before resetting top.
4296  andp(object, Immediate(~kHeapObjectTagMask));
4297  Operand top_operand = ExternalOperand(new_space_allocation_top);
4298 #ifdef DEBUG
4299  cmpp(object, top_operand);
4300  Check(below, kUndoAllocationOfNonAllocatedMemory);
4301 #endif
4302  movp(top_operand, object);
4303 }
4304 
4305 
4306 void MacroAssembler::AllocateHeapNumber(Register result,
4307  Register scratch,
4308  Label* gc_required) {
4309  // Allocate heap number in new space.
4310  Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
4311 
4312  // Set the map.
4313  LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
4314  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4315 }
4316 
4317 
4318 void MacroAssembler::AllocateTwoByteString(Register result,
4319  Register length,
4320  Register scratch1,
4321  Register scratch2,
4322  Register scratch3,
4323  Label* gc_required) {
4324  // Calculate the number of bytes needed for the characters in the string while
4325  // observing object alignment.
4326  const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
4327  kObjectAlignmentMask;
4328  ASSERT(kShortSize == 2);
4329  // scratch1 = length * 2 + kObjectAlignmentMask.
4330  leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
4331  kHeaderAlignment));
4332  andp(scratch1, Immediate(~kObjectAlignmentMask));
4333  if (kHeaderAlignment > 0) {
4334  subp(scratch1, Immediate(kHeaderAlignment));
4335  }
4336 
4337  // Allocate two byte string in new space.
4338  Allocate(SeqTwoByteString::kHeaderSize,
4339  times_1,
4340  scratch1,
4341  result,
4342  scratch2,
4343  scratch3,
4344  gc_required,
4345  TAG_OBJECT);
4346 
4347  // Set the map, length and hash field.
4348  LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
4349  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4350  Integer32ToSmi(scratch1, length);
4351  movp(FieldOperand(result, String::kLengthOffset), scratch1);
4352  movp(FieldOperand(result, String::kHashFieldOffset),
4353  Immediate(String::kEmptyHashField));
4354 }
4355 
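AllocateTwoByteString sizes the object so that header plus character payload ends on an object-alignment boundary. A hedged C++ sketch of that arithmetic, with the header size and alignment mask passed in as assumed parameters rather than the real constants:

#include <cstddef>

size_t TwoByteStringAllocationSize(size_t length, size_t header_size,
                                   size_t alignment_mask) {
  size_t header_alignment = header_size & alignment_mask;
  // length * 2 bytes of characters, rounded up together with the header tail;
  // the tail is then subtracted again, matching the subp above.
  size_t body = ((length * 2 + alignment_mask + header_alignment)
                 & ~alignment_mask) - header_alignment;
  return header_size + body;  // total size handed to Allocate()
}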
4356 
4357 void MacroAssembler::AllocateAsciiString(Register result,
4358  Register length,
4359  Register scratch1,
4360  Register scratch2,
4361  Register scratch3,
4362  Label* gc_required) {
4363  // Calculate the number of bytes needed for the characters in the string while
4364  // observing object alignment.
4365  const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
4366  kObjectAlignmentMask;
4367  movl(scratch1, length);
4368  ASSERT(kCharSize == 1);
4369  addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
4370  andp(scratch1, Immediate(~kObjectAlignmentMask));
4371  if (kHeaderAlignment > 0) {
4372  subp(scratch1, Immediate(kHeaderAlignment));
4373  }
4374 
4375  // Allocate ASCII string in new space.
4376  Allocate(SeqOneByteString::kHeaderSize,
4377  times_1,
4378  scratch1,
4379  result,
4380  scratch2,
4381  scratch3,
4382  gc_required,
4383  TAG_OBJECT);
4384 
4385  // Set the map, length and hash field.
4386  LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
4387  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4388  Integer32ToSmi(scratch1, length);
4389  movp(FieldOperand(result, String::kLengthOffset), scratch1);
4390  movp(FieldOperand(result, String::kHashFieldOffset),
4391  Immediate(String::kEmptyHashField));
4392 }
4393 
4394 
4395 void MacroAssembler::AllocateTwoByteConsString(Register result,
4396  Register scratch1,
4397  Register scratch2,
4398  Label* gc_required) {
4399  // Allocate heap number in new space.
4400  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
4401  TAG_OBJECT);
4402 
4403  // Set the map. The other fields are left uninitialized.
4404  LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
4405  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4406 }
4407 
4408 
4409 void MacroAssembler::AllocateAsciiConsString(Register result,
4410  Register scratch1,
4411  Register scratch2,
4412  Label* gc_required) {
4413  Label allocate_new_space, install_map;
4414  AllocationFlags flags = TAG_OBJECT;
4415 
4416  ExternalReference high_promotion_mode = ExternalReference::
4417  new_space_high_promotion_mode_active_address(isolate());
4418 
4419  Load(scratch1, high_promotion_mode);
4420  testb(scratch1, Immediate(1));
4421  j(zero, &allocate_new_space);
4422  Allocate(ConsString::kSize,
4423  result,
4424  scratch1,
4425  scratch2,
4426  gc_required,
4427  static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
4428 
4429  jmp(&install_map);
4430 
4431  bind(&allocate_new_space);
4432  Allocate(ConsString::kSize,
4433  result,
4434  scratch1,
4435  scratch2,
4436  gc_required,
4437  flags);
4438 
4439  bind(&install_map);
4440 
4441  // Set the map. The other fields are left uninitialized.
4442  LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
4443  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4444 }
4445 
4446 
4447 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
4448  Register scratch1,
4449  Register scratch2,
4450  Label* gc_required) {
4451  // Allocate heap number in new space.
4452  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4453  TAG_OBJECT);
4454 
4455  // Set the map. The other fields are left uninitialized.
4456  LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
4457  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4458 }
4459 
4460 
4461 void MacroAssembler::AllocateAsciiSlicedString(Register result,
4462  Register scratch1,
4463  Register scratch2,
4464  Label* gc_required) {
4465  // Allocate heap number in new space.
4466  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4467  TAG_OBJECT);
4468 
4469  // Set the map. The other fields are left uninitialized.
4470  LoadRoot(kScratchRegister, Heap::kSlicedAsciiStringMapRootIndex);
4471  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4472 }
4473 
4474 
4475 // Copy memory, byte-by-byte, from source to destination. Not optimized for
4476 // long or aligned copies. The contents of scratch and length are destroyed.
4477 // Destination is incremented by length, source, length and scratch are
4478 // clobbered.
4479 // A simpler loop is faster on small copies, but slower on large ones.
4480  // The cld() instruction must have been emitted, to set the direction flag,
4481 // before calling this function.
4482 void MacroAssembler::CopyBytes(Register destination,
4483  Register source,
4484  Register length,
4485  int min_length,
4486  Register scratch) {
4487  ASSERT(min_length >= 0);
4488  if (emit_debug_code()) {
4489  cmpl(length, Immediate(min_length));
4490  Assert(greater_equal, kInvalidMinLength);
4491  }
4492  Label short_loop, len8, len16, len24, done, short_string;
4493 
4494  const int kLongStringLimit = 4 * kPointerSize;
4495  if (min_length <= kLongStringLimit) {
4496  cmpl(length, Immediate(kPointerSize));
4497  j(below, &short_string, Label::kNear);
4498  }
4499 
4500  ASSERT(source.is(rsi));
4501  ASSERT(destination.is(rdi));
4502  ASSERT(length.is(rcx));
4503 
4504  if (min_length <= kLongStringLimit) {
4505  cmpl(length, Immediate(2 * kPointerSize));
4506  j(below_equal, &len8, Label::kNear);
4507  cmpl(length, Immediate(3 * kPointerSize));
4508  j(below_equal, &len16, Label::kNear);
4509  cmpl(length, Immediate(4 * kPointerSize));
4510  j(below_equal, &len24, Label::kNear);
4511  }
4512 
4513  // Because source is 8-byte aligned in our uses of this function,
4514  // we keep source aligned for the rep movs operation by copying the odd bytes
4515  // at the end of the ranges.
4516  movp(scratch, length);
4517  shrl(length, Immediate(kPointerSizeLog2));
4518  repmovsp();
4519  // Move remaining bytes of length.
4520  andl(scratch, Immediate(kPointerSize - 1));
4521  movp(length, Operand(source, scratch, times_1, -kPointerSize));
4522  movp(Operand(destination, scratch, times_1, -kPointerSize), length);
4523  addp(destination, scratch);
4524 
4525  if (min_length <= kLongStringLimit) {
4526  jmp(&done, Label::kNear);
4527  bind(&len24);
4528  movp(scratch, Operand(source, 2 * kPointerSize));
4529  movp(Operand(destination, 2 * kPointerSize), scratch);
4530  bind(&len16);
4531  movp(scratch, Operand(source, kPointerSize));
4532  movp(Operand(destination, kPointerSize), scratch);
4533  bind(&len8);
4534  movp(scratch, Operand(source, 0));
4535  movp(Operand(destination, 0), scratch);
4536  // Move remaining bytes of length.
4537  movp(scratch, Operand(source, length, times_1, -kPointerSize));
4538  movp(Operand(destination, length, times_1, -kPointerSize), scratch);
4539  addp(destination, length);
4540  jmp(&done, Label::kNear);
4541 
4542  bind(&short_string);
4543  if (min_length == 0) {
4544  testl(length, length);
4545  j(zero, &done, Label::kNear);
4546  }
4547 
4548  bind(&short_loop);
4549  movb(scratch, Operand(source, 0));
4550  movb(Operand(destination, 0), scratch);
4551  incp(source);
4552  incp(destination);
4553  decl(length);
4554  j(not_zero, &short_loop);
4555  }
4556 
4557  bind(&done);
4558 }
4559 
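A hedged C++ sketch of the long-copy path in CopyBytes: copy whole pointer-sized words, then copy one final word that ends exactly at the last byte, so the tail needs no byte loop. It assumes non-overlapping ranges and a length of at least one word, matching the path taken when length >= kPointerSize:

#include <cstdint>
#include <cstring>

void CopyBytesWordwise(uint8_t* dst, const uint8_t* src, size_t length) {
  const size_t kWord = sizeof(uintptr_t);
  size_t words = length / kWord;
  std::memcpy(dst, src, words * kWord);  // the rep movs portion
  // Copy the last word of the range; it may overlap bytes copied above,
  // which is harmless and avoids a byte-by-byte tail loop.
  std::memcpy(dst + length - kWord, src + length - kWord, kWord);
}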
4560 
4561 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
4562  Register end_offset,
4563  Register filler) {
4564  Label loop, entry;
4565  jmp(&entry);
4566  bind(&loop);
4567  movp(Operand(start_offset, 0), filler);
4568  addp(start_offset, Immediate(kPointerSize));
4569  bind(&entry);
4570  cmpp(start_offset, end_offset);
4571  j(less, &loop);
4572 }
4573 
4574 
4575 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4576  if (context_chain_length > 0) {
4577  // Move up the chain of contexts to the context containing the slot.
4578  movp(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4579  for (int i = 1; i < context_chain_length; i++) {
4580  movp(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4581  }
4582  } else {
4583  // Slot is in the current function context. Move it into the
4584  // destination register in case we store into it (the write barrier
4585  // cannot be allowed to destroy the context in rsi).
4586  movp(dst, rsi);
4587  }
4588 
4589  // We should not have found a with context by walking the context
4590  // chain (i.e., the static scope chain and runtime context chain do
4591  // not agree). A variable occurring in such a scope should have
4592  // slot type LOOKUP and not CONTEXT.
4593  if (emit_debug_code()) {
4594  CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
4595  Heap::kWithContextMapRootIndex);
4596  Check(not_equal, kVariableResolvedToWithContext);
4597  }
4598 }
4599 
4600 
4601 void MacroAssembler::LoadTransitionedArrayMapConditional(
4602  ElementsKind expected_kind,
4603  ElementsKind transitioned_kind,
4604  Register map_in_out,
4605  Register scratch,
4606  Label* no_map_match) {
4607  // Load the global or builtins object from the current context.
4608  movp(scratch,
4609  Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4610  movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
4611 
4612  // Check that the function's map is the same as the expected cached map.
4613  movp(scratch, Operand(scratch,
4614  Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4615 
4616  int offset = expected_kind * kPointerSize +
4617  FixedArrayBase::kHeaderSize;
4618  cmpp(map_in_out, FieldOperand(scratch, offset));
4619  j(not_equal, no_map_match);
4620 
4621  // Use the transitioned cached map.
4622  offset = transitioned_kind * kPointerSize +
4623  FixedArrayBase::kHeaderSize;
4624  movp(map_in_out, FieldOperand(scratch, offset));
4625 }
4626 
4627 
4628 #ifdef _WIN64
4629 static const int kRegisterPassedArguments = 4;
4630 #else
4631 static const int kRegisterPassedArguments = 6;
4632 #endif
4633 
4634 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4635  // Load the global or builtins object from the current context.
4636  movp(function,
4637  Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4638  // Load the native context from the global or builtins object.
4639  movp(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
4640  // Load the function from the native context.
4641  movp(function, Operand(function, Context::SlotOffset(index)));
4642 }
4643 
4644 
4645 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4646  Register map) {
4647  // Load the initial map. The global functions all have initial maps.
4648  movp(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4649  if (emit_debug_code()) {
4650  Label ok, fail;
4651  CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
4652  jmp(&ok);
4653  bind(&fail);
4654  Abort(kGlobalFunctionsMustHaveInitialMap);
4655  bind(&ok);
4656  }
4657 }
4658 
4659 
4660 int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
4661  // On Windows 64 stack slots are reserved by the caller for all arguments
4662  // including the ones passed in registers, and space is always allocated for
4663  // the four register arguments even if the function takes fewer than four
4664  // arguments.
4665  // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
4666  // and the caller does not reserve stack slots for them.
4667  ASSERT(num_arguments >= 0);
4668 #ifdef _WIN64
4669  const int kMinimumStackSlots = kRegisterPassedArguments;
4670  if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
4671  return num_arguments;
4672 #else
4673  if (num_arguments < kRegisterPassedArguments) return 0;
4674  return num_arguments - kRegisterPassedArguments;
4675 #endif
4676 }
4677 
4678 
4679 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
4680  Register index,
4681  Register value,
4682  uint32_t encoding_mask) {
4683  Label is_object;
4684  JumpIfNotSmi(string, &is_object);
4685  Abort(kNonObject);
4686  bind(&is_object);
4687 
4688  Push(value);
4689  movp(value, FieldOperand(string, HeapObject::kMapOffset));
4690  movzxbp(value, FieldOperand(value, Map::kInstanceTypeOffset));
4691 
4692  andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
4693  cmpp(value, Immediate(encoding_mask));
4694  Pop(value);
4695  Check(equal, kUnexpectedStringType);
4696 
4697  // The index is assumed to be untagged coming in; tag it to compare with
4698  // the string length without using a temp register. It is restored at the
4699  // end of this function.
4700  Integer32ToSmi(index, index);
4701  SmiCompare(index, FieldOperand(string, String::kLengthOffset));
4702  Check(less, kIndexIsTooLarge);
4703 
4704  SmiCompare(index, Smi::FromInt(0));
4705  Check(greater_equal, kIndexIsNegative);
4706 
4707  // Restore the index
4708  SmiToInteger32(index, index);
4709 }
4710 
4711 
4712 void MacroAssembler::PrepareCallCFunction(int num_arguments) {
4713  int frame_alignment = OS::ActivationFrameAlignment();
4714  ASSERT(frame_alignment != 0);
4715  ASSERT(num_arguments >= 0);
4716 
4717  // Make stack end at alignment and allocate space for arguments and old rsp.
4718  movp(kScratchRegister, rsp);
4719  ASSERT(IsPowerOf2(frame_alignment));
4720  int argument_slots_on_stack =
4721  ArgumentStackSlotsForCFunctionCall(num_arguments);
4722  subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
4723  andp(rsp, Immediate(-frame_alignment));
4724  movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
4725 }
4726 
4727 
4728 void MacroAssembler::CallCFunction(ExternalReference function,
4729  int num_arguments) {
4730  LoadAddress(rax, function);
4731  CallCFunction(rax, num_arguments);
4732 }
4733 
4734 
4735 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
4736  ASSERT(has_frame());
4737  // Check stack alignment.
4738  if (emit_debug_code()) {
4739  CheckStackAlignment();
4740  }
4741 
4742  call(function);
4743  ASSERT(OS::ActivationFrameAlignment() != 0);
4744  ASSERT(num_arguments >= 0);
4745  int argument_slots_on_stack =
4746  ArgumentStackSlotsForCFunctionCall(num_arguments);
4747  movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
4748 }
4749 
4750 
4751 bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
4752  if (r1.is(r2)) return true;
4753  if (r1.is(r3)) return true;
4754  if (r1.is(r4)) return true;
4755  if (r2.is(r3)) return true;
4756  if (r2.is(r4)) return true;
4757  if (r3.is(r4)) return true;
4758  return false;
4759 }
4760 
4761 
4762 CodePatcher::CodePatcher(byte* address, int size)
4763  : address_(address),
4764  size_(size),
4765  masm_(NULL, address, size + Assembler::kGap) {
4766  // Create a new macro assembler pointing to the address of the code to patch.
4767  // The size is adjusted with kGap in order for the assembler to generate
4768  // size bytes of instructions without failing with buffer size constraints.
4769  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4770 }
4771 
4772 
4773 CodePatcher::~CodePatcher() {
4774  // Indicate that code has changed.
4775  CPU::FlushICache(address_, size_);
4776 
4777  // Check that the code was patched as expected.
4778  ASSERT(masm_.pc_ == address_ + size_);
4779  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4780 }
4781 
4782 
4783 void MacroAssembler::CheckPageFlag(
4784  Register object,
4785  Register scratch,
4786  int mask,
4787  Condition cc,
4788  Label* condition_met,
4789  Label::Distance condition_met_distance) {
4790  ASSERT(cc == zero || cc == not_zero);
4791  if (scratch.is(object)) {
4792  andp(scratch, Immediate(~Page::kPageAlignmentMask));
4793  } else {
4794  movp(scratch, Immediate(~Page::kPageAlignmentMask));
4795  andp(scratch, object);
4796  }
4797  if (mask < (1 << kBitsPerByte)) {
4798  testb(Operand(scratch, MemoryChunk::kFlagsOffset),
4799  Immediate(static_cast<uint8_t>(mask)));
4800  } else {
4801  testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
4802  }
4803  j(cc, condition_met, condition_met_distance);
4804 }
4805 
4806 
4807 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
4808  Register scratch,
4809  Label* if_deprecated) {
4810  if (map->CanBeDeprecated()) {
4811  Move(scratch, map);
4812  movp(scratch, FieldOperand(scratch, Map::kBitField3Offset));
4813  SmiToInteger32(scratch, scratch);
4814  andp(scratch, Immediate(Map::Deprecated::kMask));
4815  j(not_zero, if_deprecated);
4816  }
4817 }
4818 
4819 
4820 void MacroAssembler::JumpIfBlack(Register object,
4821  Register bitmap_scratch,
4822  Register mask_scratch,
4823  Label* on_black,
4824  Label::Distance on_black_distance) {
4825  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
4826  GetMarkBits(object, bitmap_scratch, mask_scratch);
4827 
4828  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
4829  // The mask_scratch register contains a 1 at the position of the first bit
4830  // and a 0 at all other positions, including the position of the second bit.
4831  movp(rcx, mask_scratch);
4832  // Make rcx into a mask that covers both marking bits using the operation
4833  // rcx = mask | (mask << 1).
4834  leap(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
4835  // Note that we are using a 4-byte aligned 8-byte load.
4836  andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
4837  cmpp(mask_scratch, rcx);
4838  j(equal, on_black, on_black_distance);
4839 }
4840 
4841 
4842 // Detect some, but not all, common pointer-free objects. This is used by the
4843 // incremental write barrier which doesn't care about oddballs (they are always
4844 // marked black immediately so this code is not hit).
4845 void MacroAssembler::JumpIfDataObject(
4846  Register value,
4847  Register scratch,
4848  Label* not_data_object,
4849  Label::Distance not_data_object_distance) {
4850  Label is_data_object;
4851  movp(scratch, FieldOperand(value, HeapObject::kMapOffset));
4852  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
4853  j(equal, &is_data_object, Label::kNear);
4854  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
4855  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
4856  // If it's a string and it's not a cons string then it's an object containing
4857  // no GC pointers.
4858  testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
4859  Immediate(kIsIndirectStringMask | kIsNotStringMask));
4860  j(not_zero, not_data_object, not_data_object_distance);
4861  bind(&is_data_object);
4862 }
4863 
4864 
4865 void MacroAssembler::GetMarkBits(Register addr_reg,
4866  Register bitmap_reg,
4867  Register mask_reg) {
4868  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
4869  movp(bitmap_reg, addr_reg);
4870  // Sign extended 32 bit immediate.
4871  andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
4872  movp(rcx, addr_reg);
4873  int shift =
4874  Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
4875  shrl(rcx, Immediate(shift));
4876  andp(rcx,
4877  Immediate((Page::kPageAlignmentMask >> shift) &
4878  ~(Bitmap::kBytesPerCell - 1)));
4879 
4880  addp(bitmap_reg, rcx);
4881  movp(rcx, addr_reg);
4882  shrl(rcx, Immediate(kPointerSizeLog2));
4883  andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
4884  movl(mask_reg, Immediate(1));
4885  shl_cl(mask_reg);
4886 }
4887 
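A sketch (not part of this file) of the address arithmetic in GetMarkBits, with the page and bitmap geometry passed in as parameters instead of the real Page/Bitmap constants; the MemoryChunk header offset is added at the use sites, as in the assembly:

#include <cstdint>

struct MarkBitLocation {
  uintptr_t cell_address;  // address of the bitmap cell within the page
  uint32_t mask;           // single mark bit within that cell
};

MarkBitLocation ComputeMarkBits(uintptr_t addr, uintptr_t page_alignment_mask,
                                int bits_per_cell_log2, int bytes_per_cell_log2,
                                int pointer_size_log2) {
  uintptr_t page_start = addr & ~page_alignment_mask;
  int shift = bits_per_cell_log2 + pointer_size_log2 - bytes_per_cell_log2;
  uintptr_t cell_offset =
      (addr >> shift) & (page_alignment_mask >> shift) &
      ~((uintptr_t{1} << bytes_per_cell_log2) - 1);
  uint32_t bit = static_cast<uint32_t>(addr >> pointer_size_log2) &
                 ((1u << bits_per_cell_log2) - 1);
  return {page_start + cell_offset, 1u << bit};
}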
4888 
4889 void MacroAssembler::EnsureNotWhite(
4890  Register value,
4891  Register bitmap_scratch,
4892  Register mask_scratch,
4893  Label* value_is_white_and_not_data,
4894  Label::Distance distance) {
4895  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
4896  GetMarkBits(value, bitmap_scratch, mask_scratch);
4897 
4898  // If the value is black or grey we don't need to do anything.
4899  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
4900  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
4901  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
4902  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
4903 
4904  Label done;
4905 
4906  // Since both black and grey have a 1 in the first position and white does
4907  // not have a 1 there we only need to check one bit.
4908  testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
4909  j(not_zero, &done, Label::kNear);
4910 
4911  if (emit_debug_code()) {
4912  // Check for impossible bit pattern.
4913  Label ok;
4914  Push(mask_scratch);
4915  // shl. May overflow making the check conservative.
4916  addp(mask_scratch, mask_scratch);
4917  testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
4918  j(zero, &ok, Label::kNear);
4919  int3();
4920  bind(&ok);
4921  Pop(mask_scratch);
4922  }
4923 
4924  // Value is white. We check whether it is data that doesn't need scanning.
4925  // Currently only checks for HeapNumber and non-cons strings.
4926  Register map = rcx; // Holds map while checking type.
4927  Register length = rcx; // Holds length of object after checking type.
4928  Label not_heap_number;
4929  Label is_data_object;
4930 
4931  // Check for heap-number
4932  movp(map, FieldOperand(value, HeapObject::kMapOffset));
4933  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
4934  j(not_equal, &not_heap_number, Label::kNear);
4935  movp(length, Immediate(HeapNumber::kSize));
4936  jmp(&is_data_object, Label::kNear);
4937 
4938  bind(&not_heap_number);
4939  // Check for strings.
4940  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
4941  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
4942  // If it's a string and it's not a cons string then it's an object containing
4943  // no GC pointers.
4944  Register instance_type = rcx;
4945  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
4946  testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
4947  j(not_zero, value_is_white_and_not_data);
4948  // It's a non-indirect (non-cons and non-slice) string.
4949  // If it's external, the length is just ExternalString::kSize.
4950  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
4951  Label not_external;
4952  // External strings are the only ones with the kExternalStringTag bit
4953  // set.
4954  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
4955  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
4956  testb(instance_type, Immediate(kExternalStringTag));
4957  j(zero, &not_external, Label::kNear);
4958  movp(length, Immediate(ExternalString::kSize));
4959  jmp(&is_data_object, Label::kNear);
4960 
4961  bind(&not_external);
4962  // Sequential string, either ASCII or UC16.
4963  ASSERT(kOneByteStringTag == 0x04);
4964  andp(length, Immediate(kStringEncodingMask));
4965  xorp(length, Immediate(kStringEncodingMask));
4966  addp(length, Immediate(0x04));
4967  // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
4968  imulp(length, FieldOperand(value, String::kLengthOffset));
4969  shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
4970  addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
4971  andp(length, Immediate(~kObjectAlignmentMask));
4972 
4973  bind(&is_data_object);
4974  // Value is a data object, and it is white. Mark it black. Since we know
4975  // that the object is white we can make it black by flipping one bit.
4976  orp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
4977 
4978  andp(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
4979  addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
4980 
4981  bind(&done);
4982 }
4983 
4984 
4985 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
4986  Label next, start;
4987  Register empty_fixed_array_value = r8;
4988  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
4989  movp(rcx, rax);
4990 
4991  // Check if the enum length field is properly initialized, indicating that
4992  // there is an enum cache.
4993  movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
4994 
4995  EnumLength(rdx, rbx);
4996  Cmp(rdx, Smi::FromInt(kInvalidEnumCacheSentinel));
4997  j(equal, call_runtime);
4998 
4999  jmp(&start);
5000 
5001  bind(&next);
5002 
5003  movp(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
5004 
5005  // For all objects but the receiver, check that the cache is empty.
5006  EnumLength(rdx, rbx);
5007  Cmp(rdx, Smi::FromInt(0));
5008  j(not_equal, call_runtime);
5009 
5010  bind(&start);
5011 
5012  // Check that there are no elements. Register rcx contains the current JS
5013  // object we've reached through the prototype chain.
5014  Label no_elements;
5015  cmpp(empty_fixed_array_value,
5016  FieldOperand(rcx, JSObject::kElementsOffset));
5017  j(equal, &no_elements);
5018 
5019  // Second chance, the object may be using the empty slow element dictionary.
5020  LoadRoot(kScratchRegister, Heap::kEmptySlowElementDictionaryRootIndex);
5021  cmpp(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
5022  j(not_equal, call_runtime);
5023 
5024  bind(&no_elements);
5025  movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
5026  cmpp(rcx, null_value);
5027  j(not_equal, &next);
5028 }
5029 
5030 void MacroAssembler::TestJSArrayForAllocationMemento(
5031  Register receiver_reg,
5032  Register scratch_reg,
5033  Label* no_memento_found) {
5034  ExternalReference new_space_start =
5035  ExternalReference::new_space_start(isolate());
5036  ExternalReference new_space_allocation_top =
5037  ExternalReference::new_space_allocation_top_address(isolate());
5038 
5039  leap(scratch_reg, Operand(receiver_reg,
5040  JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
5041  Move(kScratchRegister, new_space_start);
5042  cmpp(scratch_reg, kScratchRegister);
5043  j(less, no_memento_found);
5044  cmpp(scratch_reg, ExternalOperand(new_space_allocation_top));
5045  j(greater, no_memento_found);
5046  CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
5047  Heap::kAllocationMementoMapRootIndex);
5048 }
5049 
5050 
5051 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
5052  Register object,
5053  Register scratch0,
5054  Register scratch1,
5055  Label* found) {
5056  ASSERT(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
5057  ASSERT(!scratch1.is(scratch0));
5058  Register current = scratch0;
5059  Label loop_again;
5060 
5061  movp(current, object);
5062 
5063  // Loop based on the map going up the prototype chain.
5064  bind(&loop_again);
5065  movp(current, FieldOperand(current, HeapObject::kMapOffset));
5066  movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
5067  andp(scratch1, Immediate(Map::kElementsKindMask));
5068  shr(scratch1, Immediate(Map::kElementsKindShift));
5069  cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
5070  j(equal, found);
5071  movp(current, FieldOperand(current, Map::kPrototypeOffset));
5072  CompareRoot(current, Heap::kNullValueRootIndex);
5073  j(not_equal, &loop_again);
5074 }
5075 
5076 
5077 void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
5078  ASSERT(!dividend.is(rax));
5079  ASSERT(!dividend.is(rdx));
5080  MultiplierAndShift ms(divisor);
5081  movl(rax, Immediate(ms.multiplier()));
5082  imull(dividend);
5083  if (divisor > 0 && ms.multiplier() < 0) addl(rdx, dividend);
5084  if (divisor < 0 && ms.multiplier() > 0) subl(rdx, dividend);
5085  if (ms.shift() > 0) sarl(rdx, Immediate(ms.shift()));
5086  movl(rax, dividend);
5087  shrl(rax, Immediate(31));
5088  addl(rdx, rax);
5089 }
5090 
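A hedged C++ sketch of what TruncatingDiv computes once a magic multiplier and shift have been chosen; MultiplierAndShift is assumed to supply them (the selection algorithm is not shown), and the function name here is illustrative:

#include <cstdint>

// Truncating signed division by a constant, mirroring the sequence above:
// take the high 32 bits of multiplier * dividend, apply the sign fix-ups,
// arithmetic-shift, then add the dividend's sign bit to round toward zero.
int32_t ApplyMultiplierAndShift(int32_t dividend, int32_t divisor,
                                int32_t multiplier, int shift) {
  int32_t high = static_cast<int32_t>(
      (static_cast<int64_t>(multiplier) * dividend) >> 32);  // imull high half
  if (divisor > 0 && multiplier < 0) high += dividend;       // addl(rdx, dividend)
  if (divisor < 0 && multiplier > 0) high -= dividend;       // subl(rdx, dividend)
  if (shift > 0) high >>= shift;                             // sarl(rdx, shift)
  return high + static_cast<int32_t>(
      static_cast<uint32_t>(dividend) >> 31);                // shrl(rax, 31); addl
}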
5091 
5092 } } // namespace v8::internal
5093 
5094 #endif // V8_TARGET_ARCH_X64
byte * Address
Definition: globals.h:186
const Register rdx
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
unsigned char byte
Definition: disasm.h:33
const intptr_t kSmiTagMask
Definition: v8.h:5480
#define CHECK_EQ(expected, value)
Definition: checks.h:252
const intptr_t kDoubleAlignmentMask
Definition: v8globals.h:53
const Register r14
const uint32_t kNaNOrInfinityLowerBoundUpper32
Definition: v8globals.h:456
const Register r3
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths true
Definition: flags.cc:208
const Register r11
const int kNumRegisters
Definition: constants-arm.h:57
const Register rbp
#define ASSERT_NOT_NULL(p)
Definition: checks.h:343
const Register rsi
TypeImpl< ZoneTypeConfig > Type
int int32_t
Definition: unicode.cc:47
const uint32_t kIsNotInternalizedMask
Definition: objects.h:603
#define ASSERT(condition)
Definition: checks.h:329
const int kPointerSizeLog2
Definition: globals.h:281
const uint32_t kStringRepresentationMask
Definition: objects.h:615
#define CHECK(condition)
Definition: checks.h:75
const Register r2
PerThreadAssertScopeDebugOnly< DEFERRED_HANDLE_DEREFERENCE_ASSERT, true > AllowDeferredHandleDereference
Definition: assert-scope.h:234
const intptr_t kObjectAlignmentMask
Definition: v8globals.h:45
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object size
const intptr_t kHeapObjectTagMask
Definition: v8.h:5475
const int kIntSize
Definition: globals.h:263
bool is_intn(int64_t x, unsigned n)
Definition: utils.h:1102
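is_intn reports whether a signed value fits in an n-bit two's-complement field; the assembler relies on checks like this before emitting short-form immediates or rip-relative displacements. A minimal sketch of the semantics only (the name below is hypothetical, not the exact utils.h implementation):

  // Returns true if x can be represented as an n-bit signed integer.
  static bool fits_in_signed_bits(int64_t x, unsigned n) {
    int64_t limit = static_cast<int64_t>(1) << (n - 1);  // 2^(n-1)
    return -limit <= x && x < limit;
  }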
kInstanceClassNameOffset flag
Definition: objects-inl.h:5115
const uint32_t kNotStringTag
Definition: objects.h:599
#define UNREACHABLE()
Definition: checks.h:52
const int kDoubleSize
Definition: globals.h:266
#define kNumSafepointSavedRegisters
Definition: frames-arm64.h:52
#define V8_INT64_C(x)
Definition: globals.h:218
PrologueFrameMode
Definition: frames.h:957
const uint32_t kIsIndirectStringMask
Definition: objects.h:622
const Register r9
const int kPointerSize
Definition: globals.h:268
Operand FieldOperand(Register object, int offset)
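FieldOperand builds a memory operand for a field of a tagged heap object: heap pointers carry a tag in their low bits, so the field offset is corrected by kHeapObjectTag. A hedged sketch of the usual x64 helper, plus a typical use (the Sketch name is illustrative):

  // Sketch: address a field of a tagged object; offset is the untagged field offset.
  inline Operand FieldOperandSketch(Register object, int offset) {
    return Operand(object, offset - kHeapObjectTag);
  }
  // Typical use in generated code: load the object's map.
  // masm->movq(rax, FieldOperand(rbx, HeapObject::kMapOffset));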
const Address kZapValue
Definition: v8globals.h:82
const int kHeapObjectTag
Definition: v8.h:5473
bool IsAligned(T value, U alignment)
Definition: utils.h:211
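IsAligned backs the alignment assertions in the allocation and write-barrier paths. Assuming the alignment is a power of two, the check reduces to a mask test; a minimal sketch under that assumption (hypothetical name):

  // Returns true if value is a multiple of alignment (alignment must be a power of two).
  template <typename T, typename U>
  static bool IsAlignedSketch(T value, U alignment) {
    return (value & (alignment - 1)) == 0;
  }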
const int kRegisterSize
Definition: globals.h:269
const Register rbx
const uint32_t kHoleNanLower32
Definition: v8globals.h:455
const Register rsp
const int kFPOnStackSize
Definition: globals.h:271
const Register rax
const Register rdi
const int kBitsPerByte
Definition: globals.h:287
const int kSmiShift
const Register r0
bool IsPowerOf2(T x)
Definition: utils.h:51
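IsPowerOf2 supports the alignment and scale-factor assertions above; the standard bit trick suffices. A sketch assuming a non-negative input (hypothetical name):

  // A power of two has exactly one bit set.
  template <typename T>
  static bool IsPowerOf2Sketch(T x) {
    return x != 0 && (x & (x - 1)) == 0;
  }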
int TenToThe(int exponent)
Definition: utils.h:880
const int kRootRegisterBias
MacroAssembler(Isolate *isolate, void *buffer, int size)
const uint32_t kStringTag
Definition: objects.h:598
#define kRootRegister
const uint32_t kInternalizedTag
Definition: objects.h:605
#define T(name, string, precedence)
Definition: token.cc:48
InvokeFlag
AllocationFlags
void Load(const v8::FunctionCallbackInfo< v8::Value > &args)
Definition: shell.cc:171
const Register arg_reg_1
bool is(Register reg) const
const uint32_t kIsNotStringMask
Definition: objects.h:597
const Register r1
const char * GetBailoutReason(BailoutReason reason)
Definition: objects.cc:16437
const int kNumSafepointRegisters
Definition: frames-arm.h:92
const Register kScratchRegister
#define UNIMPLEMENTED()
Definition: checks.h:50
const int kSmiShiftSize
Definition: v8.h:5539
const int kSmiTagSize
Definition: v8.h:5479
void CopyBytes(uint8_t *target, uint8_t *source)
Definition: runtime.cc:1309
const int kInt64Size
Definition: globals.h:265
const Register r8
const Register rcx
Condition NegateCondition(Condition cond)
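NegateCondition flips a branch condition (e.g. equal <-> not_equal) so the macro assembler can branch over the opposite case. On x86/x64 the condition-code encoding pairs each condition with its negation in the lowest bit, so the conversion is a single XOR; a hedged sketch (illustrative name):

  // x64 condition codes pair up as (cc, cc ^ 1), e.g. equal/not_equal, less/greater_equal.
  inline Condition NegateConditionSketch(Condition cc) {
    return static_cast<Condition>(cc ^ 1);
  }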
#define ASSERT_EQ(v1, v2)
Definition: checks.h:330
const int kShortSize
Definition: globals.h:262
const uint32_t kOneByteStringTag
Definition: objects.h:611
const int kSmiTag
Definition: v8.h:5478
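Taken together, kSmiTag, kSmiTagSize, kSmiShiftSize and kSmiShift describe the Smi encoding the macro assembler manipulates: on x64 a small integer lives in the upper 32 bits of a tagged word and the tag bit is clear. A hedged sketch of that layout, assuming kSmiShift == 32 and kSmiTag == 0 (sketch names are illustrative):

  // Encode/decode a 32-bit integer as a tagged Smi word under the x64 layout.
  inline intptr_t SmiEncodeSketch(int32_t value) {
    return static_cast<intptr_t>(value) << kSmiShift;  // low tag bits end up as kSmiTag == 0
  }
  inline int32_t SmiDecodeSketch(intptr_t smi) {
    return static_cast<int32_t>(smi >> kSmiShift);
  }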
const int kSmiConstantRegisterValue
const uint32_t kIsIndirectStringTag
Definition: objects.h:623
const Register r10
HeapObject * obj
const Register no_reg
const Register kSmiConstantRegister
const int kPCOnStackSize
Definition: globals.h:270
#define STATIC_ASSERT(test)
Definition: checks.h:341
const int kInt32Size
Definition: globals.h:264
#define xmm0
const int kCharSize
Definition: globals.h:261
const Register r15
const uint32_t kStringEncodingMask
Definition: objects.h:609
bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)
const Register r4