#if V8_TARGET_ARCH_ARM64

    : Assembler(arg_isolate, buffer, buffer_size),
      generating_stub_(false),
      allow_macro_instructions_(true),
      use_real_aborts_(true),
      sp_(jssp), tmp_list_(ip0, ip1), fptmp_list_(fp_scratch) {
  code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                isolate());
                                  const Operand& operand,
  UseScratchRegisterScope temps(this);

  if (operand.NeedsRelocation()) {
    Register temp = temps.AcquireX();

  } else if (operand.IsImmediate()) {
    int64_t immediate = operand.immediate();
    unsigned reg_size = rd.SizeInBits();
    ASSERT(rd.Is64Bits() || is_uint32(immediate));
      immediate = ~immediate;
    } else if ((rd.Is64Bits() && (immediate == -1L)) ||
               (rd.Is32Bits() && (immediate == 0xffffffffL))) {
      unsigned n, imm_s, imm_r;
      if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
        Register temp = temps.AcquireSameSizeAs(rn);
        Mov(temp, immediate);

  } else if (operand.IsExtendedRegister()) {
    ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
    ASSERT(operand.shift_amount() <= 4);
    ASSERT(operand.reg().Is64Bits() ||
           ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
    Register temp = temps.AcquireSameSizeAs(rn);
                    operand.shift_amount());
    ASSERT(operand.IsShiftedRegister());
  ASSERT(allow_macro_instructions_);
  ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
  unsigned reg_size = rd.SizeInBits();
  unsigned n, imm_s, imm_r;
  if (IsImmMovz(imm, reg_size) && !rd.IsSP()) {
  } else if (IsImmMovn(imm, reg_size) && !rd.IsSP()) {
  } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
    uint64_t ignored_halfword = 0;
    bool invert_move = false;
      ignored_halfword = 0xffffL;
    UseScratchRegisterScope temps(this);
    Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;
    ASSERT((reg_size % 16) == 0);
    bool first_mov_done = false;
    for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) {
      uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
      if (imm16 != ignored_halfword) {
        if (!first_mov_done) {
            movn(temp, (~imm16) & 0xffffL, 16 * i);
            movz(temp, imm16, 16 * i);
          first_mov_done = true;
          movk(temp, imm16, 16 * i);
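// Illustrative sketch (not part of the original file): the halfword-selection
// idea behind the movz/movn/movk sequence above, restated as standalone C++.
// The names PlanMov64/MovHalfword are hypothetical.
#include <cstdint>
#include <vector>

struct MovHalfword { bool is_first; uint16_t imm16; unsigned shift; };

// Halfwords equal to the ignored pattern (0x0000 for a movz-based sequence,
// 0xffff for a movn-based one) need no instruction; the first emitted
// halfword uses movz/movn, the rest use movk.
static std::vector<MovHalfword> PlanMov64(uint64_t imm, bool invert_move) {
  const uint16_t ignored = invert_move ? 0xffff : 0x0000;
  std::vector<MovHalfword> moves;
  for (unsigned i = 0; i < 4; i++) {
    uint16_t imm16 = static_cast<uint16_t>(imm >> (16 * i));
    if (imm16 != ignored) moves.push_back({moves.empty(), imm16, 16 * i});
  }
  return moves;  // empty => a single mov of the ignored pattern suffices
}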
                          const Operand& operand,
  ASSERT(allow_macro_instructions_);
  UseScratchRegisterScope temps(this);
  Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;

  if (operand.NeedsRelocation()) {
  } else if (operand.IsImmediate()) {
    Mov(dst, operand.immediate());
  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
    EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());
  } else if (operand.IsExtendedRegister()) {
              operand.shift_amount());
  if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&

  ASSERT(allow_macro_instructions_);
  if (operand.NeedsRelocation()) {
  } else if (operand.IsImmediate()) {
    Mov(rd, ~operand.immediate());
  } else if (operand.IsExtendedRegister()) {
              operand.shift_amount());
  ASSERT((reg_size % 8) == 0);
  for (unsigned i = 0; i < (reg_size / 16); i++) {
    if ((imm & 0xffff) == 0) {
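// Minimal standalone sketch of the counting loop above, assuming it counts
// the 16-bit chunks of `imm` that are entirely zero:
#include <cstdint>
static unsigned CountClearHalfWordsSketch(uint64_t imm,
                                          unsigned reg_size_in_bits) {
  unsigned count = 0;
  for (unsigned i = 0; i < (reg_size_in_bits / 16); i++) {
    if ((imm & 0xffff) == 0) count++;
    imm >>= 16;
  }
  return count;  // e.g. CountClearHalfWordsSketch(0x0000ffff00000000, 64) == 3
}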
                                            const Operand& operand,
  if (operand.NeedsRelocation()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
  } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
                          const Operand& operand,
  ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate()) {
    int64_t imm = operand.immediate();
      csel(rd, rn, zr, cond);
    } else if (imm == 1) {
      csinc(rd, rn, zr, cond);
    } else if (imm == -1) {
      csinv(rd, rn, zr, cond);
      UseScratchRegisterScope temps(this);
      Register temp = temps.AcquireSameSizeAs(rn);
      Mov(temp, operand.immediate());
      csel(rd, rn, temp, cond);
  } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
    csel(rd, rn, operand.reg(), cond);
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    csel(rd, rn, temp, cond);
                                 const Operand& operand,
  if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
      !operand.NeedsRelocation() && (S == LeaveFlags)) {

  if (operand.NeedsRelocation()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
  } else if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) ||
             (rn.IsZero() && !operand.IsShiftedRegister()) ||
             (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    AddSub(rd, rn, temp, S, op);
    AddSub(rd, rn, operand, S, op);
                                          const Operand& operand,
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  UseScratchRegisterScope temps(this);

  if (operand.NeedsRelocation()) {
    Register temp = temps.AcquireX();
  } else if (operand.IsImmediate() ||
             (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
    Register temp = temps.AcquireSameSizeAs(rn);
  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
    ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
  } else if (operand.IsExtendedRegister()) {
    ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
    ASSERT(operand.shift_amount() <= 4);
    ASSERT(operand.reg().Is64Bits() ||
           ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
    Register temp = temps.AcquireSameSizeAs(rn);
                        operand.shift_amount());
  int64_t offset = addr.offset();

  if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(addr.base());
    Mov(temp, addr.offset());
    add(addr.base(), addr.base(), offset);
    add(addr.base(), addr.base(), offset);
  if (r.IsInteger8()) {
  } else if (r.IsUInteger8()) {
  } else if (r.IsInteger16()) {
  } else if (r.IsUInteger16()) {
  } else if (r.IsInteger32()) {

  if (r.IsInteger8() || r.IsUInteger8()) {
  } else if (r.IsInteger16() || r.IsUInteger16()) {
  } else if (r.IsInteger32()) {
  bool need_longer_range = false;
  if (label->is_bound() || label->is_linked()) {
  if (!need_longer_range && !label->is_bound()) {
        std::pair<int, FarBranchInfo>(max_reachable_pc,
  return need_longer_range;
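// Hedged sketch of the range question this helper answers. Architecturally,
// tbz/tbnz encode a 14-bit word offset (about +-32KB) while cbz/cbnz and
// conditional branches encode 19 bits (about +-1MB); a label that may land
// outside that window needs the inverted-branch-plus-unconditional-B pattern
// used in the functions below. The enum and helper here are hypothetical.
#include <cstdint>
enum BranchKindSketch { kTestBranchSketch, kCompareBranchSketch,
                        kCondBranchSketch };
static bool OutOfDirectRange(BranchKindSketch kind, int64_t offset_bytes) {
  const int64_t range =
      (kind == kTestBranchSketch) ? (INT64_C(1) << 15)   // 14-bit imm * 4
                                  : (INT64_C(1) << 20);  // 19-bit imm * 4
  return (offset_bytes >= range) || (offset_bytes < -range);
}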
  B(static_cast<Condition>(type), label);

  ASSERT(allow_macro_instructions_);
  bool need_extra_instructions =
  if (need_extra_instructions) {

  ASSERT(allow_macro_instructions_);
  bool need_extra_instructions =
  if (need_extra_instructions) {
    tbz(rt, bit_pos, &done);
    tbnz(rt, bit_pos, label);

  ASSERT(allow_macro_instructions_);
  bool need_extra_instructions =
  if (need_extra_instructions) {
    tbnz(rt, bit_pos, &done);
    tbz(rt, bit_pos, label);

  ASSERT(allow_macro_instructions_);
  bool need_extra_instructions =
  if (need_extra_instructions) {

  ASSERT(allow_macro_instructions_);
  bool need_extra_instructions =
  if (need_extra_instructions) {
                                              Label* is_not_representable,
                                              Label* is_representable) {
  ASSERT(allow_macro_instructions_);
  if ((is_not_representable != NULL) && (is_representable != NULL)) {
    B(is_not_representable, vs);
  } else if (is_not_representable != NULL) {
    B(is_not_representable, vs);
  } else if (is_representable != NULL) {
    B(is_representable, vc);
                          const CPURegister& src2, const CPURegister& src3) {
  int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
  int size = src0.SizeInBytes();

  PrepareForPush(count, size);
  PushHelper(count, size, src0, src1, src2, src3);

                          const CPURegister& src2, const CPURegister& src3,
                          const CPURegister& src4, const CPURegister& src5,
                          const CPURegister& src6, const CPURegister& src7) {
  int count = 5 + src5.IsValid() + src6.IsValid() + src7.IsValid();
  int size = src0.SizeInBytes();

  PrepareForPush(count, size);
  PushHelper(4, size, src0, src1, src2, src3);
  PushHelper(count - 4, size, src4, src5, src6, src7);

                         const CPURegister& dst2, const CPURegister& dst3) {
  int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
  int size = dst0.SizeInBytes();

  PrepareForPop(count, size);
  PopHelper(count, size, dst0, dst1, dst2, dst3);
  if (queued_.empty()) return;

  masm_->PrepareForPush(size_);

  int count = queued_.size();
  while (index < count) {
    CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
      batch[batch_index++] = queued_[index++];
    } while ((batch_index < 4) && (index < count) &&
             batch[0].IsSameSizeAndType(queued_[index]));

    masm_->PushHelper(batch_index, batch[0].SizeInBytes(),
                      batch[0], batch[1], batch[2], batch[3]);

  if (queued_.empty()) return;

  masm_->PrepareForPop(size_);

  int count = queued_.size();
  while (index < count) {
    CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
      batch[batch_index++] = queued_[index++];
    } while ((batch_index < 4) && (index < count) &&
             batch[0].IsSameSizeAndType(queued_[index]));

    masm_->PopHelper(batch_index, batch[0].SizeInBytes(),
                     batch[0], batch[1], batch[2], batch[3]);
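// Illustrative sketch (hypothetical types) of the batching policy used by the
// queues above: registers are emitted in groups of up to four, and a group
// only grows while the next register has the same size and type as the first,
// so each group can be lowered to stp/ldp pairs by PushHelper/PopHelper.
#include <cstddef>
#include <vector>
struct RegSketch { int size_in_bytes; bool is_fp; };
static bool SameSizeAndType(const RegSketch& a, const RegSketch& b) {
  return (a.size_in_bytes == b.size_in_bytes) && (a.is_fp == b.is_fp);
}
static std::vector<std::vector<RegSketch> > BatchQueued(
    const std::vector<RegSketch>& queued) {
  std::vector<std::vector<RegSketch> > batches;
  size_t index = 0;
  while (index < queued.size()) {
    std::vector<RegSketch> batch;
    do {
      batch.push_back(queued[index++]);
    } while ((batch.size() < 4) && (index < queued.size()) &&
             SameSizeAndType(batch[0], queued[index]));
    batches.push_back(batch);
  }
  return batches;
}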
  int size = registers.RegisterSizeInBytes();
  PrepareForPush(registers.Count(), size);

  while (!registers.IsEmpty()) {
    int count_before = registers.Count();
    const CPURegister& src0 = registers.PopHighestIndex();
    const CPURegister& src1 = registers.PopHighestIndex();
    const CPURegister& src2 = registers.PopHighestIndex();
    const CPURegister& src3 = registers.PopHighestIndex();
    int count = count_before - registers.Count();
    PushHelper(count, size, src0, src1, src2, src3);

  int size = registers.RegisterSizeInBytes();
  PrepareForPop(registers.Count(), size);

  while (!registers.IsEmpty()) {
    int count_before = registers.Count();
    const CPURegister& dst0 = registers.PopLowestIndex();
    const CPURegister& dst1 = registers.PopLowestIndex();
    const CPURegister& dst2 = registers.PopLowestIndex();
    const CPURegister& dst3 = registers.PopLowestIndex();
    int count = count_before - registers.Count();
    PopHelper(count, size, dst0, dst1, dst2, dst3);
  int size = src.SizeInBytes();

  PrepareForPush(count, size);

  if (FLAG_optimize_for_size && count > 8) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    __ Mov(temp, count / 2);
    PushHelper(2, size, src, src, NoReg, NoReg);
    PushHelper(4, size, src, src, src, src);
    PushHelper(2, size, src, src, NoReg, NoReg);
    PushHelper(1, size, src, NoReg, NoReg, NoReg);

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireSameSizeAs(count);

  if (FLAG_optimize_for_size) {
    Subs(temp, count, 1);
    PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);

    Label loop, leftover2, leftover1, done;
    Subs(temp, count, 4);
    PushHelper(4, src.SizeInBytes(), src, src, src, src);
    Tbz(count, 1, &leftover1);
    PushHelper(2, src.SizeInBytes(), src, src, NoReg, NoReg);
    Tbz(count, 0, &done);
    PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
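// Standalone sketch of the decomposition used above when the repeat count is
// only known at run time: push blocks of four while possible, then use bit 1
// and bit 0 of the count for a leftover pair and a leftover single slot.
#include <vector>
static std::vector<int> PushBlockSizes(int count) {
  std::vector<int> blocks;
  for (int remaining = count; remaining >= 4; remaining -= 4) {
    blocks.push_back(4);
  }
  if (count & 2) blocks.push_back(2);
  if (count & 1) blocks.push_back(1);
  return blocks;  // e.g. PushBlockSizes(7) == {4, 2, 1}
}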
void MacroAssembler::PushHelper(int count, int size,
                                const CPURegister& src0,
                                const CPURegister& src1,
                                const CPURegister& src2,
                                const CPURegister& src3) {
  InstructionAccurateScope scope(this);

  ASSERT(size == src0.SizeInBytes());
      ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone());
      ASSERT(src2.IsNone() && src3.IsNone());

void MacroAssembler::PopHelper(int count, int size,
                               const CPURegister& dst0,
                               const CPURegister& dst1,
                               const CPURegister& dst2,
                               const CPURegister& dst3) {
  InstructionAccurateScope scope(this);

  ASSERT(size == dst0.SizeInBytes());
      ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
      ASSERT(dst2.IsNone() && dst3.IsNone());
void MacroAssembler::PrepareForPush(Operand total_size) {
  if (total_size.IsImmediate()) {
    ASSERT((total_size.immediate() % 16) == 0);

void MacroAssembler::PrepareForPop(Operand total_size) {
  if (total_size.IsImmediate()) {
    ASSERT((total_size.immediate() % 16) == 0);

  if (offset.IsImmediate()) {
    ASSERT(offset.immediate() >= 0);
    Check(le, kStackAccessBelowStackPointer);

  if (offset.IsImmediate()) {
    ASSERT(offset.immediate() >= 0);
    Check(le, kStackAccessBelowStackPointer);
                               const CPURegister& src2,
  ASSERT((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));

                               const CPURegister& dst2,
  ASSERT((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));

  InstructionAccurateScope scope(this);

  InstructionAccurateScope scope(this);
  Abort(kTheCurrentStackPointerIsBelowCsp);

                                     Register false_root) {
  STATIC_ASSERT((Heap::kTrueValueRootIndex + 1) == Heap::kFalseValueRootIndex);
  Ldp(true_root, false_root,

                                 Handle<HeapObject> object) {
  if (isolate()->heap()->InNewSpace(*object)) {
    Mov(result, Operand(cell));
    Mov(result, Operand(object));

                                             Register descriptors) {
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
                                     Register null_value,
                                     Label* call_runtime) {
  Register empty_fixed_array_value = scratch0;
  Register current_object = scratch1;

  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  Mov(current_object, object);

  Register map = scratch2;
  Register enum_length = scratch3;
  Cmp(enum_length, kInvalidEnumCacheSentinel);
  B(eq, call_runtime);

  Cbnz(enum_length, call_runtime);

  Cmp(current_object, empty_fixed_array_value);
  B(eq, &no_elements);

  CompareRoot(current_object, Heap::kEmptySlowElementDictionaryRootIndex);
  B(ne, call_runtime);

  Cmp(current_object, null_value);
                                                    Label* no_memento_found) {
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  Add(scratch1, receiver,
  Cmp(scratch1, new_space_start);
  B(lt, no_memento_found);

  Mov(scratch2, new_space_allocation_top);
  Cmp(scratch1, scratch2);
  B(gt, no_memento_found);
      Operand(isolate()->factory()->allocation_memento_map()));
void MacroAssembler::JumpToHandlerEntry(Register exception,
                                        Register scratch2) {
  ASSERT(exception.Is(x0));
  Lsr(scratch2, state, StackHandler::kKindWidth);

void MacroAssembler::InNewSpace(Register object,
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  And(temp, object, ExternalReference::new_space_mask(isolate()));
  Cmp(temp, ExternalReference::new_space_start(isolate()));
                           Register scratch4) {
  Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,

  Register object = scratch1;
  Register state = scratch2;
  Cbz(cp, &not_js_frame);
  Bind(&not_js_frame);

  JumpToHandlerEntry(value, object, state, scratch3, scratch4);

                                      Register scratch4) {
  Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,

  Label fetch_next, check_kind;

  Register object = scratch1;
  Register state = scratch2;
  JumpToHandlerEntry(value, object, state, scratch3, scratch4);
  Abs(smi, smi, slow);

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  Check(ls, kOperandIsNotAName);

  Label done_checking;
  JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking);
  CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
  Assert(eq, kExpectedUndefinedOrCell);
  Bind(&done_checking);

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  Check(ne, kOperandIsASmiAndNotAString);
  Check(lo, kOperandIsNotAString);

  Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id);

  Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
  if (f->nargs >= 0 && f->nargs != num_arguments) {
    if (num_arguments > 0) {
      Drop(num_arguments);
    LoadRoot(x0, Heap::kUndefinedValueRootIndex);

  Mov(x0, num_arguments);
  CEntryStub stub(1, save_doubles);

static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
  return ref0.address() - ref1.address();

                                              Register function_address,
                                              ExternalReference thunk_ref,
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate());
  const int kNextOffset = 0;
  const int kLimitOffset = AddressOffset(
      ExternalReference::handle_scope_limit_address(isolate()),
  const int kLevelOffset = AddressOffset(
      ExternalReference::handle_scope_level_address(isolate()),

  ASSERT(function_address.is(x1) || function_address.is(x2));

  Label profiler_disabled;
  Label end_profiler_check;
  Mov(x10, reinterpret_cast<uintptr_t>(is_profiling_flag));
  Cbz(w10, &profiler_disabled);
  B(&end_profiler_check);

  Bind(&profiler_disabled);
  Mov(x3, function_address);
  Bind(&end_profiler_check);
  Register handle_scope_base = x22;
  Register next_address_reg = x19;
  Register limit_reg = x20;
  Register level_reg = w21;

  Mov(handle_scope_base, next_address);
  Ldr(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
  Ldr(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
  Ldr(level_reg, MemOperand(handle_scope_base, kLevelOffset));
  Add(level_reg, level_reg, 1);
  Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));

  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    Mov(x0, ExternalReference::isolate_address(isolate()));

  DirectCEntryStub stub;
  stub.GenerateCall(this, x3);

  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    Mov(x0, ExternalReference::isolate_address(isolate()));

  Label promote_scheduled_exception;
  Label exception_handled;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  Label return_value_loaded;
  Ldr(x0, return_value_operand);
  Bind(&return_value_loaded);

  Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
  Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
  Sub(level_reg, level_reg, 1);
  Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
  B(ne, &delete_allocated_handles);

  Bind(&leave_exit_frame);

  Mov(x5, ExternalReference::scheduled_exception_address(isolate()));
  JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex, &promote_scheduled_exception);
  Bind(&exception_handled);

  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    Ldr(cp, *context_restore_operand);

  Bind(&promote_scheduled_exception);
      Runtime::kHiddenPromoteScheduledException, isolate()), 0);
  B(&exception_handled);

  Bind(&delete_allocated_handles);
  Str(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
  Register saved_result = x19;
  Mov(saved_result, x0);
  Mov(x0, ExternalReference::isolate_address(isolate()));
      ExternalReference::delete_handle_scope_extensions(isolate()), 1);
  Mov(x0, saved_result);
  B(&leave_exit_frame);
                                           int num_arguments) {
  Mov(x0, num_arguments);

  Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);

                                   const CallWrapper& call_wrapper) {
    call_wrapper.BeforeCall(CallSize(x2));
    call_wrapper.AfterCall();

  Mov(x0, num_arguments);

void MacroAssembler::InitializeNewString(Register string,
                                         Register scratch2) {
  SmiTag(scratch1, length);
#if V8_HOST_ARCH_ARM64
#else  // V8_HOST_ARCH_ARM64
  return FLAG_sim_stack_alignment;
#endif  // V8_HOST_ARCH_ARM64

                                     int num_of_reg_args) {

                                     int num_of_reg_args,
                                     int num_of_double_args) {
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  Mov(temp, function);

                                     int num_of_reg_args,
                                     int num_of_double_args) {
  ASSERT(num_of_reg_args <= 8);

  if (num_of_double_args > 0) {
    ASSERT(num_of_reg_args <= 1);
    ASSERT((num_of_double_args + num_of_reg_args) <= 2);

  if (!csp.Is(old_stack_pointer)) {
    ASSERT(sp_alignment >= 16);
    Bic(csp, old_stack_pointer, sp_alignment - 1);

  if (!csp.Is(old_stack_pointer)) {
      UseScratchRegisterScope temps(this);
      Register temp = temps.AcquireX();
      Sub(temp, csp, old_stack_pointer);
      Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  Mov(temp, Operand(target, rmode));

  ASSERT(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode);

  ASSERT(RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode);

  BlockPoolsScope scope(this);

  BlockPoolsScope scope(this);

  BlockPoolsScope scope(this);

  ASSERT(rmode != RelocInfo::NONE32);

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();

  if (rmode == RelocInfo::NONE64) {
    uint64_t imm = reinterpret_cast<uint64_t>(target);
    ASSERT(((imm >> 48) & 0xffff) == 0);
    movz(temp, (imm >> 0) & 0xffff, 0);
    movk(temp, (imm >> 16) & 0xffff, 16);
    movk(temp, (imm >> 32) & 0xffff, 32);
    LoadRelocated(temp, Operand(reinterpret_cast<intptr_t>(target), rmode));

                          RelocInfo::Mode rmode,
                          TypeFeedbackId ast_id) {
  if ((rmode == RelocInfo::CODE_TARGET) && (!ast_id.IsNone())) {
    rmode = RelocInfo::CODE_TARGET_WITH_ID;
  Call(reinterpret_cast<Address>(code.location()), rmode);
  ASSERT(rmode != RelocInfo::NONE32);

  if (rmode == RelocInfo::NONE64) {

                             RelocInfo::Mode rmode,
                             TypeFeedbackId ast_id) {
  ASSERT(rmode != RelocInfo::NONE32);

  if (rmode == RelocInfo::NONE64) {

                                       Register heap_number_map,
                                       Label* on_heap_number,
                                       Label* on_not_heap_number) {
  ASSERT(on_heap_number || on_not_heap_number);

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();

  if (heap_number_map.Is(NoReg)) {
    heap_number_map = temps.AcquireX();
    LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);

  Cmp(temp, heap_number_map);

  if (on_heap_number) {
    B(eq, on_heap_number);
  if (on_not_heap_number) {
    B(ne, on_not_heap_number);

                                      Label* on_heap_number,
                                      Register heap_number_map) {

                                         Label* on_not_heap_number,
                                         Register heap_number_map) {
                   on_not_heap_number);
  Register number_string_cache = result;
  Register mask = scratch3;

  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);

  Label load_result_from_cache;
  CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found,
  Eor(scratch1, scratch1, scratch2);
  And(scratch1, scratch1, mask);
  Add(scratch1, number_string_cache,

  Register probe = mask;
  B(&load_result_from_cache);

  Register scratch = scratch1;
  Add(scratch, number_string_cache,
  Bind(&load_result_from_cache);
                    scratch1, scratch2);

void MacroAssembler::TryConvertDoubleToInt(Register as_int,
                                           FPRegister scratch_d,
                                           Label* on_successful_conversion,
                                           Label* on_failed_conversion) {
  Scvtf(scratch_d, as_int);
  Fcmp(value, scratch_d);

  if (on_successful_conversion) {
    B(on_successful_conversion, eq);
  if (on_failed_conversion) {
    B(on_failed_conversion, ne);

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();

                                      Label* on_negative_zero) {
  B(vs, on_negative_zero);

  Cmp(input.W(), Operand(input.W(), UXTB));
  Csel(output.W(), wzr, input.W(), lt);
  Csel(output.W(), output.W(), 255, le);

  Fmov(dbl_scratch, 255);
  Fmin(dbl_scratch, dbl_scratch, input);
  Fcvtnu(output, dbl_scratch);
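// Hedged standalone equivalents of the two clamps above: the integer path is
// "negative -> 0, then saturate at 255"; the double path is Fmin against 255
// followed by a round-to-nearest unsigned convert (negative and NaN inputs
// become 0). std::nearbyint only approximates Fcvtnu's rounding here.
#include <cmath>
#include <cstdint>
static uint8_t ClampInt32ToUint8Sketch(int32_t in) {
  if (in < 0) return 0;
  if (in > 255) return 255;
  return static_cast<uint8_t>(in);
}
static uint8_t ClampDoubleToUint8Sketch(double in) {
  double clamped = std::fmin(in, 255.0);
  if (!(clamped > 0.0)) return 0;  // negative, zero or NaN
  return static_cast<uint8_t>(std::nearbyint(clamped));
}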
void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst,
                                               Register scratch5) {
                    scratch1, scratch2, scratch3, scratch4, scratch5));

  const Register& remaining = scratch3;
  Mov(remaining, count / 2);

  const Register& dst_untagged = scratch1;
  const Register& src_untagged = scratch2;

  Ldp(scratch4, scratch5,
  Stp(scratch4, scratch5,
  Sub(remaining, remaining, 1);
  Cbnz(remaining, &loop);

void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst,
                                                   Register scratch4) {
  const Register& dst_untagged = scratch1;
  const Register& src_untagged = scratch2;

  for (unsigned i = 0; i < count / 2; i++) {

void MacroAssembler::CopyFieldsUnrolledHelper(Register dst,
                                              Register scratch3) {
  const Register& dst_untagged = scratch1;
  const Register& src_untagged = scratch2;

  for (unsigned i = 0; i < count; i++) {

  ASSERT(!temps.IncludesAliasOf(dst));
  ASSERT(!temps.IncludesAliasOf(src));
  ASSERT(!temps.IncludesAliasOf(xzr));
  Check(ne, kTheSourceAndDestinationAreTheSame);

  static const unsigned kLoopThreshold = 8;

  UseScratchRegisterScope masm_temps(this);
  if ((temps.Count() >= 3) && (count >= kLoopThreshold)) {
    CopyFieldsLoopPairsHelper(dst, src, count,
                              Register(temps.PopLowestIndex()),
                              Register(temps.PopLowestIndex()),
                              Register(temps.PopLowestIndex()),
                              masm_temps.AcquireX(),
                              masm_temps.AcquireX());
  } else if (temps.Count() >= 2) {
    CopyFieldsUnrolledPairsHelper(dst, src, count,
                                  Register(temps.PopLowestIndex()),
                                  Register(temps.PopLowestIndex()),
                                  masm_temps.AcquireX(),
                                  masm_temps.AcquireX());
  } else if (temps.Count() == 1) {
    CopyFieldsUnrolledHelper(dst, src, count,
                             Register(temps.PopLowestIndex()),
                             masm_temps.AcquireX(),
                             masm_temps.AcquireX());
  UseScratchRegisterScope temps(this);
  Register tmp1 = temps.AcquireX();
  Register tmp2 = temps.AcquireX();

  Assert(ge, kUnexpectedNegativeValue);

  Add(scratch, src, length);
  Add(scratch, dst, length);

  Label short_copy, short_loop, bulk_loop, done;

  Register bulk_length = scratch;
  int pair_mask = pair_size - 1;

  Bic(bulk_length, length, pair_mask);
  Cbz(bulk_length, &short_copy);
  Sub(bulk_length, bulk_length, pair_size);
  Cbnz(bulk_length, &bulk_loop);
  And(length, length, pair_mask);

  Sub(length, length, 1);
  Cbnz(length, &short_loop);

                                Register field_count,
  UseScratchRegisterScope temps(this);
  Register field_ptr = temps.AcquireX();
  Register counter = temps.AcquireX();

  Subs(counter, field_count, 1);
  And(field_ptr, counter, 1);
  Subs(counter, counter, 2);
  Abort(kUnexpectedSmi);

  static const int kFlatAsciiStringMask =
  And(scratch1, first, kFlatAsciiStringMask);
  And(scratch2, second, kFlatAsciiStringMask);
  Cmp(scratch1, kFlatAsciiStringTag);

  const int kFlatAsciiStringMask =
  const int kFlatAsciiStringTag =
  And(scratch, type, kFlatAsciiStringMask);
  Cmp(scratch, kFlatAsciiStringTag);

  const int kFlatAsciiStringMask =
  const int kFlatAsciiStringTag =
  And(scratch1, first, kFlatAsciiStringMask);
  And(scratch2, second, kFlatAsciiStringMask);
  Cmp(scratch1, kFlatAsciiStringTag);

                                             Label* not_unique_name) {
  B(ne, not_unique_name);
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    bool* definitely_mismatches,
                                    const CallWrapper& call_wrapper) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label regular_invoke;

  ASSERT(actual.is_immediate() || actual.reg().is(x0));
  ASSERT(expected.is_immediate() || expected.reg().is(x2));
  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3));

  if (expected.is_immediate()) {
    ASSERT(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
      Mov(x0, actual.immediate());
      if (expected.immediate() ==
        definitely_matches = true;
        *definitely_mismatches = true;
        Mov(x2, expected.immediate());

    Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
                                              : Operand(actual.reg());
    Cmp(expected.reg(), actual_op);
    B(eq, &regular_invoke);

  if (!definitely_matches) {
    if (!code_constant.is_null()) {
      Mov(x3, Operand(code_constant));
    Handle<Code> adaptor =
      call_wrapper.BeforeCall(CallSize(adaptor));
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {
      Jump(adaptor, RelocInfo::CODE_TARGET);
  Bind(&regular_invoke);
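// Standalone sketch (hypothetical names) of the decision made above: when
// both argument counts are compile-time constants the match/mismatch is
// settled statically (with the "don't adapt" sentinel counting as a match);
// otherwise the Cmp/B(eq, &regular_invoke) check is emitted and the
// ArgumentsAdaptor trampoline stays reachable.
struct ArityDecisionSketch { bool definitely_matches; bool definitely_mismatches; };
static ArityDecisionSketch DecideArity(bool counts_are_constant, int expected,
                                       int actual, int dont_adapt_sentinel) {
  ArityDecisionSketch d = { false, false };
  if (counts_are_constant) {
    if ((expected == actual) || (expected == dont_adapt_sentinel)) {
      d.definitely_matches = true;
    } else {
      d.definitely_mismatches = true;
    }
  }
  return d;  // neither flag set => fall through to the dynamic register check
}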
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                const CallWrapper& call_wrapper) {
  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
                 &definitely_mismatches, call_wrapper);

  if (!definitely_mismatches) {
      call_wrapper.BeforeCall(CallSize(code));
      call_wrapper.AfterCall();

                                    const ParameterCount& actual,
                                    const CallWrapper& call_wrapper) {
  Register expected_reg = x2;
  Register code_reg = x3;

  ParameterCount expected(expected_reg);
  InvokeCode(code_reg, expected, actual, flag, call_wrapper);

                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    const CallWrapper& call_wrapper) {
  Register code_reg = x3;

  InvokeCode(code_reg, expected, actual, flag, call_wrapper);

                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    const CallWrapper& call_wrapper) {
  Fcvtzs(result.X(), double_input);

  DoubleToIStub stub(jssp,
  ASSERT(!result.is(object));
  DoubleToIStub stub(object,

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();

  if (isolate()->IsCodePreAgingActive()) {

  UseScratchRegisterScope temps(this);
  Register type_reg = temps.AcquireX();
  Register code_reg = temps.AcquireX();

  Push(type_reg, code_reg);
  ASSERT(saved_fp_regs.Count() % 2 == 0);

  while (!saved_fp_regs.IsEmpty()) {
    const CPURegister& dst0 = saved_fp_regs.PopHighestIndex();
    const CPURegister& dst1 = saved_fp_regs.PopHighestIndex();

                                    const Register& scratch,
  Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
  Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,

                                    const Register& scratch,
                                    bool restore_context) {
  if (restore_doubles) {

  if (restore_context) {
    Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
  Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
  Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
                                  Register scratch1, Register scratch2) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    Mov(scratch1, value);
    Mov(scratch2, ExternalReference(counter));

                                      Register scratch1, Register scratch2) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    Mov(scratch2, ExternalReference(counter));
    Add(scratch1, scratch1, value);

                                      Register scratch1, Register scratch2) {
  if (context_chain_length > 0) {
    for (int i = 1; i < context_chain_length; i++) {
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
  Mov(x1, ExternalReference(Runtime::kDebugBreak, isolate()));

                                    int handler_index) {
      StackHandler::IndexField::encode(handler_index) |
      StackHandler::KindField::encode(kind);

  if (kind == StackHandler::JS_ENTRY) {
    Push(xzr, xzr, x11, x10);

  Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));

  Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
  if (!FLAG_inline_new) {

  UseScratchRegisterScope temps(this);
  Register scratch3 = temps.AcquireX();

  ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());

  ExternalReference heap_allocation_top =
  ExternalReference heap_allocation_limit =
  intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
  intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());

  Register top_address = scratch1;
  Register allocation_limit = scratch2;
  Mov(top_address, Operand(heap_allocation_top));

    Cmp(result, scratch3);
    Check(eq, kUnexpectedAllocationTop);
  Ldr(allocation_limit, MemOperand(top_address, limit - top));

  Adds(scratch3, result, object_size);
  Cmp(scratch3, allocation_limit);

  if (!FLAG_inline_new) {

  UseScratchRegisterScope temps(this);
  Register scratch3 = temps.AcquireX();

  ASSERT(object_size.Is64Bits() && result.Is64Bits() &&
         scratch1.Is64Bits() && scratch2.Is64Bits());

  ExternalReference heap_allocation_top =
  ExternalReference heap_allocation_limit =
  intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
  intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());

  Register top_address = scratch1;
  Register allocation_limit = scratch2;
  Mov(top_address, heap_allocation_top);

  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    Cmp(result, scratch3);
    Check(eq, kUnexpectedAllocationTop);
  Ldr(allocation_limit, MemOperand(top_address, limit - top));

  if ((flags & SIZE_IN_WORDS) != 0) {
  Adds(scratch3, result, object_size);
    Check(eq, kUnalignedAllocationInNewSpace);
  Cmp(scratch3, allocation_limit);

  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  Mov(scratch, new_space_allocation_top);
  Cmp(object, scratch);
  Check(lt, kUndoAllocationOfNonAllocatedMemory);

  Mov(scratch, new_space_allocation_top);
                                           Label* gc_required) {
  Add(scratch1, length, length);

  InitializeNewString(result,
                      Heap::kStringMapRootIndex,

                                          Label* gc_required) {
  InitializeNewString(result,
                      Heap::kAsciiStringMapRootIndex,

                                             Label* gc_required) {
  InitializeNewString(result,
                      Heap::kConsStringMapRootIndex,

                                            Label* gc_required) {
  Label allocate_new_space, install_map;

  ExternalReference high_promotion_mode = ExternalReference::
      new_space_high_promotion_mode_active_address(isolate());
  Mov(scratch1, high_promotion_mode);
  Cbz(scratch1, &allocate_new_space);

  Bind(&allocate_new_space);

  InitializeNewString(result,
                      Heap::kConsAsciiStringMapRootIndex,

                                               Label* gc_required) {
  InitializeNewString(result,
                      Heap::kSlicedStringMapRootIndex,

                                              Label* gc_required) {
  InitializeNewString(result,
                      Heap::kSlicedAsciiStringMapRootIndex,

                                        Register heap_number_map) {
  if (heap_number_map.Is(NoReg)) {
    heap_number_map = scratch1;
    LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
                                         Register heap_number_map) {

                                       Label* if_cond_pass,
  B(cond, if_cond_pass);

                                   Label* if_not_object) {
  Cmp(type_reg, type);
  Cmp(obj_map, Operand(map));

                                  Handle<Code> success,
  Cmp(scratch, Operand(map));
  Jump(success, RelocInfo::CODE_TARGET);

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();

  Register scratch_w = scratch.W();
  JumpIfRoot(result, Heap::kTheHoleValueRootIndex, miss);

  Bind(&non_instance);

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();

                                       Label* if_not_equal) {
  B(ne, if_not_equal);
                                         Label* fall_through) {
  if ((if_true == if_false) && (if_false == fall_through)) {
  } else if (if_true == if_false) {
  } else if (if_false == fall_through) {
  } else if (if_true == fall_through) {

                                  uint64_t bit_pattern,
                                  Label* if_all_clear,
                                  Label* fall_through) {
  if ((if_all_clear == if_any_set) && (if_any_set == fall_through)) {
  } else if (if_all_clear == if_any_set) {
  } else if (if_all_clear == fall_through) {
  } else if (if_any_set == fall_through) {
                                                 Register elements_reg,
                                                 FPRegister fpscratch1,
                                                 FPRegister fpscratch2,
                                                 int elements_offset) {
  CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(),
  Fcmp(fpscratch1, fpscratch1);
  Fcsel(fpscratch1, fpscratch2, fpscratch1, vs);

  Add(scratch1, elements_reg,

  return has_frame_ || !stub->SometimesSetsUpAFrame();

                                                  uint32_t encoding_mask) {
  Cmp(scratch, encoding_mask);
  Check(eq, kUnexpectedStringType);
  Label same_contexts;

  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);

  CompareRoot(scratch2, Heap::kNativeContextMapRootIndex);
  Check(eq, kExpectedNativeContext);

  Cmp(scratch1, scratch2);
  B(&same_contexts, eq);

  Register scratch3 = holder_reg;
  Check(ne, kExpectedNonNullContext);
  CompareRoot(scratch3, Heap::kNativeContextMapRootIndex);
  Check(eq, kExpectedNativeContext);

  Cmp(scratch1, scratch2);
  Bind(&same_contexts);
  LoadRoot(scratch, Heap::kHashSeedRootIndex);
  scratch = scratch.W();

  Add(key, scratch, Operand(key, LSL, 15));
  Eor(key, key, Operand(key, LSR, 12));
  Add(key, key, Operand(key, LSL, 2));
  Eor(key, key, Operand(key, LSR, 4));
  Mov(scratch, Operand(key, LSL, 11));
  Add(key, key, Operand(key, LSL, 3));
  Add(key, key, scratch);
  Eor(key, key, Operand(key, LSR, 16));
                                                  Register scratch3) {
  ASSERT(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));

  Sub(scratch1, scratch1, 1);

  for (int i = 0; i < kNumberDictionaryProbes; i++) {
      Add(scratch2, scratch0, SeededNumberDictionary::GetProbeOffset(i));
      Mov(scratch2, scratch0);
    And(scratch2, scratch2, scratch1);

    Add(scratch2, scratch2, Operand(scratch2, LSL, 1));

    if (i != (kNumberDictionaryProbes - 1)) {

  const int kDetailsOffset =
  const int kValueOffset =
  Label done, store_buffer_overflow;
  Abort(kRememberedSetPointerInNewSpace);

  UseScratchRegisterScope temps(this);
  Register scratch2 = temps.AcquireX();
  Mov(scratch2, ExternalReference::store_buffer_top(isolate()));

  Bind(&store_buffer_overflow);
  StoreBufferOverflowStub store_buffer_overflow_stub =
      StoreBufferOverflowStub(fp_mode);
  CallStub(&store_buffer_overflow_stub);
  ASSERT(num_unsaved >= 0);

int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  if ((reg_code >= 0) && (reg_code <= 15)) {
  } else if ((reg_code >= 18) && (reg_code <= 27)) {
    return reg_code - 2;
  } else if ((reg_code == 29) || (reg_code == 30)) {
    return reg_code - 3;
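// The mapping above, restated as a standalone sketch (the constants come
// straight from the branches): x0-x15 keep their own index, x18-x27 shift
// down past the two untracked codes below them, and codes 29 and 30 shift
// down by three.
static int SafepointRegisterStackIndexSketch(int reg_code) {
  if ((reg_code >= 0) && (reg_code <= 15)) return reg_code;
  if ((reg_code >= 18) && (reg_code <= 27)) return reg_code - 2;
  if ((reg_code == 29) || (reg_code == 30)) return reg_code - 3;
  return -1;  // not a safepoint-saved register
}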
                                   const Register& scratch,
                                   Label* if_any_set) {

                                     const Register& scratch,
                                     Label* if_all_clear) {

  Abort(kUnalignedCellInWriteBarrier);

              remembered_set_action,
    Mov(scratch, Operand(BitCast<int64_t>(kZapValue + 8)));

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  Check(eq, kWrongAddressOrValuePassedToRecordWrite);

  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);

    Mov(address, Operand(BitCast<int64_t>(kZapValue + 12)));

  Label color_is_valid;
  Tbnz(reg, 0, &color_is_valid);
  Tbz(reg, 1, &color_is_valid);
  Abort(kUnexpectedColorFound);
  Bind(&color_is_valid);
void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register shift_reg) {
  ASSERT(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits());
  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();

                              Register bitmap_scratch,
                              Register shift_scratch,
  GetMarkBits(object, bitmap_scratch, shift_scratch);
  Lsr(bitmap_scratch, bitmap_scratch, shift_scratch);

  if (first_bit == 0) {
    Tbz(bitmap_scratch, 0, has_color);
    Tbz(bitmap_scratch, 0, &other_color);
    if (second_bit == 0) {
      Tbz(bitmap_scratch, 1, has_color);
      Tbnz(bitmap_scratch, 1, has_color);

                                        Label* if_deprecated) {
  if (map->CanBeDeprecated()) {
    Mov(scratch, Operand(map));

  HasColor(object, scratch0, scratch1, on_black, 1, 0);

  Register current = scratch0;
  Mov(current, object);
  ASSERT(!result.Is(ldr_location));
  const uint32_t kLdrLitOffset_lsb = 5;
  const uint32_t kLdrLitOffset_width = 19;
  Check(eq, kTheInstructionToPatchShouldBeAnLdrLiteral);
  Sbfx(result, result, kLdrLitOffset_lsb, kLdrLitOffset_width);

                                    Register bitmap_scratch,
                                    Register shift_scratch,
                                    Register load_scratch,
                                    Register length_scratch,
                                    Label* value_is_white_and_not_data) {
      value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));

  GetMarkBits(value, bitmap_scratch, shift_scratch);
  Lsr(load_scratch, load_scratch, shift_scratch);
  Tbnz(load_scratch, 0, &done);

  Register map = load_scratch;
  Label is_data_object;
  JumpIfRoot(map, Heap::kHeapNumberMapRootIndex, &is_data_object);

  Register instance_type = load_scratch;
                          value_is_white_and_not_data);
  Lsl(length_scratch, length_scratch, load_scratch);
  Bind(&is_data_object);

  Register mask = shift_scratch;
  Mov(load_scratch, 1);
  Lsl(mask, load_scratch, shift_scratch);
  Orr(load_scratch, load_scratch, mask);
  Add(load_scratch, load_scratch, length_scratch);
    Check(cond, reason);

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok);
  JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok);
  JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok);
  Abort(kJSObjectWithFastElementsMapHasSlowElements);

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireX();
  Check(ne, kOperandIsNotAString);
  Check(lo, kOperandIsNotAString);

  if (FLAG_trap_on_abort) {
    Mov(jssp, old_stack_pointer);

  NoUseRealAbortsScope no_real_aborts(this);
  Adr(x0, &msg_address);

  BlockPoolsScope scope(this);
                                                      Register map_in_out,
                                                      Label* no_map_match) {
  Cmp(map_in_out, scratch2);
  B(ne, no_map_match);

  Abort(kGlobalFunctionsMustHaveInitialMap);

                                          const CPURegister& arg0,
                                          const CPURegister& arg1,
                                          const CPURegister& arg2,
                                          const CPURegister& arg3) {
  ASSERT(!TmpList()->IncludesAliasOf(arg0, arg1, arg2, arg3));
  static const int kMaxArgCount = 4;
  int arg_count = kMaxArgCount;

  CPURegister args[kMaxArgCount] = {arg0, arg1, arg2, arg3};
  CPURegister pcs[kMaxArgCount] = {NoCPUReg, NoCPUReg, NoCPUReg, NoCPUReg};

  for (int i = 0; i < kMaxArgCount; i++) {
    if (args[i].IsRegister()) {
      if (!args[i].Is64Bits()) {
        const Register& as_x = args[i].X();
        And(as_x, as_x, 0x00000000ffffffff);
    } else if (args[i].IsFPRegister()) {
      if (!args[i].Is64Bits()) {
        FPRegister s(args[i]);
        const FPRegister& as_d = args[i].D();

  ASSERT((arg_count >= 0) && (arg_count <= kMaxArgCount));
  for (int i = arg_count; i < kMaxArgCount; i++) {
    ASSERT(args[i].IsNone());

  if (arg_count >= 4) {
    Push(args[3], args[2], args[1], args[0]);
  } else if (arg_count >= 2) {
    Push(args[1], args[0]);

  if ((arg_count % 2) != 0) {
    const CPURegister& leftover_arg = args[arg_count - 1];
    const CPURegister& leftover_pcs = pcs[arg_count - 1];
    if (leftover_arg.IsRegister()) {
      Mov(Register(leftover_pcs), Register(leftover_arg));
      Fmov(FPRegister(leftover_pcs), FPRegister(leftover_arg));

  if (arg_count >= 4) {
    Pop(pcs[0], pcs[1], pcs[2], pcs[3]);
  } else if (arg_count >= 2) {
    Pop(pcs[0], pcs[1]);

  Label format_address;
  Adr(x0, &format_address);

  { BlockPoolsScope scope(this);
    Bind(&format_address);

  CallPrintf(pcs[0].type());
#ifdef USE_SIMULATOR

                            const CPURegister& arg0,
                            const CPURegister& arg1,
                            const CPURegister& arg2,
                            const CPURegister& arg3) {
  tmp_list.Remove(arg0, arg1, arg2, arg3);
  fp_tmp_list.Remove(arg0, arg1, arg2, arg3);

  { UseScratchRegisterScope temps(this);
    Register tmp = temps.AcquireX();

  { UseScratchRegisterScope temps(this);
    Register tmp = temps.AcquireX();

  InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize);

  InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize);
  __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start()));

  static bool initialized = false;
  static byte young[kCodeAgeSequenceSize];
  PatchingAssembler patcher(young, length);
  bool is_young = (memcmp(sequence, young, kCodeAgeSequenceSize) == 0);
  ASSERT(is_young || IsCodeAgeSequence(sequence));

bool MacroAssembler::IsCodeAgeSequence(byte* sequence) {
  static bool initialized = false;
  static byte old[kCodeAgeStubEntryOffset];
  PatchingAssembler patcher(old, length);
  return memcmp(sequence, old, kCodeAgeStubEntryOffset) == 0;
  ASSERT(result.Is32Bits() && dividend.Is32Bits());
  MultiplierAndShift ms(divisor);
  Mov(result, ms.multiplier());
  Smull(result.X(), dividend, result);
  Asr(result.X(), result.X(), 32);
  if (divisor > 0 && ms.multiplier() < 0) Add(result, result, dividend);
  if (divisor < 0 && ms.multiplier() > 0) Sub(result, result, dividend);
  if (ms.shift() > 0) Asr(result, result, ms.shift());
  Add(result, result, Operand(dividend, LSR, 31));
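// Standalone sketch of the "magic number" signed division the sequence above
// performs: multiply by a precomputed multiplier, take the high 32 bits,
// apply the sign corrections, shift, then add the dividend's sign bit.
// multiplier/shift stand in for V8's MultiplierAndShift(divisor).
#include <cstdint>
static int32_t TruncatingDivSketch(int32_t dividend, int32_t divisor,
                                   int32_t multiplier, int shift) {
  int64_t product = static_cast<int64_t>(dividend) * multiplier;  // Smull
  int32_t result = static_cast<int32_t>(product >> 32);           // Asr #32
  if ((divisor > 0) && (multiplier < 0)) result += dividend;
  if ((divisor < 0) && (multiplier > 0)) result -= dividend;
  if (shift > 0) result >>= shift;                                 // Asr
  result += static_cast<int32_t>(static_cast<uint32_t>(dividend) >> 31);  // LSR #31
  return result;
}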
  available_->set_list(old_available_);
  availablefp_->set_list(old_availablefp_);

  int code = AcquireNextAvailable(available_).code();

  int code = AcquireNextAvailable(availablefp_).code();

CPURegister UseScratchRegisterScope::AcquireNextAvailable(
  CHECK(!available->IsEmpty());
  CPURegister result = available->PopLowestIndex();

                                         const CPURegister& reg) {
  ASSERT(available->IncludesAliasOf(reg));
  available->Remove(reg);

                            const Label* smi_check) {
  Assembler::BlockPoolsScope scope(masm);
  if (reg.IsValid()) {
    ASSERT(smi_check->is_bound());
    __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
    ASSERT(!smi_check->is_bound());

    : reg_(NoReg), smi_check_(NULL) {
  ASSERT(inline_data->IsInlineData());
  if (inline_data->IsInlineData()) {
    uint64_t payload = inline_data->InlineData();
    ASSERT(is_uint32(payload));
    int reg_code = RegisterBits::decode(payload);
    uint64_t smi_check_delta = DeltaBits::decode(payload);
    ASSERT(smi_check_delta != 0);
    smi_check_ = inline_data->preceding(smi_check_delta);

#endif  // V8_TARGET_ARCH_ARM64
void AssertRegisterIsClear(Register reg, BailoutReason reason)
void cbnz(const Register &rt, Label *label)
const RegList kSafepointSavedRegisters
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
void Poke(const CPURegister &src, const Operand &offset)
void SmiUntag(Register reg, SBit s=LeaveCC)
void SmiAbs(const Register &smi, Label *slow)
void TestMapBitfield(Register object, uint64_t mask)
void EmitExtendShift(const Register &rd, const Register &rn, Extend extend, unsigned left_shift)
static const char * kGreyBitPattern
static const int kHashFieldOffset
static const int kBitFieldOffset
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf deoptimize the optimized code if the layout of the maps changes number of stack frames inspected by the profiler percentage of ICs that must have type info to allow optimization extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of VFP3 instructions if available enable use of NEON instructions if available(ARM only)") DEFINE_bool(enable_sudiv
static const int kFPOffset
void Mvn(const Register &rd, uint64_t imm)
Isolate * isolate() const
void Adr(const Register &rd, Label *label)
static FPRegister Create(unsigned code, unsigned size)
int InstructionsGeneratedSince(Label *label)
const intptr_t kSmiTagMask
void TestAndBranchIfAllClear(const Register ®, const uint64_t bit_pattern, Label *label)
void PushSafepointRegisters()
void csinv(const Register &rd, const Register &rn, const Register &rm, Condition cond)
static const int kCodeEntryOffset
const Register & AppropriateZeroRegFor(const CPURegister ®) const
const RegList kCallerSaved
void tbz(const Register &rt, unsigned bit_pos, Label *label)
static const int kPrototypeOrInitialMapOffset
static const int kStateOffset
static bool IsImmLSScaled(ptrdiff_t offset, LSDataSize size)
void FillFields(Register dst, Register field_count, Register filler)
static const int kValueOffset
void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index, BailoutReason reason=kRegisterDidNotMatchExpectedRoot)
static int SlotOffset(int index)
void Orr(const Register &rd, const Register &rn, const Operand &operand)
void GetRelocatedValueLocation(Register ldr_location, Register result)
static const int kBuiltinsOffset
void InvokeFunction(Register function, const ParameterCount &actual, InvokeFlag flag, const CallWrapper &call_wrapper)
void LoadRelocated(const CPURegister &rt, const Operand &operand)
void PrintfNoPreserve(const char *format, const CPURegister &arg0=NoCPUReg, const CPURegister &arg1=NoCPUReg, const CPURegister &arg2=NoCPUReg, const CPURegister &arg3=NoCPUReg)
void LoadElementsKindFromMap(Register result, Register map)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths true
void SmiTag(Register reg, SBit s=LeaveCC)
void B(Label *label, BranchType type, Register reg=NoReg, int bit=-1)
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter trace hydrogen to given file name trace inlining decisions trace store elimination trace all use positions trace global value numbering trace hydrogen escape analysis trace the tracking of allocation sites trace map generalization environment for every instruction deoptimize every n garbage collections put a break point before deoptimizing deoptimize uncommon cases use on stack replacement trace array bounds check elimination perform array index dehoisting use load elimination use store elimination use constant folding eliminate unreachable code number of stress runs when picking a function to watch for shared function not JSFunction itself flushes the cache of optimized code for closures on every GC functions with arguments object maximum number of escape analysis fix point iterations allow uint32 values on optimize frames if they are used only in safe operations track concurrent recompilation artificial compilation delay in ms concurrent on stack replacement do not emit check maps for constant values that have a leaf map
void Ldp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &src)
static const int kCallSizeWithoutRelocation
static const uint32_t kMask
const int kDoubleSizeLog2
void Lsr(const Register &rd, const Register &rn, unsigned shift)
void Tbz(const Register &rt, unsigned bit_pos, Label *label)
void ClampInt32ToUint8(Register in_out)
const LowDwVfpRegister d11
static const RegList kAllocatableFPRegisters
void CompareAndBranch(const Register &lhs, const Operand &rhs, Condition cond, Label *label)
void AllocateTwoByteSlicedString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
void mov(Register rd, Register rt)
static const char * kWhiteBitPattern
static const int kCodeOffset
static const int kCallSizeWithRelocation
bool use_real_aborts() const
const LowDwVfpRegister d0
void Ubfx(Register dst, Register src, int lsb, int width, Condition cond=al)
static bool IsImmLSUnscaled(ptrdiff_t offset)
const unsigned kDRegSizeInBits
static Smi * FromInt(int value)
void AssertString(Register object)
int next_veneer_pool_check_
#define ASM_LOCATION(message)
void ExitFramePreserveFPRegs()
void ConditionalCompareMacro(const Register &rn, const Operand &operand, StatusFlags nzcv, Condition cond, ConditionalCompareOp op)
void AlignAndSetCSPForFrame()
void JumpForHeapNumber(Register object, Register heap_number_map, Label *on_heap_number, Label *on_not_heap_number=NULL)
void JumpToExternalReference(const ExternalReference &builtin)
void JumpIfEitherInstanceTypeIsNotSequentialAscii(Register first_object_instance_type, Register second_object_instance_type, Register scratch1, Register scratch2, Label *failure)
static const int kElementsKindBitCount
void LoadInstanceDescriptors(Register map, Register descriptors)
static const int kVeneerDistanceCheckMargin
void tbnz(const Register &rt, unsigned bit_pos, Label *label)
void ConditionalCompare(const Register &rn, const Operand &operand, StatusFlags nzcv, Condition cond, ConditionalCompareOp op)
void Cneg(const Register &rd, const Register &rn, Condition cond)
const unsigned kWRegSizeInBitsLog2
void AllocateAsciiString(Register result, Register length, Register scratch1, Register scratch2, Register scratch3, Label *gc_required)
void PushCalleeSavedRegisters()
STATIC_ASSERT((reg_zero==(reg_not_zero^1))&&(reg_bit_clear==(reg_bit_set^1))&&(always==(never^1)))
static LSDataSize CalcLSDataSize(LoadStoreOp op)
const unsigned kByteSizeInBytes
const unsigned kXRegSizeInBits
void Fcvtnu(const Register &rd, const FPRegister &fn)
void LoadFromNumberDictionary(Label *miss, Register elements, Register key, Register result, Register t0, Register t1, Register t2)
void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0, Register scratch1, Label *found)
void CheckMap(Register obj, Register scratch, Handle< Map > map, Label *fail, SmiCheckType smi_check_type)
void Store(Register src, const MemOperand &dst, Representation r)
void GetBuiltinEntry(Register target, Builtins::JavaScript id)
void b(int branch_offset, Condition cond=al)
void JumpIfSmi(Register value, Label *smi_label)
void Ldr(const FPRegister &ft, double imm)
void DispatchMap(Register obj, Register scratch, Handle< Map > map, Handle< Code > success, SmiCheckType smi_check_type)
static Code * GetPreAgedCodeAgeStub(Isolate *isolate)
TypeImpl< ZoneTypeConfig > Type
bool AllowThisStubCall(CodeStub *stub)
static Register Create(unsigned code, unsigned size)
static const intptr_t kPageAlignmentMask
void EnterFrame(StackFrame::Type type, bool load_constant_pool=false)
void Peek(const CPURegister &dst, const Operand &offset)
void PopSafepointRegistersAndDoubles()
void LeaveExitFrame(bool save_doubles, Register argument_count, bool restore_context)
const Register & StackPointer() const
static void Emit(MacroAssembler *masm, const Register &reg, const Label *smi_check)
const uint32_t kIsNotInternalizedMask
void CheckFastObjectElements(Register map, Register scratch, Label *fail)
void EnumLengthSmi(Register dst, Register map)
void Fcsel(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, Condition cond)
void Logical(const Register &rd, const Register &rn, const Operand &operand, LogicalOp op)
void InvokeCode(Register code, const ParameterCount &expected, const ParameterCount &actual, InvokeFlag flag, const CallWrapper &call_wrapper)
const LowDwVfpRegister d15
static const int kHandlerTableOffset
void ThrowIf(Condition cc, BailoutReason reason)
#define ASSERT(condition)
void PushMultipleTimes(CPURegister src, Register count)
void CompareMap(Register obj, Register scratch, Handle< Map > map, Label *early_success)
static const int kContextOffset
void AssertNotSmi(Register object)
const int kPointerSizeLog2
void RecordWriteField(Register object, int offset, Register value, Register scratch, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action=EMIT_REMEMBERED_SET, SmiCheck smi_check=INLINE_SMI_CHECK)
void SetRecordedAstId(TypeFeedbackId ast_id)
static bool IsValidImmPCOffset(ImmBranchType branch_type, int32_t offset)
void Subs(const Register &rd, const Register &rn, const Operand &operand)
static const int kFlagsOffset
static const int kNativeContextOffset
void PushTryHandler(StackHandler::Kind kind, int handler_index)
void LoadTransitionedArrayMapConditional(ElementsKind expected_kind, ElementsKind transitioned_kind, Register map_in_out, Register scratch, Label *no_map_match)
void And(Register dst, Register src1, const Operand &src2, Condition cond=al)
static const char * kBlackBitPattern
static int OffsetOfFunctionWithId(Builtins::JavaScript id)
void NumberOfOwnDescriptors(Register dst, Register map)
static const int kNextOffset
const uint32_t kStringRepresentationMask
void JumpIfMinusZero(DoubleRegister input, Label *on_negative_zero)
void csinc(const Register &rd, const Register &rn, const Register &rm, Condition cond)
void CompareAndSplit(const Register &lhs, const Operand &rhs, Condition cond, Label *if_true, Label *if_false, Label *fall_through)
void JumpIfRoot(const Register &obj, Heap::RootListIndex index, Label *if_equal)
void IncrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void PopXRegList(RegList regs)
void movk(const Register &rd, uint64_t imm, int shift=-1)
void Bic(const Register &rd, const Register &rn, const Operand &operand)
PerThreadAssertScopeDebugOnly< DEFERRED_HANDLE_DEREFERENCE_ASSERT, true > AllowDeferredHandleDereference
void PushXRegList(RegList regs)
static const int kEntrySize
const bool FLAG_enable_slow_asserts
void Sbfx(Register dst, Register src, int lsb, int width, Condition cond=al)
static const int kDescriptorsOffset
int WhichPowerOf2(uint32_t x)
const intptr_t kObjectAlignmentMask
void Load(Register dst, const MemOperand &src, Representation r)
static Operand UntagSmiAndScale(Register smi, int scale)
bool AreSameSizeAndType(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoCPUReg, const CPURegister &reg4=NoCPUReg, const CPURegister &reg5=NoCPUReg, const CPURegister &reg6=NoCPUReg, const CPURegister &reg7=NoCPUReg, const CPURegister &reg8=NoCPUReg)
static const int kContextOffset
const LowDwVfpRegister d10
void LoadStoreMacro(const CPURegister &rt, const MemOperand &addr, LoadStoreOp op)
const Instr kImmExceptionIsPrintf
const intptr_t kHeapObjectTagMask
void AssertSmi(Register object)
void CompareRoot(Register obj, Heap::RootListIndex index)
void JumpIfNotUniqueName(Register reg, Label *not_unique_name)
void DecrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
const unsigned kXRegSizeInBitsLog2
void LoadTrueFalseRoots(Register true_root, Register false_root)
SeqStringSetCharCheckIndexType
void TruncatingDiv(Register result, Register dividend, int32_t divisor)
void EmitSeqStringSetCharCheck(Register string, Register index, Register value, uint32_t encoding_mask)
static const int kInstanceClassNameOffset
void EnumLengthUntagged(Register dst, Register map)
Handle< Object > CodeObject()
void Abort(BailoutReason msg)
void mvn(Register dst, const Operand &src, SBit s=LeaveCC, Condition cond=al)
void Eor(const Register &rd, const Register &rn, const Operand &operand)
const unsigned kWRegSizeInBits
void JumpIfInstanceTypeIsNotSequentialAscii(Register type, Register scratch, Label *failure)
void CompareInstanceType(Register map, Register type_reg, InstanceType type)
void stp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &dst)
static bool IsImmMovz(uint64_t imm, unsigned reg_size)
const uint32_t kNotStringTag
void JumpIfNotHeapNumber(Register object, Register heap_number_map, Register scratch, Label *on_not_heap_number)
MemOperand UntagSmiFieldMemOperand(Register object, int offset)
void JumpIfNotInNewSpace(Register object, Register scratch, Label *branch)
static const int kCallerPCOffset
static const uint32_t kBytesPerCellLog2
void CompareObjectType(Register heap_object, Register map, Register type_reg, InstanceType type)
DwVfpRegister DoubleRegister
void ExitFrameRestoreFPRegs()
void Cbnz(const Register &rt, Label *label)
void TestJSArrayForAllocationMemento(Register receiver_reg, Register scratch_reg, Label *no_memento_found)
void AssertHasValidColor(const Register &reg)
static const size_t kHeaderSize
void cbz(const Register &rt, Label *label)
static int ActivationFrameAlignment()
const LowDwVfpRegister d14
void LoadStore(const CPURegister &rt, const MemOperand &addr, LoadStoreOp op)
static const int kLengthOffset
static const int kCapacityOffset
void CheckFastElements(Register map, Register scratch, Label *fail)
void TestForMinusZero(DoubleRegister input)
bool * is_profiling_address()
static const int kValueOffset
Condition InvertCondition(Condition cond)
void AddSubWithCarryMacro(const Register &rd, const Register &rn, const Operand &operand, FlagsUpdate S, AddSubWithCarryOp op)
void br(const Register &xn)
void LoadGlobalFunction(int index, Register function)
void TryGetFunctionPrototype(Register function, Register result, Register scratch, Label *miss, bool miss_on_bound_function=false)
static const int kDontAdaptArgumentsSentinel
const LowDwVfpRegister d13
static const int kCallerFPOffset
static const int kStoreBufferOverflowBit
InlineSmiCheckInfo(Address info)
static const int8_t kMaximumBitField2FastHoleyElementValue
const uint32_t kIsIndirectStringMask
const unsigned kPrintfLength
static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size)
void AddSub(const Register &rd, const Register &rn, const Operand &operand, FlagsUpdate S, AddSubOp op)
void SetStackPointer(const Register &stack_pointer)
static bool IsImmAddSub(int64_t immediate)
void csel(const Register &rd, const Register &rn, const Register &rm, Condition cond)
static const int kMaxCachedArrayIndexLength
void TestAndBranchIfAnySet(const Register &reg, const uint64_t bit_pattern, Label *label)
void CallStub(CodeStub *stub, TypeFeedbackId ast_id=TypeFeedbackId::None(), Condition cond=al)
void CallCFunction(ExternalReference function, int num_arguments)
void Fcmp(const FPRegister &fn, const FPRegister &fm)
const unsigned kInstructionSize
void AllocateAsciiConsString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
static bool IsImmConditionalCompare(int64_t immediate)
void JumpIfHeapNumber(Register object, Label *on_heap_number, Register heap_number_map=NoReg)
void Jump(Register target, Condition cond=al)
bool IsAligned(T value, U alignment)
void RecordWrite(Register object, Register address, Register value, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action=EMIT_REMEMBERED_SET, SmiCheck smi_check=INLINE_SMI_CHECK)
void TruncateHeapNumberToI(Register result, Register object)
void Allocate(int object_size, Register result, Register scratch1, Register scratch2, Label *gc_required, AllocationFlags flags)
void Fmov(FPRegister fd, FPRegister fn)
void AllocateHeapNumber(Register result, Register scratch1, Register scratch2, Register heap_number_map, Label *gc_required, TaggingMode tagging_mode=TAG_RESULT)
void CopyBytes(Register src, Register dst, Register length, Register scratch)
void LoadHeapObject(Register dst, Handle< HeapObject > object)
const RegList kCalleeSaved
void Throw(Register value)
void Fcvtzs(const Register &rd, const FPRegister &fn)
static const int kMaxRegularHeapObjectSize
void ThrowIfSmi(const Register &value, BailoutReason reason)
void JumpIfNotObjectType(Register object, Register map, Register type_reg, InstanceType type, Label *if_not_object)
static ExternalReference GetAllocationLimitReference(Isolate *isolate, AllocationFlags flags)
static const uint32_t kShift
void SetCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void Fmin(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void str(Register src, const MemOperand &dst, Condition cond=al)
bool NeedExtraInstructionsOrRegisterBranch(Label *label, ImmBranchType branch_type)
Handle< Cell > NewCell(Handle< Object > value)
static const int kHeaderSize
static const int kElementsOffset
void movz(const Register &rd, uint64_t imm, int shift=-1)
void PopCPURegList(CPURegList registers)
int TenToThe(int exponent)
void SmiUntagToDouble(FPRegister dst, Register src, UntagMode mode=kNotSpeculativeUntag)
MacroAssembler(Isolate *isolate, void *buffer, int size)
const uint32_t kStringTag
void LoadContext(Register dst, int context_chain_length)
static const int kArrayIndexValueBits
void Cset(const Register &rd, Condition cond)
static int CallSize(Register target, Condition cond=al)
void CallExternalReference(const ExternalReference &ext, int num_arguments)
static const int kSPOffset
void CallApiFunctionAndReturn(Register function_address, ExternalReference thunk_ref, int stack_space, MemOperand return_value_operand, MemOperand *context_restore_operand)
void AssertFastElements(Register elements)
void add(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
const uint32_t kInternalizedTag
void CheckMapDeprecated(Handle< Map > map, Register scratch, Label *if_deprecated)
void PushSafepointRegistersAndDoubles()
void set_list(RegList new_list)
void JumpIfBlack(Register object, Register scratch0, Register scratch1, Label *on_black)
const LowDwVfpRegister d9
void ClampDoubleToUint8(Register result_reg, DwVfpRegister input_reg, LowDwVfpRegister double_scratch)
void AllocateTwoByteConsString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
void Cbz(const Register &rt, Label *label)
void Drop(int count, Condition cond=al)
static const int kCallerSPDisplacement
void AddSubWithCarry(const Register &rd, const Register &rn, const Operand &operand, FlagsUpdate S, AddSubWithCarryOp op)
void GetBuiltinFunction(Register target, Builtins::JavaScript id)
void Stp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &dst)
void ldr(Register dst, const MemOperand &src, Condition cond=al)
static const int kHeaderSize
void CheckAccessGlobalProxy(Register holder_reg, Register scratch, Label *miss)
void CopyFields(Register dst, Register src, LowDwVfpRegister double_scratch, int field_count)
static const int kMapOffset
static const int kFixedFrameSizeFromFp
void Abs(const Register &rd, const Register &rm, Label *is_not_representable=NULL, Label *is_representable=NULL)
void LookupNumberStringCache(Register object, Register result, Register scratch1, Register scratch2, Register scratch3, Label *not_found)
void Combine(const CPURegList &other)
void TruncateDoubleToI(Register result, DwVfpRegister double_input)
const uint32_t kIsNotStringMask
static const intptr_t kLiveBytesOffset
void LoadObject(Register result, Handle< Object > object)
static bool IsImmMovn(uint64_t imm, unsigned reg_size)
void Add(const Register &rd, const Register &rn, const Operand &operand)
void Tbnz(const Register &rt, unsigned bit_pos, Label *label)
const char * GetBailoutReason(BailoutReason reason)
static const int kLengthOffset
void Printf(const char *format, const CPURegister &arg0=NoCPUReg, const CPURegister &arg1=NoCPUReg, const CPURegister &arg2=NoCPUReg, const CPURegister &arg3=NoCPUReg)
void ldp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &src)
void ThrowUncatchable(Register value)
void StoreRoot(Register source, Heap::RootListIndex index, Condition cond=al)
void RecordComment(const char *msg)
MemOperand FieldMemOperand(Register object, int offset)
CpuProfiler * cpu_profiler() const
static const int kContextOffset
static const int kHasNonInstancePrototype
const int kNumSafepointRegisters
bool emit_debug_code() const
void LoadGlobalFunctionInitialMap(Register function, Register map, Register scratch)
static ExternalReference GetAllocationTopReference(Isolate *isolate, AllocationFlags flags)
void GetNumberHash(Register t0, Register scratch)
void hint(SystemHint code)
static FPRegister DRegFromCode(unsigned code)
static const int kLastExitFrameField
void CallRuntime(const Runtime::Function *f, int num_arguments, SaveFPRegsMode save_doubles=kDontSaveFPRegs)
static const int kFormalParameterCountOffset
static const int kBitField3Offset
void TryConvertDoubleToInt64(Register as_int, FPRegister value, FPRegister scratch_d, Label *on_successful_conversion=NULL, Label *on_failed_conversion=NULL)
void Msr(SystemRegister sysreg, const Register &rt)
~UseScratchRegisterScope()
void Mrs(const Register &rt, SystemRegister sysreg)
const intptr_t kPointerAlignment
void EmitStringData(const char *string)
static Register XRegFromCode(unsigned code)
void mov(Register dst, const Operand &src, SBit s=LeaveCC, Condition cond=al)
Register UnsafeAcquire(const Register &reg)
static const int kHeaderSize
void BumpSystemStackPointer(const Operand &space)
void PokePair(const CPURegister &src1, const CPURegister &src2, int offset)
std::multimap< int, FarBranchInfo > unresolved_branches_
void JumpIfNotRoot(const Register &obj, Heap::RootListIndex index, Label *if_not_equal)
void JumpIfBothInstanceTypesAreNotSequentialAscii(Register first_object_instance_type, Register second_object_instance_type, Register scratch1, Register scratch2, Label *failure)
void Cmp(const Register &rn, const Operand &operand)
void Tst(const Register &rn, const Operand &operand)
void UndoAllocationInNewSpace(Register object, Register scratch)
static int32_t ImmBranchRange(ImmBranchType branch_type)
void PeekPair(const CPURegister &dst1, const CPURegister &dst2, int offset)
void Prologue(PrologueFrameMode frame_mode)
Assembler(Isolate *isolate, void *buffer, int buffer_size)
void Lsl(const Register &rd, const Register &rn, unsigned shift)
void JumpIfObjectType(Register object, Register map, Register type_reg, InstanceType type, Label *if_cond_pass, Condition cond=eq)
#define ASSERT_EQ(v1, v2)
static const int kPointersFromHereAreInterestingMask
const unsigned kWordSizeInBytesLog2
static const int kElementsKindShift
void Call(Register target, Condition cond=al)
void Scvtf(const FPRegister &fd, const Register &rn, unsigned fbits=0)
static const int kConstructorOffset
const uint32_t kOneByteStringTag
void AllocateAsciiSlicedString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
void AssertStackConsistency()
static double canonical_not_the_hole_nan_as_double()
void Ccmp(const Register &rn, const Operand &operand, StatusFlags nzcv, Condition cond)
static int ActivationFrameAlignment()
void CheckPageFlagClear(const Register &object, const Register &scratch, int mask, Label *if_all_clear)
void CheckRegisterIsClear(Register reg, BailoutReason reason)
void Check(Condition cond, BailoutReason reason)
void InlineData(uint64_t data)
void JumpIfNotSmi(Register value, Label *not_smi_label)
void PushCPURegList(CPURegList registers)
PositionsRecorder * positions_recorder()
void Assert(Condition cond, BailoutReason reason)
void movn(const Register &rd, uint64_t imm, int shift=-1)
const uint32_t kDebugZapValue
void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, const CallWrapper &call_wrapper=NullCallWrapper())
void Claim(uint64_t count, uint64_t unit_size=kXRegSize)
void Br(const Register &xn)
const uint32_t kIsIndirectStringTag
static const int8_t kMaximumBitField2FastHoleySmiElementValue
void Adds(const Register &rd, const Register &rn, const Operand &operand)
static const int kPrototypeOffset
const LowDwVfpRegister d12
void StoreNumberToDoubleElements(Register value_reg, Register key_reg, Register elements_reg, Register scratch1, LowDwVfpRegister double_scratch, Label *fail, int elements_offset=0)
void AssertSizeOfCodeGeneratedSince(const Label *label, ptrdiff_t size)
void TailCallStub(CodeStub *stub, Condition cond=al)
void Smull(const Register &rd, const Register &rn, const Register &rm)
static const int kElementsStartOffset
void TestAndSplit(const Register &reg, uint64_t bit_pattern, Label *if_all_clear, Label *if_any_set, Label *fall_through)
static const int kHashShift
void AssertName(Register object)
void Csel(const Register &rd, const Register &rn, const Operand &operand, Condition cond)
void EnsureNotWhite(Register object, Register scratch1, Register scratch2, Register scratch3, Label *object_is_white_and_not_data)
const LowDwVfpRegister d1
static void EmitCodeAgeSequence(Assembler *assm, Code *stub)
static const char * kImpossibleBitPattern
void JumpIfEitherIsNotSequentialAsciiStrings(Register first, Register second, Register scratch1, Register scratch2, Label *failure, SmiCheckType smi_check=DO_SMI_CHECK)
static const uint32_t kBitsPerCellLog2
void PopCalleeSavedRegisters()
int LeaveFrame(StackFrame::Type type)
static const int kPointersToHereAreInterestingMask
void JumpIfEitherSmi(Register reg1, Register reg2, Label *on_either_smi)
static const int kNativeContextOffset
void Asr(const Register &rd, const Register &rn, unsigned shift)
MemOperand ContextMemOperand(Register context, int index)
static bool IsYoungSequence(byte *sequence)
static bool IsImmLogical(uint64_t value, unsigned width, unsigned *n, unsigned *imm_s, unsigned *imm_r)
void IndexFromHash(Register hash, Register index)
void adr(const Register &rd, Label *label)
static const int kCompilerHintsOffset
void TailCallExternalReference(const ExternalReference &ext, int num_arguments, int result_size)
static Operand UntagSmi(Register smi)
static const int kEmptyHashField
static const int kSharedFunctionInfoOffset
void EnterExitFrame(bool save_doubles, int stack_space=0)
void AddSubMacro(const Register &rd, const Register &rn, const Operand &operand, FlagsUpdate S, AddSubOp op)
void AssertIsString(const Register &object)
const LowDwVfpRegister d8
void CheckPageFlagSet(const Register &object, const Register &scratch, int mask, Label *if_any_set)
void EmitFrameSetupForCodeAgePatching()
void TailCallRuntime(Runtime::FunctionId fid, int num_arguments, int result_size)
static const int kBitField2Offset
void AllocateTwoByteString(Register result, Register length, Register scratch1, Register scratch2, Register scratch3, Label *gc_required)
void LogicalImmediate(const Register &rd, const Register &rn, unsigned n, unsigned imm_s, unsigned imm_r, LogicalOp op)
void Blr(const Register &xn)
Register AcquireSameSizeAs(const Register &reg)
void Sub(const Register &rd, const Register &rn, const Operand &operand)
static CPURegList GetSafepointSavedRegisters()
void EmitShift(const Register &rd, const Register &rn, Shift shift, unsigned amount)
void LoadRoot(Register destination, Heap::RootListIndex index, Condition cond=al)
void RememberedSetHelper(Register object, Register addr, Register scratch, SaveFPRegsMode save_fp, RememberedSetFinalAction and_then)
const intptr_t kDoubleAlignment
void sub(Register dst, Register src1, const Operand &src2, SBit s=LeaveCC, Condition cond=al)
void HasColor(Register object, Register scratch0, Register scratch1, Label *has_color, int first_bit, int second_bit)
static InstructionSequence * At(Address address)
void Fcvt(const FPRegister &fd, const FPRegister &fn)
MemOperand GlobalObjectMemOperand()
void LoadLiteral(const CPURegister &rt, int offset_from_pc)
static const int kCodeOffset
const uint32_t kStringEncodingMask
bool is_uintn(int64_t x, unsigned n)
bool AreAliased(const CPURegister &reg1, const CPURegister &reg2, const CPURegister &reg3=NoReg, const CPURegister &reg4=NoReg, const CPURegister &reg5=NoReg, const CPURegister &reg6=NoReg, const CPURegister &reg7=NoReg, const CPURegister &reg8=NoReg)
void CheckEnumCache(Register null_value, Label *call_runtime)
static const int kInstanceTypeOffset
void PopSafepointRegisters()
void AssertUndefinedOrAllocationSite(Register object, Register scratch)
void AllocateHeapNumberWithValue(Register result, DwVfpRegister value, Register scratch1, Register scratch2, Register heap_number_map, Label *gc_required)
void LogicalMacro(const Register &rd, const Register &rn, const Operand &operand, LogicalOp op)
static const int kCallApiFunctionSpillSpace
void Mov(const Register &rd, const Operand &operand, DiscardMoveMode discard_mode=kDontDiscardForSameWReg)
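
The entries above are the Assembler/MacroAssembler helpers and constants referenced by this file. As a purely illustrative sketch (not code from the file itself), the fragment below shows how a few of them are typically combined in a generator function; the function name GenerateAddTwoSmi, the masm-> calling style and the surrounding setup are assumptions made for the example, while JumpIfNotSmi, UseScratchRegisterScope::AcquireSameSizeAs, Mov, Adds and Smi::FromInt all appear in the declarations listed here.

// Illustrative sketch only. Adds the tagged constant Smi(2) to `object`,
// deferring to the caller-provided `bailout` label when the input is not
// a Smi. Assumes the usual v8::internal headers and namespace.
static void GenerateAddTwoSmi(MacroAssembler* masm,
                              Register object,
                              Label* bailout) {
  // Non-Smi inputs take the slow path supplied by the caller.
  masm->JumpIfNotSmi(object, bailout);

  // Borrow a scratch register of the same width as `object`; the scope
  // hands it back to the assembler's temp list when it is destroyed.
  UseScratchRegisterScope temps(masm);
  Register temp = temps.AcquireSameSizeAs(object);

  // Materialize the tagged bit pattern of Smi(2) and add it. The sum of two
  // valid Smis is again a valid Smi unless it overflows; Adds sets the flags
  // so a caller could also branch on overflow.
  masm->Mov(temp, reinterpret_cast<int64_t>(Smi::FromInt(2)));
  masm->Adds(object, object, temp);
}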