#if V8_TARGET_ARCH_ARM

    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),

void MacroAssembler::Jump(Register target, Condition cond) {

void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  mov(pc, Operand(target, rmode), LeaveCC, cond);

void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
  ASSERT(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode, cond);

void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);

int MacroAssembler::CallSize(Register target, Condition cond) {

void MacroAssembler::Call(Register target, Condition cond) {
  BlockConstPoolScope block_const_pool(this);
  ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
int MacroAssembler::CallSize(
  int size = 2 * kInstrSize;
  intptr_t immediate = reinterpret_cast<intptr_t>(target);
  if (!Operand(immediate, rmode).is_single_instruction(this, mov_instr)) {

int MacroAssembler::CallSizeNotPredictableCodeSize(
  int size = 2 * kInstrSize;
  intptr_t immediate = reinterpret_cast<intptr_t>(target);
  if (!Operand(immediate, rmode).is_single_instruction(NULL, mov_instr)) {

void MacroAssembler::Call(Address target,
                          RelocInfo::Mode rmode,
  BlockConstPoolScope block_const_pool(this);
  bool old_predictable_code_size = predictable_code_size();
    set_predictable_code_size(true);
  positions_recorder()->WriteRecordedPositions();
  mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
  ASSERT_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
    set_predictable_code_size(old_predictable_code_size);
int MacroAssembler::CallSize(Handle<Code> code,
                             RelocInfo::Mode rmode,
                             TypeFeedbackId ast_id,
  return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);

void MacroAssembler::Call(Handle<Code> code,
                          RelocInfo::Mode rmode,
                          TypeFeedbackId ast_id,
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
    SetRecordedAstId(ast_id);
    rmode = RelocInfo::CODE_TARGET_WITH_ID;
  Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);

void MacroAssembler::Ret(Condition cond) {

void MacroAssembler::Drop(int count, Condition cond) {

void MacroAssembler::Ret(int drop, Condition cond) {

void MacroAssembler::Swap(Register reg1,
    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
    eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
    mov(scratch, reg1, LeaveCC, cond);
    mov(reg1, reg2, LeaveCC, cond);
    mov(reg2, scratch, LeaveCC, cond);
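
// Note on the two sequences above: the three exclusive-ors swap reg1 and reg2
// in place (reg1 ^= reg2; reg2 ^= reg1; reg1 ^= reg2) so no scratch register
// is needed, while the three-move sequence goes through the scratch register
// when one is provided.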
void MacroAssembler::Call(Label* target) {

void MacroAssembler::Push(Handle<Object> handle) {
  mov(ip, Operand(handle));

void MacroAssembler::Move(Register dst, Handle<Object> value) {
  if (value->IsSmi()) {
    mov(dst, Operand(value));
    ASSERT(value->IsHeapObject());
    if (isolate()->heap()->InNewSpace(*value)) {
      Handle<Cell> cell = isolate()->factory()->NewCell(value);
      mov(dst, Operand(cell));
      mov(dst, Operand(value));

void MacroAssembler::Move(Register dst, Register src, Condition cond) {

void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {

void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
  if (!src2.is_reg() &&
      !src2.must_output_reloc_info(this) &&
      src2.immediate() == 0) {
    mov(dst, Operand::Zero(), LeaveCC, cond);
  } else if (!src2.is_single_instruction(this) &&
             !src2.must_output_reloc_info(this) &&
             CpuFeatures::IsSupported(ARMv7) &&
         WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
    and_(dst, src1, src2, LeaveCC, cond);
void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    and_(dst, src1, Operand(mask), LeaveCC, cond);
      mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
    ubfx(dst, src1, lsb, width, cond);

void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    and_(dst, src1, Operand(mask), LeaveCC, cond);
    int shift_up = 32 - lsb - width;
    int shift_down = lsb + shift_up;
      mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
    if (shift_down != 0) {
      mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
    sbfx(dst, src1, lsb, width, cond);
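
// The pre-ARMv7 fall-back above builds the bitfield mask by hand:
// (1 << (width + lsb)) - 1 - ((1 << lsb) - 1) keeps exactly the bits
// [lsb, lsb + width); for example lsb = 4, width = 8 gives 0xFFF - 0xF = 0xFF0.
// Ubfx then shifts the field down with LSR, whereas Sbfx first shifts it up to
// bit 31 and back down with ASR so the extracted field is sign-extended.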
void MacroAssembler::Bfi(Register dst,
  ASSERT(0 <= lsb && lsb < 32);
  ASSERT(0 <= width && width < 32);
  if (width == 0) return;
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    bic(dst, dst, Operand(mask));
    and_(scratch, src, Operand((1 << width) - 1));
    mov(scratch, Operand(scratch, LSL, lsb));
    orr(dst, dst, scratch);
    bfi(dst, src, lsb, width, cond);

void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    bic(dst, src, Operand(mask));
    Move(dst, src, cond);
    bfc(dst, lsb, width, cond);

void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    ASSERT((satpos >= 0) && (satpos <= 31));
    ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL));
    int satval = (1 << satpos) - 1;
    if (!(src.is_reg() && dst.is(src.rm()))) {
    tst(dst, Operand(~satval));
    usat(dst, satpos, src, cond);
  if (r.IsInteger8()) {
  } else if (r.IsUInteger8()) {
  } else if (r.IsInteger16()) {
  } else if (r.IsUInteger16()) {

void MacroAssembler::Store(Register src,
  if (r.IsInteger8() || r.IsUInteger8()) {
  } else if (r.IsInteger16() || r.IsUInteger16()) {

void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index,
      isolate()->heap()->RootCanBeTreatedAsConstant(index) &&
      !predictable_code_size()) {
    Handle<Object> root(&isolate()->heap()->roots_array_start()[index]);
    mov(destination, Operand(root), LeaveCC, cond);

void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index,

void MacroAssembler::InNewSpace(Register object,
  and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
  cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
void MacroAssembler::RecordWriteField(
  JumpIfSmi(value, &done);
  if (emit_debug_code()) {
    stop("Unaligned cell in write barrier");
              remembered_set_action,
  if (emit_debug_code()) {
    mov(value, Operand(BitCast<int32_t>(kZapValue + 4)));
    mov(dst, Operand(BitCast<int32_t>(kZapValue + 8)));

void MacroAssembler::RecordWrite(Register object,
  ASSERT(!object.is(value));
  if (emit_debug_code()) {
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
  isolate()->counters()->write_barriers_static()->Increment();
  JumpIfSmi(value, &done);
                MemoryChunk::kPointersToHereAreInterestingMask,
  CheckPageFlag(object,
                MemoryChunk::kPointersFromHereAreInterestingMask,
  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
  if (emit_debug_code()) {
    mov(address, Operand(BitCast<int32_t>(kZapValue + 12)));
    mov(value, Operand(BitCast<int32_t>(kZapValue + 16)));

void MacroAssembler::RememberedSetHelper(Register object,
                                         RememberedSetFinalAction and_then) {
  if (emit_debug_code()) {
    JumpIfNotInNewSpace(object, scratch, &ok);
    stop("Remembered set pointer is in new space");
  ExternalReference store_buffer =
      ExternalReference::store_buffer_top(isolate());
  mov(ip, Operand(store_buffer));
  tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
  if (and_then == kFallThroughAtEnd) {
    ASSERT(and_then == kReturnAtEnd);
  StoreBufferOverflowStub store_buffer_overflow =
      StoreBufferOverflowStub(fp_mode);
  CallStub(&store_buffer_overflow);
  if (and_then == kReturnAtEnd) {
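
// The helper above records a slot address in the store buffer: the buffer's
// top pointer is loaded from ExternalReference::store_buffer_top(), the
// overflow bit folded into that pointer is tested, and when it is set the
// StoreBufferOverflowStub is called to flush the buffer before either falling
// through or returning, as selected by |and_then|.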
void MacroAssembler::PushFixedFrame(Register marker_reg) {
  ASSERT(!marker_reg.is_valid() || marker_reg.code() < cp.code());
  stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
                (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |

void MacroAssembler::PopFixedFrame(Register marker_reg) {
  ASSERT(!marker_reg.is_valid() || marker_reg.code() < cp.code());
  ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
                (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |

void MacroAssembler::PushSafepointRegisters() {

void MacroAssembler::PopSafepointRegisters() {

void MacroAssembler::PushSafepointRegistersAndDoubles() {
  ASSERT(!Serializer::enabled());
  PushSafepointRegisters();
  ASSERT(DwVfpRegister::NumReservedRegisters() == 2);

void MacroAssembler::PopSafepointRegistersAndDoubles() {
  ASSERT(!Serializer::enabled());
  ASSERT(DwVfpRegister::NumReservedRegisters() == 2);
  PopSafepointRegisters();

void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
  str(src, SafepointRegistersAndDoublesSlot(dst));

void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
  str(src, SafepointRegisterSlot(dst));

void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  ldr(dst, SafepointRegisterSlot(src));

int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {

MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {

MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
  ASSERT(!Serializer::enabled());
  int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
void MacroAssembler::Ldrd(Register dst1, Register dst2,
  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
      (dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) {
    CpuFeatureScope scope(this, ARMv7);
    ldrd(dst1, dst2, src, cond);
      src2.set_offset(src2.offset() + 4);
      if (dst1.is(src.rn())) {
        ldr(dst2, src2, cond);
        ldr(dst1, src, cond);
        ldr(dst1, src, cond);
        ldr(dst2, src2, cond);
      if (dst1.is(src.rn())) {
        ldr(dst1, src, cond);
        src2.set_offset(src2.offset() - 4);
        ldr(dst2, src2, cond);

void MacroAssembler::Strd(Register src1, Register src2,
  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
      (src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) {
    CpuFeatureScope scope(this, ARMv7);
    strd(src1, src2, dst, cond);
    dst2.set_offset(dst2.offset() + 4);
    str(src1, dst, cond);
    str(src2, dst2, cond);
    dst2.set_offset(dst2.offset() - 4);
    str(src2, dst2, cond);
void MacroAssembler::VFPEnsureFPSCRState(Register scratch) {

void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
                                        const DwVfpRegister src,

void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
                                           const DwVfpRegister src2,
  VFPCompareAndLoadFlags(src1, src2, pc, cond);

void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
  VFPCompareAndLoadFlags(src1, src2, pc, cond);

void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
                                            const DwVfpRegister src2,
                                            const Register fpscr_flags,
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);

void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
                                            const Register fpscr_flags,
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);

void MacroAssembler::Vmov(const DwVfpRegister dst,
                          const Register scratch) {
  static const DoubleRepresentation minus_zero(-0.0);
  static const DoubleRepresentation zero(0.0);
  DoubleRepresentation value_rep(imm);
  if (value_rep == zero) {
  } else if (value_rep == minus_zero) {
    vmov(dst, imm, scratch);

void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) {
  if (src.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
    vmov(dst, loc.high());

void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) {
  if (dst.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
    vmov(loc.high(), src);

void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) {
  if (src.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
    vmov(dst, loc.low());

void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
  if (dst.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
    vmov(loc.low(), src);
void MacroAssembler::LoadConstantPoolPointerRegister() {
  if (FLAG_enable_ool_constant_pool) {
    int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize -
        pc_offset() - Instruction::kPCReadOffset;
    ASSERT(ImmediateFitsAddrMode2Instruction(constant_pool_offset));

  add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
  PredictableCodeSizeScope predictible_code_size_scope(
      this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
  if (isolate()->IsCodePreAgingActive()) {
    Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
    add(r0, pc, Operand(-8));
    emit_code_stub_address(stub);
  add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
  if (FLAG_enable_ool_constant_pool) {
    LoadConstantPoolPointerRegister();
    set_constant_pool_available(true);

                                bool load_constant_pool) {
  if (FLAG_enable_ool_constant_pool && load_constant_pool) {
    LoadConstantPoolPointerRegister();
  mov(ip, Operand(Smi::FromInt(type)));
  mov(ip, Operand(CodeObject()));
      Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
  if (FLAG_enable_ool_constant_pool) {
    add(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset));
    frame_ends = pc_offset();
    frame_ends = pc_offset();
void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
  mov(fp, Operand(sp));
  sub(sp, sp, Operand(ExitFrameConstants::kFrameSize));
  if (emit_debug_code()) {
    mov(ip, Operand::Zero());
  if (FLAG_enable_ool_constant_pool) {
    LoadConstantPoolPointerRegister();
  mov(ip, Operand(CodeObject()));
  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  if (frame_alignment > 0) {
    and_(sp, sp, Operand(-frame_alignment));

void MacroAssembler::InitializeNewString(Register string,
                                         Heap::RootListIndex map_index,
                                         Register scratch2) {
  SmiTag(scratch1, length);
  LoadRoot(scratch2, map_index);
  mov(scratch1, Operand(String::kEmptyHashField));

int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_ARM
  return OS::ActivationFrameAlignment();
#else  // V8_HOST_ARCH_ARM
  return FLAG_sim_stack_alignment;
#endif  // V8_HOST_ARCH_ARM
void MacroAssembler::LeaveExitFrame(bool save_doubles,
                                    Register argument_count,
                                    bool restore_context) {
  ConstantPoolUnavailableScope constant_pool_unavailable(this);
    const int offset = ExitFrameConstants::kFrameSize;
        Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
    RestoreFPRegs(r3, ip);
  mov(r3, Operand::Zero());
  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  if (restore_context) {
    mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  if (FLAG_enable_ool_constant_pool) {
    ldr(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
  mov(sp, Operand(fp));
  if (argument_count.is_valid()) {

void MacroAssembler::MovFromFloatResult(const DwVfpRegister dst) {
  if (use_eabi_hardfloat()) {

void MacroAssembler::MovFromFloatParameter(DwVfpRegister dst) {
  MovFromFloatResult(dst);
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    bool* definitely_mismatches,
                                    const CallWrapper& call_wrapper) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label regular_invoke;
  ASSERT(actual.is_immediate() || actual.reg().is(r0));
  ASSERT(expected.is_immediate() || expected.reg().is(r2));
  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));
  if (expected.is_immediate()) {
    ASSERT(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
      mov(r0, Operand(actual.immediate()));
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        definitely_matches = true;
        *definitely_mismatches = true;
        mov(r2, Operand(expected.immediate()));
    if (actual.is_immediate()) {
      cmp(expected.reg(), Operand(actual.immediate()));
      b(eq, &regular_invoke);
      mov(r0, Operand(actual.immediate()));
      cmp(expected.reg(), Operand(actual.reg()));
      b(eq, &regular_invoke);
  if (!definitely_matches) {
    if (!code_constant.is_null()) {
      mov(r3, Operand(code_constant));
    Handle<Code> adaptor =
        isolate()->builtins()->ArgumentsAdaptorTrampoline();
      call_wrapper.BeforeCall(CallSize(adaptor));
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    bind(&regular_invoke);
void MacroAssembler::InvokeCode(Register code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                const CallWrapper& call_wrapper) {
  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, Handle<Code>::null(), code,
                 &done, &definitely_mismatches, flag,
  if (!definitely_mismatches) {
      call_wrapper.BeforeCall(CallSize(code));
      call_wrapper.AfterCall();

void MacroAssembler::InvokeFunction(Register fun,
                                    const ParameterCount& actual,
                                    const CallWrapper& call_wrapper) {
  Register expected_reg = r2;
  Register code_reg = r3;
                           SharedFunctionInfo::kFormalParameterCountOffset));
  SmiUntag(expected_reg);
  ParameterCount expected(expected_reg);
  InvokeCode(code_reg, expected, actual, flag, call_wrapper);

void MacroAssembler::InvokeFunction(Register function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    const CallWrapper& call_wrapper) {
  InvokeCode(r3, expected, actual, flag, call_wrapper);

void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    const CallWrapper& call_wrapper) {
  InvokeFunction(r1, expected, actual, flag, call_wrapper);

void MacroAssembler::IsObjectJSObjectType(Register heap_object,
  IsInstanceJSObjectType(map, scratch, fail);
void MacroAssembler::IsInstanceJSObjectType(Register map,

void MacroAssembler::IsObjectJSStringType(Register object,

void MacroAssembler::IsObjectNameType(Register object,

#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
  mov(r0, Operand::Zero());
  mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
  ASSERT(AllowThisStubCall(&ces));

void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
                                    int handler_index) {
                   StackHandler::IndexField::encode(handler_index) |
                   StackHandler::KindField::encode(kind);
  mov(r5, Operand(CodeObject()));
  mov(r6, Operand(state));
  if (kind == StackHandler::JS_ENTRY) {
    mov(cp, Operand(Smi::FromInt(0)));
    mov(ip, Operand::Zero());
  mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));

void MacroAssembler::PopTryHandler() {
  mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));

void MacroAssembler::JumpToHandlerEntry() {
  ConstantPoolUnavailableScope constant_pool_unavailable(this);
  if (FLAG_enable_ool_constant_pool) {
  mov(r2, Operand(r2, LSR, StackHandler::kKindWidth));
  add(pc, r1, Operand::SmiUntag(r2));
void MacroAssembler::Throw(Register value) {
  if (!value.is(r0)) {
  mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  JumpToHandlerEntry();

void MacroAssembler::ThrowUncatchable(Register value) {
  if (!value.is(r0)) {
  mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  Label fetch_next, check_kind;
  tst(r2, Operand(StackHandler::KindField::kMask));
  JumpToHandlerEntry();

void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
  Label same_contexts;
  ASSERT(!holder_reg.is(scratch));
  ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
  cmp(scratch, Operand::Zero());
  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
      Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
  ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
  if (emit_debug_code()) {
    LoadRoot(ip, Heap::kNativeContextMapRootIndex);
    cmp(holder_reg, ip);
    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
  cmp(scratch, Operand(ip));
  b(eq, &same_contexts);
  if (emit_debug_code()) {
    mov(holder_reg, ip);
    LoadRoot(ip, Heap::kNullValueRootIndex);
    cmp(holder_reg, ip);
    Check(ne, kJSGlobalProxyContextShouldNotBeNull);
    LoadRoot(ip, Heap::kNativeContextMapRootIndex);
    cmp(holder_reg, ip);
    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
  int token_offset = Context::kHeaderSize +
  cmp(scratch, Operand(ip));
  bind(&same_contexts);
void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
  LoadRoot(scratch, Heap::kHashSeedRootIndex);
  eor(t0, t0, Operand(scratch));
  mvn(scratch, Operand(t0));
  add(t0, scratch, Operand(t0, LSL, 15));
  eor(t0, t0, Operand(t0, LSR, 12));
  add(t0, t0, Operand(t0, LSL, 2));
  eor(t0, t0, Operand(t0, LSR, 4));
  mov(scratch, Operand(t0, LSL, 11));
  add(t0, t0, Operand(t0, LSL, 3));
  add(t0, t0, scratch);
  eor(t0, t0, Operand(t0, LSR, 16));
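
// The sequence above is the seeded integer hash used for number dictionaries;
// in plain C (all arithmetic on uint32_t) it reads:
//   hash ^= seed;
//   hash = ~hash + (hash << 15);
//   hash ^= hash >> 12;
//   hash += hash << 2;    // hash * 5
//   hash ^= hash >> 4;
//   hash *= 2057;         // hash + (hash << 3) + (hash << 11)
//   hash ^= hash >> 16;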
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
  GetNumberHash(t0, t1);
  ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
  sub(t1, t1, Operand(1));
  for (int i = 0; i < kNumberDictionaryProbes; i++) {
      add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
    and_(t2, t2, Operand(t1));
    ASSERT(SeededNumberDictionary::kEntrySize == 3);
    add(t2, t2, Operand(t2, LSL, 1));
    cmp(key, Operand(ip));
    if (i != kNumberDictionaryProbes - 1) {
  const int kDetailsOffset =
      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
  const int kValueOffset =
      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
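
// Probing above: the capacity is loaded and decremented to form a
// power-of-two mask, each probe offset is added to the hash and masked to get
// a bucket index, and t2 + (t2 << 1) scales that index by kEntrySize == 3 to
// reach the entry whose key slot is compared against |key|.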
void MacroAssembler::Allocate(int object_size,
  ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      mov(result, Operand(0x7091));
      mov(scratch1, Operand(0x7191));
      mov(scratch2, Operand(0x7291));
  ASSERT(!result.is(scratch1));
  ASSERT(!result.is(scratch2));
  ASSERT(!scratch1.is(scratch2));
  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);
  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
      reinterpret_cast<intptr_t>(allocation_top.address());
      reinterpret_cast<intptr_t>(allocation_limit.address());
  ASSERT((limit - top) == kPointerSize);
  Register topaddr = scratch1;
  mov(topaddr, Operand(allocation_top));
    ldm(ia, topaddr, result.bit() | ip.bit());
    if (emit_debug_code()) {
      Check(eq, kUnexpectedAllocationTop);
    cmp(result, Operand(ip));
    mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
  Register source = result;
    while (object_size != 0) {
      if (((object_size >> shift) & 0x03) == 0) {
        int bits = object_size & (0xff << shift);
        object_size -= bits;
        Operand bits_operand(bits);
        ASSERT(bits_operand.is_single_instruction(this));
        add(scratch2, source, bits_operand, SetCC, cond);
  cmp(scratch2, Operand(ip));
void MacroAssembler::Allocate(Register object_size,
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      mov(result, Operand(0x7091));
      mov(scratch1, Operand(0x7191));
      mov(scratch2, Operand(0x7291));
  ASSERT(!result.is(scratch1));
  ASSERT(!result.is(scratch2));
  ASSERT(!scratch1.is(scratch2));
  ExternalReference allocation_top =
      AllocationUtils::GetAllocationTopReference(isolate(), flags);
  ExternalReference allocation_limit =
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
      reinterpret_cast<intptr_t>(allocation_top.address());
      reinterpret_cast<intptr_t>(allocation_limit.address());
  ASSERT((limit - top) == kPointerSize);
  Register topaddr = scratch1;
  mov(topaddr, Operand(allocation_top));
  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    ldm(ia, topaddr, result.bit() | ip.bit());
    if (emit_debug_code()) {
      Check(eq, kUnexpectedAllocationTop);
  if ((flags & DOUBLE_ALIGNMENT) != 0) {
    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
      cmp(result, Operand(ip));
    mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
  if ((flags & SIZE_IN_WORDS) != 0) {
    add(scratch2, result, Operand(object_size), SetCC);
  cmp(scratch2, Operand(ip));
  if (emit_debug_code()) {
    Check(eq, kUnalignedAllocationInNewSpace);
  if ((flags & TAG_OBJECT) != 0) {

void MacroAssembler::UndoAllocationInNewSpace(Register object,
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  mov(scratch, Operand(new_space_allocation_top));
  cmp(object, scratch);
  Check(lt, kUndoAllocationOfNonAllocatedMemory);
  mov(scratch, Operand(new_space_allocation_top));
void MacroAssembler::AllocateTwoByteString(Register result,
                                           Label* gc_required) {
  mov(scratch1, Operand(length, LSL, 1));
  add(scratch1, scratch1,
  InitializeNewString(result,
                      Heap::kStringMapRootIndex,

void MacroAssembler::AllocateAsciiString(Register result,
                                         Label* gc_required) {
  add(scratch1, length,
  InitializeNewString(result,
                      Heap::kAsciiStringMapRootIndex,

void MacroAssembler::AllocateTwoByteConsString(Register result,
                                               Label* gc_required) {
  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
  InitializeNewString(result,
                      Heap::kConsStringMapRootIndex,

void MacroAssembler::AllocateAsciiConsString(Register result,
                                             Label* gc_required) {
  Label allocate_new_space, install_map;
  ExternalReference high_promotion_mode = ExternalReference::
      new_space_high_promotion_mode_active_address(isolate());
  mov(scratch1, Operand(high_promotion_mode));
  cmp(scratch1, Operand::Zero());
  b(eq, &allocate_new_space);
  Allocate(ConsString::kSize,
  bind(&allocate_new_space);
  Allocate(ConsString::kSize,
  InitializeNewString(result,
                      Heap::kConsAsciiStringMapRootIndex,

void MacroAssembler::AllocateTwoByteSlicedString(Register result,
                                                 Label* gc_required) {
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
  InitializeNewString(result,
                      Heap::kSlicedStringMapRootIndex,

void MacroAssembler::AllocateAsciiSlicedString(Register result,
                                               Label* gc_required) {
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
  InitializeNewString(result,
                      Heap::kSlicedAsciiStringMapRootIndex,
void MacroAssembler::CompareObjectType(Register object,
  const Register temp = type_reg.is(no_reg) ? ip : type_reg;
  CompareInstanceType(map, temp, type);

void MacroAssembler::CheckObjectTypeRange(Register object,
                                          Label* false_label) {
  sub(ip, ip, Operand(min_type));
  cmp(ip, Operand(max_type - min_type));

void MacroAssembler::CompareInstanceType(Register map,
  cmp(type_reg, Operand(type));

void MacroAssembler::CompareRoot(Register obj,
                                 Heap::RootListIndex index) {
  LoadRoot(ip, index);

void MacroAssembler::CheckFastElements(Register map,
  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));

void MacroAssembler::CheckFastObjectElements(Register map,
  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));

void MacroAssembler::CheckFastSmiElements(Register map,
  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));

void MacroAssembler::StoreNumberToDoubleElements(
                                      Register elements_reg,
                                      LowDwVfpRegister double_scratch,
                                      int elements_offset) {
  Label smi_value, store;
  JumpIfSmi(value_reg, &smi_value);
           isolate()->factory()->heap_number_map(),
  vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
  if (emit_debug_code()) {
    Assert(ne, kDefaultNaNModeNotSet);
  VFPCanonicalizeNaN(double_scratch);
  SmiToDouble(double_scratch, value_reg);
  add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg));
  vstr(double_scratch,
                       FixedDoubleArray::kHeaderSize - elements_offset));
void MacroAssembler::CompareMap(Register obj,
                                Label* early_success) {
  CompareMap(scratch, map, early_success);

void MacroAssembler::CompareMap(Register obj_map,
                                Label* early_success) {
  cmp(obj_map, Operand(map));

void MacroAssembler::CheckMap(Register obj,
    JumpIfSmi(obj, fail);
  CompareMap(obj, scratch, map, &success);

void MacroAssembler::CheckMap(Register obj,
                              Heap::RootListIndex index,
    JumpIfSmi(obj, fail);
  LoadRoot(ip, index);

void MacroAssembler::DispatchMap(Register obj,
                                 Handle<Code> success,
    JumpIfSmi(obj, &fail);
  mov(ip, Operand(map));
  Jump(success, RelocInfo::CODE_TARGET, eq);

void MacroAssembler::TryGetFunctionPrototype(Register function,
                                             bool miss_on_bound_function) {
  JumpIfSmi(function, miss);
  if (miss_on_bound_function) {
        Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
  tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
  b(ne, &non_instance);
  LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  CompareObjectType(result, scratch, scratch, MAP_TYPE);
  bind(&non_instance);
void MacroAssembler::CallStub(CodeStub* stub,
                              TypeFeedbackId ast_id,
  ASSERT(AllowThisStubCall(stub));
  Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id, cond);

void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
  Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, cond);

static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
  return ref0.address() - ref1.address();

void MacroAssembler::CallApiFunctionAndReturn(
    Register function_address,
    ExternalReference thunk_ref,
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate());
  const int kNextOffset = 0;
  const int kLimitOffset = AddressOffset(
      ExternalReference::handle_scope_limit_address(isolate()),
  const int kLevelOffset = AddressOffset(
      ExternalReference::handle_scope_level_address(isolate()),
  ASSERT(function_address.is(r1) || function_address.is(r2));
  Label profiler_disabled;
  Label end_profiler_check;
  bool* is_profiling_flag =
      isolate()->cpu_profiler()->is_profiling_address();
  mov(r9, Operand(reinterpret_cast<int32_t>(is_profiling_flag)));
  cmp(r9, Operand(0));
  b(eq, &profiler_disabled);
  mov(r3, Operand(thunk_ref));
  jmp(&end_profiler_check);
  bind(&profiler_disabled);
  Move(r3, function_address);
  bind(&end_profiler_check);
  mov(r9, Operand(next_address));
  add(r6, r6, Operand(1));
  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    PrepareCallCFunction(1, r0);
    mov(r0, Operand(ExternalReference::isolate_address(isolate())));
    CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
    PopSafepointRegisters();
  DirectCEntryStub stub;
  stub.GenerateCall(this, r3);
  if (FLAG_log_timer_events) {
    FrameScope frame(this, StackFrame::MANUAL);
    PushSafepointRegisters();
    PrepareCallCFunction(1, r0);
    mov(r0, Operand(ExternalReference::isolate_address(isolate())));
    CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
    PopSafepointRegisters();
  Label promote_scheduled_exception;
  Label exception_handled;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  Label return_value_loaded;
  ldr(r0, return_value_operand);
  bind(&return_value_loaded);
  if (emit_debug_code()) {
    Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
  sub(r6, r6, Operand(1));
  b(ne, &delete_allocated_handles);
  bind(&leave_exit_frame);
  LoadRoot(r4, Heap::kTheHoleValueRootIndex);
  mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate())));
  b(ne, &promote_scheduled_exception);
  bind(&exception_handled);
  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    ldr(cp, *context_restore_operand);
  mov(r4, Operand(stack_space));
  LeaveExitFrame(false, r4, !restore_context);
  bind(&promote_scheduled_exception);
  CallExternalReference(
      ExternalReference(Runtime::kHiddenPromoteScheduledException, isolate()),
  jmp(&exception_handled);
  bind(&delete_allocated_handles);
  PrepareCallCFunction(1, r5);
  mov(r0, Operand(ExternalReference::isolate_address(isolate())));
      ExternalReference::delete_handle_scope_extensions(isolate()), 1);
  jmp(&leave_exit_frame);
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
  return has_frame_ || !stub->SometimesSetsUpAFrame();

void MacroAssembler::IllegalOperation(int num_arguments) {
  if (num_arguments > 0) {
    add(sp, sp, Operand(num_arguments * kPointerSize));
  LoadRoot(r0, Heap::kUndefinedValueRootIndex);

void MacroAssembler::IndexFromHash(Register hash, Register index) {
         (1 << String::kArrayIndexValueBits));
  Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
  SmiTag(index, hash);

void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
  if (CpuFeatures::IsSupported(VFP3)) {
    vmov(value.low(), smi);
    vcvt_f64_s32(value, 1);
    vmov(value.low(), ip);
    vcvt_f64_s32(value, value.low());

void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input,
                                       LowDwVfpRegister double_scratch) {
  ASSERT(!double_input.is(double_scratch));
  vcvt_s32_f64(double_scratch.low(), double_input);
  vcvt_f64_s32(double_scratch, double_scratch.low());
  VFPCompareAndSetFlags(double_input, double_scratch);

void MacroAssembler::TryDoubleToInt32Exact(Register result,
                                           DwVfpRegister double_input,
                                           LowDwVfpRegister double_scratch) {
  ASSERT(!double_input.is(double_scratch));
  vcvt_s32_f64(double_scratch.low(), double_input);
  vmov(result, double_scratch.low());
  vcvt_f64_s32(double_scratch, double_scratch.low());
  VFPCompareAndSetFlags(double_input, double_scratch);
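
// Both TestDoubleIsInt32 and TryDoubleToInt32Exact rely on the same
// round-trip check: convert the double to int32 and back, then compare
// against the original value. The flags signal equality only if the value
// survived the round trip, i.e. it was exactly representable as a 32-bit
// integer; a NaN input compares unordered and fails the check as well.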
void MacroAssembler::TryInt32Floor(Register result,
                                   DwVfpRegister double_input,
                                   Register input_high,
                                   LowDwVfpRegister double_scratch,
  ASSERT(!result.is(input_high));
  ASSERT(!double_input.is(double_scratch));
  VmovHigh(input_high, double_input);
  Sbfx(result, input_high,
       HeapNumber::kExponentShift, HeapNumber::kExponentBits);
  cmp(result, Operand(-1));
  TryDoubleToInt32Exact(result, double_input, double_scratch);
  cmp(input_high, Operand::Zero());
  cmn(result, Operand(1));
  sub(result, result, Operand(1), SetCC);

void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
                                                DwVfpRegister double_input,
  vcvt_s32_f64(double_scratch.low(), double_input);
  vmov(result, double_scratch.low());
  sub(ip, result, Operand(1));
  cmp(ip, Operand(0x7ffffffe));

void MacroAssembler::TruncateDoubleToI(Register result,
                                       DwVfpRegister double_input) {
  TryInlineTruncateDoubleToI(result, double_input, &done);
  DoubleToIStub stub(sp, result, 0, true, true);

void MacroAssembler::TruncateHeapNumberToI(Register result,
  ASSERT(!result.is(object));
  vldr(double_scratch,
  TryInlineTruncateDoubleToI(result, double_scratch, &done);
  DoubleToIStub stub(object,

void MacroAssembler::TruncateNumberToI(Register object,
                                       Register heap_number_map,
                                       Label* not_number) {
  ASSERT(!result.is(object));
  UntagAndJumpIfSmi(result, object, &done);
  JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
  TruncateHeapNumberToI(result, object);

void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                         int num_least_bits) {
  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
    and_(dst, dst, Operand((1 << num_least_bits) - 1));

void MacroAssembler::GetLeastBitsFromInt32(Register dst,
                                           int num_least_bits) {
  and_(dst, src, Operand((1 << num_least_bits) - 1));
void MacroAssembler::CallRuntime(const Runtime::Function* f,
  if (f->nargs >= 0 && f->nargs != num_arguments) {
    IllegalOperation(num_arguments);
  mov(r0, Operand(num_arguments));
  mov(r1, Operand(ExternalReference(f, isolate())));
  CEntryStub stub(1, save_doubles);

void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                           int num_arguments) {
  mov(r0, Operand(num_arguments));
  mov(r1, Operand(ext));

void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
  mov(r0, Operand(num_arguments));
  JumpToExternalReference(ext);

void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
  TailCallExternalReference(ExternalReference(fid, isolate()),

void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
#if defined(__thumb__)
  ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
  mov(r1, Operand(builtin));
  Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);

void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   const CallWrapper& call_wrapper) {
  GetBuiltinEntry(r2, id);
    call_wrapper.BeforeCall(CallSize(r2));
    call_wrapper.AfterCall();

void MacroAssembler::GetBuiltinFunction(Register target,
                                        Builtins::JavaScript id) {
      MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
                  JSBuiltinsObject::OffsetOfFunctionWithId(id)));

void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  GetBuiltinFunction(r1, id);

void MacroAssembler::SetCounter(StatsCounter* counter, int value,
                                Register scratch1, Register scratch2) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(scratch1, Operand(value));
    mov(scratch2, Operand(ExternalReference(counter)));

void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(scratch2, Operand(ExternalReference(counter)));
    add(scratch1, scratch1, Operand(value));

void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(scratch2, Operand(ExternalReference(counter)));
    sub(scratch1, scratch1, Operand(value));

  if (emit_debug_code())
    Check(cond, reason);

void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
    LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
    LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
    Abort(kJSObjectWithFastElementsMapHasSlowElements);

    RecordComment("Abort message: ");
  if (FLAG_trap_on_abort) {
  mov(r0, Operand(Smi::FromInt(reason)));
    CallRuntime(Runtime::kAbort, 1);
    CallRuntime(Runtime::kAbort, 1);
  if (is_const_pool_blocked()) {
    static const int kExpectedAbortInstructions = 7;
    int abort_instructions = InstructionsGeneratedSince(&abort_start);
    ASSERT(abort_instructions <= kExpectedAbortInstructions);
    while (abort_instructions++ < kExpectedAbortInstructions) {

void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    for (int i = 1; i < context_chain_length; i++) {
      ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
void MacroAssembler::LoadTransitionedArrayMapConditional(
    Register map_in_out,
    Label* no_map_match) {
      MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
                         Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
  size_t offset = expected_kind * kPointerSize +
      FixedArrayBase::kHeaderSize;
  cmp(map_in_out, ip);
  b(ne, no_map_match);
  offset = transitioned_kind * kPointerSize +
      FixedArrayBase::kHeaderSize;

void MacroAssembler::LoadGlobalFunction(int index, Register function) {
      MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
                            GlobalObject::kNativeContextOffset));
  ldr(function, MemOperand(function, Context::SlotOffset(index)));

void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
  ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
    Abort(kGlobalFunctionsMustHaveInitialMap);

void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
    Label* not_power_of_two_or_zero) {
  sub(scratch, reg, Operand(1), SetCC);
  b(mi, not_power_of_two_or_zero);
  b(ne, not_power_of_two_or_zero);
void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
    Label* zero_and_neg,
    Label* not_power_of_two) {
  sub(scratch, reg, Operand(1), SetCC);
  b(mi, zero_and_neg);
  b(ne, not_power_of_two);

void MacroAssembler::JumpIfNotBothSmi(Register reg1,
                                      Label* on_not_both_smi) {
  b(ne, on_not_both_smi);

void MacroAssembler::UntagAndJumpIfSmi(
    Register dst, Register src, Label* smi_case) {
  SmiUntag(dst, src, SetCC);

void MacroAssembler::UntagAndJumpIfNotSmi(
    Register dst, Register src, Label* non_smi_case) {
  SmiUntag(dst, src, SetCC);
  b(cs, non_smi_case);
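
// SmiUntag with SetCC arithmetic-shifts the value right by the smi tag size,
// so the tag bit lands in the carry flag: carry clear means the operand was a
// smi (tag 0), carry set (the cs branch above) means it was a tagged heap
// object pointer.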
void MacroAssembler::JumpIfEitherSmi(Register reg1,
                                     Label* on_either_smi) {
  b(eq, on_either_smi);
void MacroAssembler::AssertNotSmi(Register object) {
  if (emit_debug_code()) {
    Check(ne, kOperandIsASmi);

void MacroAssembler::AssertSmi(Register object) {
  if (emit_debug_code()) {
    Check(eq, kOperandIsNotSmi);

void MacroAssembler::AssertString(Register object) {
  if (emit_debug_code()) {
    Check(ne, kOperandIsASmiAndNotAString);
    Check(lo, kOperandIsNotAString);

void MacroAssembler::AssertName(Register object) {
  if (emit_debug_code()) {
    Check(ne, kOperandIsASmiAndNotAName);
    Check(le, kOperandIsNotAName);

void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
  if (emit_debug_code()) {
    Label done_checking;
    AssertNotSmi(object);
    CompareRoot(object, Heap::kUndefinedValueRootIndex);
    b(eq, &done_checking);
    CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
    Assert(eq, kExpectedUndefinedOrCell);
    bind(&done_checking);

void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
  if (emit_debug_code()) {
    CompareRoot(reg, index);
    Check(eq, kHeapNumberMapRegisterClobbered);

void MacroAssembler::JumpIfNotHeapNumber(Register object,
                                         Register heap_number_map,
                                         Label* on_not_heap_number) {
  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  cmp(scratch, heap_number_map);
  b(ne, on_not_heap_number);
void MacroAssembler::LookupNumberStringCache(Register object,
  Register number_string_cache = result;
  Register mask = scratch3;
  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
  ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
  sub(mask, mask, Operand(1));
  Label load_result_from_cache;
  JumpIfSmi(object, &is_smi);
              Heap::kHeapNumberMapRootIndex,
  ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
  eor(scratch1, scratch1, Operand(scratch2));
  and_(scratch1, scratch1, Operand(mask));
      number_string_cache,
  Register probe = mask;
  JumpIfSmi(probe, not_found);
  vldr(d0, scratch2, HeapNumber::kValueOffset);
  vldr(d1, probe, HeapNumber::kValueOffset);
  VFPCompareAndSetFlags(d0, d1);
  b(&load_result_from_cache);
  Register scratch = scratch1;
  and_(scratch, mask, Operand(object, ASR, 1));
      number_string_cache,
  bind(&load_result_from_cache);
  ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
  IncrementCounter(isolate()->counters()->number_to_string_native(),

void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
  JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,

void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
  and_(scratch1, first, Operand(second));
  JumpIfSmi(scratch1, failure);
  JumpIfNonSmisNotBothSequentialAsciiStrings(first,

void MacroAssembler::JumpIfNotUniqueName(Register reg,
                                         Label* not_unique_name) {
  b(ne, not_unique_name);
void MacroAssembler::AllocateHeapNumber(Register result,
                                        Register heap_number_map,
  Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
    str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
    str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));

void MacroAssembler::AllocateHeapNumberWithValue(Register result,
                                                 DwVfpRegister value,
                                                 Register heap_number_map,
                                                 Label* gc_required) {
  AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
  vstr(value, scratch1, HeapNumber::kValueOffset);

void MacroAssembler::CopyFields(Register dst,
                                LowDwVfpRegister double_scratch,
  int double_count = field_count / (DwVfpRegister::kSizeInBytes / kPointerSize);
  for (int i = 0; i < double_count; i++) {
    vldr(double_scratch, FieldMemOperand(src, i * DwVfpRegister::kSizeInBytes));
    vstr(double_scratch, FieldMemOperand(dst, i * DwVfpRegister::kSizeInBytes));
  STATIC_ASSERT(2 * SwVfpRegister::kSizeInBytes == DwVfpRegister::kSizeInBytes);
  int remain = field_count % (DwVfpRegister::kSizeInBytes / kPointerSize);
    vldr(double_scratch.low(),
    vstr(double_scratch.low(),

  Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
  cmp(length, Operand(kPointerSize));
  bind(&align_loop_1);
  tst(src, Operand(kPointerSize - 1));
  sub(length, length, Operand(1), SetCC);
  if (emit_debug_code()) {
    tst(src, Operand(kPointerSize - 1));
    Assert(eq, kExpectingAlignmentForCopyBytes);
  cmp(length, Operand(kPointerSize));
  mov(scratch, Operand(scratch, LSR, 8));
  mov(scratch, Operand(scratch, LSR, 8));
  mov(scratch, Operand(scratch, LSR, 8));
  sub(length, length, Operand(kPointerSize));
  cmp(length, Operand::Zero());
  sub(length, length, Operand(1), SetCC);
  b(ne, &byte_loop_1);
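
// In the word-copy loop above, a whole word is loaded and then written out a
// byte at a time, shifting the scratch register right by 8 between stores so
// the least significant byte goes first; this path is taken when unaligned
// word stores cannot be used.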
void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
                                                Register end_offset,
  cmp(start_offset, end_offset);

void MacroAssembler::CheckFor32DRegs(Register scratch) {
  mov(scratch, Operand(ExternalReference::cpu_features()));

void MacroAssembler::SaveFPRegs(Register location, Register scratch) {
  CheckFor32DRegs(scratch);

void MacroAssembler::RestoreFPRegs(Register location, Register scratch) {
  CheckFor32DRegs(scratch);

void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
  const int kFlatAsciiStringMask =
  const int kFlatAsciiStringTag =
  and_(scratch1, first, Operand(kFlatAsciiStringMask));
  and_(scratch2, second, Operand(kFlatAsciiStringMask));
  cmp(scratch1, Operand(kFlatAsciiStringTag));
  cmp(scratch2, Operand(kFlatAsciiStringTag), eq);

void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
  const int kFlatAsciiStringMask =
  const int kFlatAsciiStringTag =
  and_(scratch, type, Operand(kFlatAsciiStringMask));
  cmp(scratch, Operand(kFlatAsciiStringTag));

static const int kRegisterPassedArguments = 4;

int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
                                              int num_double_arguments) {
  int stack_passed_words = 0;
  if (use_eabi_hardfloat()) {
    if (num_double_arguments > DoubleRegister::NumRegisters()) {
      stack_passed_words +=
          2 * (num_double_arguments - DoubleRegister::NumRegisters());
    num_reg_arguments += 2 * num_double_arguments;
  if (num_reg_arguments > kRegisterPassedArguments) {
    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
  return stack_passed_words;
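
// Per the ARM EABI used here, the first four core-register arguments travel
// in r0-r3. With the hard-float variant, doubles are passed in d0-d7 and only
// the excess spills to the stack; with soft-float, each double simply counts
// as two core-register words. Whatever does not fit in registers is passed on
// the stack, and this helper returns that word count.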
void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
                                               uint32_t encoding_mask) {
  Check(ne, kNonObject);
  cmp(ip, Operand(encoding_mask));
  Check(eq, kUnexpectedStringType);
  Label index_tag_ok, index_tag_bad;
  TrySmiTag(index, index, &index_tag_bad);
  bind(&index_tag_bad);
  Abort(kIndexIsTooLarge);
  bind(&index_tag_ok);
  Check(lt, kIndexIsTooLarge);
  cmp(index, Operand(Smi::FromInt(0)));
  Check(ge, kIndexIsNegative);
  SmiUntag(index, index);
void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          int num_double_arguments,
  int frame_alignment = ActivationFrameAlignment();
  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);
  if (frame_alignment > kPointerSize) {
    sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
    and_(sp, sp, Operand(-frame_alignment));
    str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
    sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));

void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
  PrepareCallCFunction(num_reg_arguments, 0, scratch);

void MacroAssembler::MovToFloatParameter(DwVfpRegister src) {
  if (!use_eabi_hardfloat()) {

void MacroAssembler::MovToFloatResult(DwVfpRegister src) {
  MovToFloatParameter(src);

void MacroAssembler::MovToFloatParameters(DwVfpRegister src1,
                                          DwVfpRegister src2) {
  if (!use_eabi_hardfloat()) {

void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  mov(ip, Operand(function));
  CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);

void MacroAssembler::CallCFunction(Register function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);

void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);

void MacroAssembler::CallCFunction(Register function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);

void MacroAssembler::CallCFunctionHelper(Register function,
                                         int num_reg_arguments,
                                         int num_double_arguments) {
#if V8_HOST_ARCH_ARM
  if (emit_debug_code()) {
    int frame_alignment = OS::ActivationFrameAlignment();
    int frame_alignment_mask = frame_alignment - 1;
    if (frame_alignment > kPointerSize) {
      Label alignment_as_expected;
      tst(sp, Operand(frame_alignment_mask));
      b(eq, &alignment_as_expected);
      stop("Unexpected alignment");
      bind(&alignment_as_expected);
  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);
  if (ActivationFrameAlignment() > kPointerSize) {
    add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
  const uint32_t kLdrOffsetMask = (1 << 12) - 1;
  if (emit_debug_code()) {
    if (FLAG_enable_ool_constant_pool) {
      Check(eq, kTheInstructionToPatchShouldBeALoadFromPp);
      Check(eq, kTheInstructionToPatchShouldBeALoadFromPc);
  and_(result, result, Operand(kLdrOffsetMask));
  if (FLAG_enable_ool_constant_pool) {
    add(result, pp, Operand(result));
    add(result, ldr_location, Operand(result));
    add(result, result, Operand(Instruction::kPCReadOffset));

void MacroAssembler::CheckPageFlag(
    Label* condition_met) {
  ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  tst(scratch, Operand(mask));
  b(cc, condition_met);

void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
                                        Label* if_deprecated) {
  if (map->CanBeDeprecated()) {
    mov(scratch, Operand(map));
    tst(scratch, Operand(Smi::FromInt(Map::Deprecated::kMask)));
    b(ne, if_deprecated);

void MacroAssembler::JumpIfBlack(Register object,
  HasColor(object, scratch0, scratch1, on_black, 1, 0);
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
void MacroAssembler::HasColor(Register object,
                              Register bitmap_scratch,
                              Register mask_scratch,
  GetMarkBits(object, bitmap_scratch, mask_scratch);
  Label other_color, word_boundary;
  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  tst(ip, Operand(mask_scratch));
  b(first_bit == 1 ? eq : ne, &other_color);
  add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
  b(eq, &word_boundary);
  tst(ip, Operand(mask_scratch));
  b(second_bit == 1 ? ne : eq, has_color);
  bind(&word_boundary);
  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
  tst(ip, Operand(1));
  b(second_bit == 1 ? ne : eq, has_color);

void MacroAssembler::JumpIfDataObject(Register value,
                                      Label* not_data_object) {
  Label is_data_object;
  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
  b(eq, &is_data_object);
  b(ne, not_data_object);
  bind(&is_data_object);

void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
  mov(ip, Operand(1));
  mov(mask_reg, Operand(ip, LSL, mask_reg));
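
// GetMarkBits above: clearing the page-alignment bits of the address leaves
// the page (and therefore marking bitmap) base in bitmap_reg, and the final
// shift turns mask_reg into a single-bit mask (1 << bit index) selecting the
// object's mark bit within its bitmap cell, as consumed by HasColor and
// EnsureNotWhite.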
3696 void MacroAssembler::EnsureNotWhite(
3698 Register bitmap_scratch,
3699 Register mask_scratch,
3700 Register load_scratch,
3701 Label* value_is_white_and_not_data) {
3703 GetMarkBits(value, bitmap_scratch, mask_scratch);
3706 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
3707 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
3708 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
3709 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
3715 ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3716 tst(mask_scratch, load_scratch);
3719 if (emit_debug_code()) {
3723 tst(load_scratch, Operand(mask_scratch, LSL, 1));
3725 stop("Impossible marking bit pattern");
3731 Register map = load_scratch;
3732 Register length = load_scratch;
3733 Label is_data_object;
3737 CompareRoot(map, Heap::kHeapNumberMapRootIndex);
3738 mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
3739 b(eq, &is_data_object);
3746 Register instance_type = load_scratch;
3749 b(ne, value_is_white_and_not_data);
3758 mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
3759 b(ne, &is_data_object);
3773 bind(&is_data_object);
3776 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3777 orr(ip, ip, Operand(mask_scratch));
3778 str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3780 and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
3781 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
3782 add(ip, ip, Operand(length));
3783 str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
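// Note on EnsureNotWhite: per the ASSERTs above, white is encoded as "00",
// black as "10" and grey as "11" in the mark bit pairs. A white object that is
// pure data (heap number, sequential or external string) is marked black
// directly by or-ing its first mark bit into the bitmap cell, and the page's
// live-byte counter is bumped by the object size kept in `length`. Anything
// else that is still white branches to value_is_white_and_not_data for the
// caller to handle.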
3789 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
3790 Usat(output_reg, 8, Operand(input_reg));
3794 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
3795 DwVfpRegister input_reg,
3796 LowDwVfpRegister double_scratch) {
3801 VFPCompareAndSetFlags(input_reg, 0.0);
3805 mov(result_reg, Operand::Zero());
3810 Vmov(double_scratch, 255.0, result_reg);
3811 VFPCompareAndSetFlags(input_reg, double_scratch);
3813 mov(result_reg, Operand(255));
3824 vmov(result_reg, double_scratch.low());
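// Note: the comparisons above clamp the double into [0, 255]: NaN and negative
// inputs take the result_reg = 0 path, inputs above 255 take the 255 path, and
// in-range values are converted to an integer by the (elided) VFP conversion
// whose result is moved out of double_scratch.low() here.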
3837 RecordComment("Throw message: ");
3842 mov(r0, Operand(Smi::FromInt(reason)));
3849 CallRuntime(Runtime::kHiddenThrowMessage, 1);
3851 CallRuntime(Runtime::kHiddenThrowMessage, 1);
3854 if (is_const_pool_blocked()) {
3858 static const int kExpectedThrowMessageInstructions = 10;
3859 int throw_instructions = InstructionsGeneratedSince(&throw_start);
3860 ASSERT(throw_instructions <= kExpectedThrowMessageInstructions);
3861 while (throw_instructions++ < kExpectedThrowMessageInstructions) {
3877 void MacroAssembler::LoadInstanceDescriptors(Register map,
3878 Register descriptors) {
3883 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3885 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3889 void MacroAssembler::EnumLength(Register dst, Register map) {
3892 and_(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
3896 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
3897 Register empty_fixed_array_value = r6;
3898 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
3907 cmp(r3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
3908 b(eq, call_runtime);
3917 cmp(r3, Operand(Smi::FromInt(0)));
3918 b(ne, call_runtime);
3926 cmp(r2, empty_fixed_array_value);
3927 b(eq, &no_elements);
3930 CompareRoot(r2, Heap::kEmptySlowElementDictionaryRootIndex);
3931 b(ne, call_runtime);
3935 cmp(r2, null_value);
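// Note on CheckEnumCache: the (partially elided) loop appears to walk the
// prototype chain starting at the receiver. It bails out to call_runtime if a
// map's enum length is the invalid-enum-cache sentinel (no enum cache), if an
// object past the receiver has a non-zero enum length, or if the elements are
// neither the empty fixed array nor the empty slow element dictionary; it
// stops once null_value is reached.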
3940 void MacroAssembler::TestJSArrayForAllocationMemento(
3941 Register receiver_reg,
3942 Register scratch_reg,
3943 Label* no_memento_found) {
3944 ExternalReference new_space_start =
3945 ExternalReference::new_space_start(isolate());
3946 ExternalReference new_space_allocation_top =
3947 ExternalReference::new_space_allocation_top_address(isolate());
3948 add(scratch_reg, receiver_reg,
3949 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
3950 cmp(scratch_reg, Operand(new_space_start));
3951 b(lt, no_memento_found);
3952 mov(ip, Operand(new_space_allocation_top));
3954 cmp(scratch_reg, ip);
3955 b(gt, no_memento_found);
3956 ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
3958 Operand(isolate()->factory()->allocation_memento_map()));
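// Note: an AllocationMemento, if present, is allocated immediately after the
// JSArray, so its end address is receiver + JSArray::kSize +
// AllocationMemento::kSize. The checks above only look for a memento when that
// address lies inside new space and below the current allocation top; the load
// at -AllocationMemento::kSize then fetches the candidate memento's map for
// comparison with the allocation memento map.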
3969 if (reg1.is_valid()) regs |= reg1.bit();
3970 if (reg2.is_valid()) regs |= reg2.bit();
3971 if (reg3.is_valid()) regs |= reg3.bit();
3972 if (reg4.is_valid()) regs |= reg4.bit();
3973 if (reg5.is_valid()) regs |= reg5.bit();
3974 if (reg6.is_valid()) regs |= reg6.bit();
3976 for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
3977 Register candidate = Register::FromAllocationIndex(i);
3978 if (regs & candidate.bit()) continue;
3986 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
3991 ASSERT(!scratch1.is(scratch0));
3992 Factory* factory = isolate()->factory();
3993 Register current = scratch0;
3997 mov(current, object);
4003 Ubfx(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount);
4007 cmp(current, Operand(factory->null_value()));
4019 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
4020 reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid();
4023 if (reg1.is_valid()) regs |= reg1.bit();
4024 if (reg2.is_valid()) regs |= reg2.bit();
4025 if (reg3.is_valid()) regs |= reg3.bit();
4026 if (reg4.is_valid()) regs |= reg4.bit();
4027 if (reg5.is_valid()) regs |= reg5.bit();
4028 if (reg6.is_valid()) regs |= reg6.bit();
4029 int n_of_non_aliasing_regs = NumRegs(regs);
4031 return n_of_valid_regs != n_of_non_aliasing_regs;
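// Note: AreAliased works by popcount. Each valid register contributes one bit
// to `regs`; if any two arguments name the same register, the number of
// distinct bits (NumRegs) is smaller than the number of valid arguments, so
// the inequality above reports aliasing. For example, (r0, r1, r0) gives
// n_of_valid_regs == 3 but only two distinct bits.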
4036 CodePatcher::CodePatcher(byte* address,
4038 FlushICache flush_cache)
4039 : address_(address),
4040 size_(instructions * Assembler::kInstrSize),
4041 masm_(NULL, address, size_ + Assembler::kGap),
4042 flush_cache_(flush_cache) {
4046 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4050 CodePatcher::~CodePatcher() {
4052 if (flush_cache_ == FLUSH) {
4053 CPU::FlushICache(address_, size_);
4057 ASSERT(masm_.pc_ == address_ + size_);
4058 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
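// Note: CodePatcher points a small MacroAssembler at existing code so that
// exactly `instructions` words can be overwritten in place. The destructor
// flushes the instruction cache over the patched range when FLUSH was
// requested, which matters on ARM because instruction fetch is not coherent
// with data-side writes, and the ASSERTs check that precisely the expected
// number of instructions was emitted.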
4062 void CodePatcher::Emit(Instr instr) {
4063 masm()->emit(instr);
4067 void CodePatcher::Emit(Address addr) {
4068 masm()->emit(reinterpret_cast<Instr>(addr));
4072 void CodePatcher::EmitCondition(Condition cond) {
4073 Instr instr = Assembler::instr_at(masm_.pc_);
4079 void MacroAssembler::TruncatingDiv(Register result,
4082 ASSERT(!dividend.is(result));
4085 MultiplierAndShift ms(divisor);
4086 mov(ip, Operand(ms.multiplier()));
4087 smull(ip, result, dividend, ip);
4088 if (divisor > 0 && ms.multiplier() < 0) {
4089 add(result, result, Operand(dividend));
4091 if (divisor < 0 && ms.multiplier() > 0) {
4092 sub(result, result, Operand(dividend));
4094 if (ms.shift() > 0) mov(result, Operand(result, ASR, ms.shift()));
4095 add(result, result, Operand(dividend, LSR, 31));
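// Worked example (illustrative constants, assuming the usual magic-number
// scheme that MultiplierAndShift implements): for divisor == 3 the multiplier
// is 0x55555556 with shift 0. smull keeps the high 32 bits of
// dividend * 0x55555556 in `result`; for dividend == 7 that high word is 2,
// and adding the sign bit (7 >> 31 == 0) leaves 2 == 7 / 3. For dividend == -7
// the high word is -3 and the sign bit adds 1, giving -2, i.e. truncation
// toward zero. The add/sub corrections above handle multipliers whose sign
// differs from the divisor's.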
4101 #endif // V8_TARGET_ARCH_ARM