#if defined(V8_TARGET_ARCH_ARM)

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      allow_stub_calls_(true),
      has_frame_(false) {
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}

// These checks require ARM/Thumb inter-working support when V8 itself is
// compiled to Thumb.
#if defined(__thumb__) && !defined(USE_THUMB_INTERWORK)
#error "flag -mthumb-interwork missing"
#endif

#if defined(USE_THUMB_INTERWORK) && !defined(CAN_USE_THUMB_INSTRUCTIONS)
#error "For thumb inter-working we require an architecture which supports blx"
#endif

#if defined(USE_THUMB_INTERWORK)
void MacroAssembler::Jump(Register target, Condition cond) {
  // ...
}

void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond) {
  mov(ip, Operand(target, rmode));
  // ...
  mov(pc, Operand(target, rmode), LeaveCC, cond);
}

void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
                          Condition cond) {
  ASSERT(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
}

void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond) {
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}

int MacroAssembler::CallSize(Register target, Condition cond) {
  // ...
  return 2 * kInstrSize;
}

void MacroAssembler::Call(Register target, Condition cond) {
  // Block constant pool for the call instruction sequence.
  BlockConstPoolScope block_const_pool(this);
  // ...
  ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
}
int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode,
                             Condition cond) {
  int size = 2 * kInstrSize;
  Instr mov_instr = cond | MOV | LeaveCC;
  intptr_t immediate = reinterpret_cast<intptr_t>(target);
  if (!Operand(immediate, rmode).is_single_instruction(this, mov_instr)) {
    size += kInstrSize;
  }
  return size;
}

int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
                                                   RelocInfo::Mode rmode,
                                                   Condition cond) {
  int size = 2 * kInstrSize;
  Instr mov_instr = cond | MOV | LeaveCC;
  intptr_t immediate = reinterpret_cast<intptr_t>(target);
  if (!Operand(immediate, rmode).is_single_instruction(NULL, mov_instr)) {
    size += kInstrSize;
  }
  return size;
}

void MacroAssembler::Call(Address target,
                          RelocInfo::Mode rmode,
                          Condition cond,
                          TargetAddressStorageMode mode) {
  // Block constant pool for the call instruction sequence.
  BlockConstPoolScope block_const_pool(this);
  // ...
  bool old_predictable_code_size = predictable_code_size();
  set_predictable_code_size(true);
  // ...
  positions_recorder()->WriteRecordedPositions();
  mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
  // ...
  mov(pc, Operand(reinterpret_cast<int32_t>(target), rmode), LeaveCC, cond);
  ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
  // ...
  set_predictable_code_size(old_predictable_code_size);
}
int MacroAssembler::CallSize(Handle<Code> code,
                             RelocInfo::Mode rmode,
                             TypeFeedbackId ast_id,
                             Condition cond) {
  return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
}

void MacroAssembler::Call(Handle<Code> code,
                          RelocInfo::Mode rmode,
                          TypeFeedbackId ast_id,
                          Condition cond,
                          TargetAddressStorageMode mode) {
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
    SetRecordedAstId(ast_id);
    rmode = RelocInfo::CODE_TARGET_WITH_ID;
  }
  // 'code' is always generated ARM code, never THUMB code.
  Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
}

void MacroAssembler::Ret(Condition cond) {
  // ...
}

void MacroAssembler::Drop(int count, Condition cond) {
  // ...
}

void MacroAssembler::Ret(int drop, Condition cond) {
  // ...
}

void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch,
                          Condition cond) {
  if (scratch.is(no_reg)) {
    // Swap without a scratch register: three exclusive-ors.
    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
    eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
  } else {
    mov(scratch, reg1, LeaveCC, cond);
    mov(reg1, reg2, LeaveCC, cond);
    mov(reg2, scratch, LeaveCC, cond);
  }
}
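// Illustrative sketch (not part of the original file): the two swap
// strategies above, expressed on plain integers. XorSwap mirrors the
// eor/eor/eor path used when no scratch register is available and assumes
// the two operands are distinct objects; ScratchSwap mirrors the
// mov/mov/mov path. Helper names are hypothetical.
static inline void XorSwap(unsigned& a, unsigned& b) {
  a ^= b;  // a = a ^ b
  b ^= a;  // b = b ^ (a ^ b) == old a
  a ^= b;  // a = (a ^ b) ^ old a == old b
}

static inline void ScratchSwap(unsigned& a, unsigned& b) {
  unsigned scratch = a;
  a = b;
  b = scratch;
}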
void MacroAssembler::Call(Label* target) {
  // ...
}

void MacroAssembler::Push(Handle<Object> handle) {
  mov(ip, Operand(handle));
  push(ip);
}

void MacroAssembler::Move(Register dst, Handle<Object> value) {
  mov(dst, Operand(value));
}

void MacroAssembler::Move(Register dst, Register src, Condition cond) {
  if (!dst.is(src)) {
    mov(dst, src, LeaveCC, cond);
  }
}

void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
  CpuFeatures::Scope scope(VFP2);
  if (!dst.is(src)) {
    vmov(dst, src);
  }
}

void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
                         Condition cond) {
  if (!src2.is_reg() &&
      !src2.must_output_reloc_info(this) &&
      src2.immediate() == 0) {
    // ...
  } else if (!src2.is_single_instruction(this) &&
             !src2.must_output_reloc_info(this) &&
             CpuFeatures::IsSupported(ARMv7) &&
             IsPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1)) {
    ubfx(dst, src1, 0,
         WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
  } else {
    and_(dst, src1, src2, LeaveCC, cond);
  }
}
void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
                          Condition cond) {
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    and_(dst, src1, Operand(mask), LeaveCC, cond);
    if (lsb != 0) {
      mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
    }
  } else {
    ubfx(dst, src1, lsb, width, cond);
  }
}

void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
                          Condition cond) {
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    and_(dst, src1, Operand(mask), LeaveCC, cond);
    int shift_up = 32 - lsb - width;
    int shift_down = lsb + shift_up;
    if (shift_up != 0) {
      mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
    }
    if (shift_down != 0) {
      mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
    }
  } else {
    sbfx(dst, src1, lsb, width, cond);
  }
}
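// Illustrative sketch (not part of the original file): the unsigned and
// signed bitfield extractions that the pre-ARMv7 fallbacks above emulate
// with and_/mov(LSR) and mov(LSL)/mov(ASR). Helper names are hypothetical;
// assumes 0 < width and lsb + width < 32 so the C++ shifts stay
// well-defined, and relies on two's-complement arithmetic right shift as
// on ARM.
static inline unsigned UnsignedBitfieldExtract(unsigned value,
                                               int lsb, int width) {
  unsigned mask = ((1u << width) - 1u) << lsb;  // bits [lsb, lsb + width)
  return (value & mask) >> lsb;
}

static inline int SignedBitfieldExtract(unsigned value, int lsb, int width) {
  int shift_up = 32 - lsb - width;    // move the field to the top...
  int shift_down = lsb + shift_up;    // ...then arithmetic-shift it back
  return static_cast<int>(value << shift_up) >> shift_down;
}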
void MacroAssembler::Bfi(Register dst,
                         Register src,
                         Register scratch,
                         int lsb,
                         int width,
                         Condition cond) {
  ASSERT(0 <= lsb && lsb < 32);
  ASSERT(0 <= width && width < 32);
  if (width == 0) return;
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    bic(dst, dst, Operand(mask));
    and_(scratch, src, Operand((1 << width) - 1));
    mov(scratch, Operand(scratch, LSL, lsb));
    orr(dst, dst, scratch);
  } else {
    bfi(dst, src, lsb, width, cond);
  }
}

void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
                         Condition cond) {
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    bic(dst, src, Operand(mask));
  } else {
    Move(dst, src, cond);
    bfc(dst, lsb, width, cond);
  }
}
void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
                          Condition cond) {
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    ASSERT((satpos >= 0) && (satpos <= 31));
    // Only shifts that usat itself could encode are allowed here.
    ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL));
    int satval = (1 << satpos) - 1;
    if (!(src.is_reg() && dst.is(src.rm()))) {
      mov(dst, src, LeaveCC, cond);
    }
    // Clamp: anything with bits above satpos set is out of range.
    tst(dst, Operand(~satval));
    // ...
  } else {
    usat(dst, satpos, src, cond);
  }
}
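// Illustrative sketch (not part of the original file): the saturation the
// non-ARMv7 path above performs with tst/cmp and conditional moves, i.e.
// clamping into the unsigned range [0, 2^satpos - 1]. Hypothetical helper
// name; assumes 0 <= satpos <= 30 so (1 << satpos) stays in range in C++.
static inline int UnsignedSaturate(int value, int satpos) {
  int satval = (1 << satpos) - 1;
  if (value < 0) return 0;            // negative -> 0
  if (value > satval) return satval;  // too large -> max
  return value;                       // already in range
}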
void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index,
                              Condition cond) {
  ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
}

void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index,
                               Condition cond) {
  str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
}

void MacroAssembler::LoadHeapObject(Register result,
                                    Handle<HeapObject> object) {
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<JSGlobalPropertyCell> cell =
        isolate()->factory()->NewJSGlobalPropertyCell(object);
    mov(result, Operand(cell));
    ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
  } else {
    mov(result, Operand(object));
  }
}

void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cond,
                                Label* branch) {
  and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
  cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
  b(cond, branch);
}
void MacroAssembler::RecordWriteField(/* ... */) {
  // Skip the write barrier if the value written is a smi.
  JumpIfSmi(value, &done);
  // ...
  if (emit_debug_code()) {
    // ...
    stop("Unaligned cell in write barrier");
  }
  // ...
  RecordWrite(/* ... */
              remembered_set_action,
              /* ... */);
  // Clobber the input registers when running with the debug-code flag
  // turned on, to provoke errors.
  if (emit_debug_code()) {
    mov(value, Operand(BitCast<int32_t>(kZapValue + 4)));
    mov(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
  }
}

void MacroAssembler::RecordWrite(Register object,
                                 /* ... */) {
  // ...
  if (emit_debug_code()) {
    // ...
    Check(eq, "Wrong address or value passed to RecordWrite");
  }
  // ...
  CheckPageFlag(value,
                value,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask,
                eq,
                &done);
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
                eq,
                &done);
  // ...
  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
  // ...
  // Clobber the clobbered registers when running with the debug-code flag
  // turned on, to provoke errors.
  if (emit_debug_code()) {
    mov(address, Operand(BitCast<int32_t>(kZapValue + 12)));
    mov(value, Operand(BitCast<int32_t>(kZapValue + 16)));
  }
}
void MacroAssembler::RememberedSetHelper(Register object,
                                         Register address,
                                         Register scratch,
                                         SaveFPRegsMode fp_mode,
                                         RememberedSetFinalAction and_then) {
  if (emit_debug_code()) {
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok);
    stop("Remembered set pointer is in new space");
    bind(&ok);
  }
  // Load store buffer top.
  ExternalReference store_buffer =
      ExternalReference::store_buffer_top(isolate());
  mov(ip, Operand(store_buffer));
  // ...
  // Check for end of buffer.
  tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
  if (and_then == kFallThroughAtEnd) {
    b(eq, &done);
  } else {
    ASSERT(and_then == kReturnAtEnd);
    Ret(eq);
  }
  // ...
  StoreBufferOverflowStub store_buffer_overflow =
      StoreBufferOverflowStub(fp_mode);
  CallStub(&store_buffer_overflow);
  // ...
  if (and_then == kReturnAtEnd) {
    Ret();
  }
}
void MacroAssembler::PushSafepointRegisters() {
  // ...
}

void MacroAssembler::PopSafepointRegisters() {
  // ...
}

void MacroAssembler::PushSafepointRegistersAndDoubles() {
  PushSafepointRegisters();
  sub(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters * kDoubleSize));
  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
    vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
  }
}

void MacroAssembler::PopSafepointRegistersAndDoubles() {
  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
    vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
  }
  add(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters * kDoubleSize));
  PopSafepointRegisters();
}

void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
                                                             Register dst) {
  str(src, SafepointRegistersAndDoublesSlot(dst));
}

void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
  str(src, SafepointRegisterSlot(dst));
}

void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  ldr(dst, SafepointRegisterSlot(src));
}

int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding,
  // which means that lowest encodings are closest to the stack pointer.
  // ...
}

MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
  // ...
}

MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
  int doubles_size = DwVfpRegister::kNumAllocatableRegisters * kDoubleSize;
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
  return MemOperand(sp, doubles_size + register_offset);
}
void MacroAssembler::Ldrd(Register dst1, Register dst2,
                          const MemOperand& src, Condition cond) {
  // ...
  // Generate two ldr instructions if ldrd is not available.
  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
    CpuFeatures::Scope scope(ARMv7);
    ldrd(dst1, dst2, src, cond);
  } else {
    if ((src.am() == Offset) || (src.am() == NegOffset)) {
      MemOperand src2(src);
      src2.set_offset(src2.offset() + 4);
      if (dst1.is(src.rn())) {
        ldr(dst2, src2, cond);
        ldr(dst1, src, cond);
      } else {
        ldr(dst1, src, cond);
        ldr(dst2, src2, cond);
      }
    } else {  // PostIndex or NegPostIndex.
      if (dst1.is(src.rn())) {
        // ...
        ldr(dst1, src, cond);
      } else {
        MemOperand src2(src);
        src2.set_offset(src2.offset() - 4);
        // ...
        ldr(dst2, src2, cond);
      }
    }
  }
}

void MacroAssembler::Strd(Register src1, Register src2,
                          const MemOperand& dst, Condition cond) {
  // ...
  // Generate two str instructions if strd is not available.
  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
    CpuFeatures::Scope scope(ARMv7);
    strd(src1, src2, dst, cond);
  } else {
    if ((dst.am() == Offset) || (dst.am() == NegOffset)) {
      MemOperand dst2(dst);
      dst2.set_offset(dst2.offset() + 4);
      str(src1, dst, cond);
      str(src2, dst2, cond);
    } else {  // PostIndex or NegPostIndex.
      MemOperand dst2(dst);
      dst2.set_offset(dst2.offset() - 4);
      // ...
      str(src2, dst2, cond);
    }
  }
}

void MacroAssembler::ClearFPSCRBits(const uint32_t bits_to_clear,
                                    const Register scratch,
                                    const Condition cond) {
  vmrs(scratch, cond);
  bic(scratch, scratch, Operand(bits_to_clear), LeaveCC, cond);
  vmsr(scratch, cond);
}
void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
                                           const DwVfpRegister src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}

void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
                                           const double src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}

void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
                                            const DwVfpRegister src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}

void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
                                            const double src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}

void MacroAssembler::Vmov(const DwVfpRegister dst,
                          const double imm,
                          const Register scratch,
                          const Condition cond) {
  static const DoubleRepresentation minus_zero(-0.0);
  static const DoubleRepresentation zero(0.0);
  DoubleRepresentation value(imm);
  // Handle the special cases +0.0 and -0.0 by bit pattern.
  if (value.bits == zero.bits) {
    // ...
  } else if (value.bits == minus_zero.bits) {
    // ...
  } else {
    vmov(dst, imm, scratch, cond);
  }
}
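// Illustrative sketch (not part of the original file): why Vmov above
// compares bit patterns rather than using ==. As doubles, 0.0 == -0.0, so a
// floating-point compare cannot tell the two encodings apart, but the raw
// 64-bit representations differ only in the sign bit. Hypothetical helper
// name; assumes <cstring> has been included for memcpy.
static inline bool IsMinusZeroBits(double d) {
  unsigned long long bits;
  const unsigned long long kMinusZeroBits = 1ull << 63;  // sign bit only
  memcpy(&bits, &d, sizeof(bits));
  return bits == kMinusZeroBits;
}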
void MacroAssembler::EnterFrame(StackFrame::Type type) {
  // ...
  mov(ip, Operand(Smi::FromInt(type)));
  push(ip);
  mov(ip, Operand(CodeObject()));
  push(ip);
  // ...
}

void MacroAssembler::LeaveFrame(StackFrame::Type type) {
  // ...
}

void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
  // ...
  mov(fp, Operand(sp));  // Set up new frame pointer.
  // ...
  if (emit_debug_code()) {
    // ...
  }
  mov(ip, Operand(CodeObject()));
  push(ip);

  // Save the frame pointer and the context in top.
  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  str(fp, MemOperand(ip));
  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  str(cp, MemOperand(ip));

  // Optionally save all double registers.
  if (save_doubles) {
    DwVfpRegister first = d0;
    // ...
    vstm(db_w, sp, first, last);
    // ...
  }

  // Reserve stack space and align the frame for the call.
  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  // ...
  if (frame_alignment > 0) {
    ASSERT(IsPowerOf2(frame_alignment));
    and_(sp, sp, Operand(-frame_alignment));
  }
  // ...
}

void MacroAssembler::InitializeNewString(Register string,
                                         Register length,
                                         Heap::RootListIndex map_index,
                                         Register scratch1,
                                         Register scratch2) {
  // ...
  LoadRoot(scratch2, map_index);
  // ...
  mov(scratch1, Operand(String::kEmptyHashField));
  // ...
}

int MacroAssembler::ActivationFrameAlignment() {
#if defined(V8_HOST_ARCH_ARM)
  // Running on the real platform: use the alignment mandated by the local
  // environment.
  return OS::ActivationFrameAlignment();
#else  // defined(V8_HOST_ARCH_ARM)
  // Under the simulator the required alignment is not known, so it is
  // controlled by a flag.
  return FLAG_sim_stack_alignment;
#endif  // defined(V8_HOST_ARCH_ARM)
}
void MacroAssembler::LeaveExitFrame(bool save_doubles,
                                    Register argument_count) {
  // Optionally restore all double registers.
  if (save_doubles) {
    DwVfpRegister first = d0;
    // ...
    vldm(ia, r3, first, last);
  }

  // Clear top frame.
  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  // ...
  // Restore current context from top and clear it in debug mode.
  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
  // ...
  // Tear down the exit frame, pop the arguments, and return.
  mov(sp, Operand(fp));
  // ...
  if (argument_count.is_valid()) {
    add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
  }
}

void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
  if (use_eabi_hardfloat()) {
    Move(dst, d0);
  } else {
    vmov(dst, r0, r1);
  }
}

void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
  // The calling convention requires the call kind in r5.
  ASSERT(dst.is(r5));
  if (call_kind == CALL_AS_FUNCTION) {
    mov(dst, Operand(Smi::FromInt(1)));
  } else {
    mov(dst, Operand(Smi::FromInt(0)));
  }
}
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    Register code_reg,
                                    Label* done,
                                    bool* definitely_mismatches,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper,
                                    CallKind call_kind) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label regular_invoke;

  // Check whether the expected and actual argument counts match. If not,
  // set up registers according to the contract with
  // ArgumentsAdaptorTrampoline:
  //   r0: actual arguments count
  //   r1: function (passed through to callee)
  //   r2: expected arguments count
  //   r3: callee code entry
  ASSERT(actual.is_immediate() || actual.reg().is(r0));
  ASSERT(expected.is_immediate() || expected.reg().is(r2));
  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));

  if (expected.is_immediate()) {
    ASSERT(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      mov(r0, Operand(actual.immediate()));
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that don't want
        // that done; act as if expected and actual counts match.
        definitely_matches = true;
      } else {
        *definitely_mismatches = true;
        mov(r2, Operand(expected.immediate()));
      }
    }
  } else {
    if (actual.is_immediate()) {
      cmp(expected.reg(), Operand(actual.immediate()));
      b(eq, &regular_invoke);
      mov(r0, Operand(actual.immediate()));
    } else {
      cmp(expected.reg(), Operand(actual.reg()));
      b(eq, &regular_invoke);
    }
  }

  if (!definitely_matches) {
    if (!code_constant.is_null()) {
      mov(r3, Operand(code_constant));
      // ...
    }

    Handle<Code> adaptor =
        isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor));
      SetCallKind(r5, call_kind);
      Call(adaptor);
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {
        b(done);
      }
    } else {
      SetCallKind(r5, call_kind);
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&regular_invoke);
  }
}
void MacroAssembler::InvokeCode(Register code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                InvokeFlag flag,
                                const CallWrapper& call_wrapper,
                                CallKind call_kind) {
  Label done;
  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, Handle<Code>::null(), code,
                 &done, &definitely_mismatches, flag,
                 call_wrapper, call_kind);
  if (!definitely_mismatches) {
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(code));
      SetCallKind(r5, call_kind);
      Call(code);
      call_wrapper.AfterCall();
    } else {
      SetCallKind(r5, call_kind);
      Jump(code);
    }
    // Continue here if InvokePrologue does handle the invocation due to
    // mismatched parameter counts.
    bind(&done);
  }
}

void MacroAssembler::InvokeCode(Handle<Code> code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                RelocInfo::Mode rmode,
                                InvokeFlag flag,
                                CallKind call_kind) {
  Label done;
  bool definitely_mismatches = false;
  InvokePrologue(expected, actual, code, no_reg,
                 &done, &definitely_mismatches, flag,
                 NullCallWrapper(), call_kind);
  if (!definitely_mismatches) {
    if (flag == CALL_FUNCTION) {
      SetCallKind(r5, call_kind);
      Call(code, rmode);
    } else {
      SetCallKind(r5, call_kind);
      Jump(code, rmode);
    }
    bind(&done);
  }
}

void MacroAssembler::InvokeFunction(Register fun,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper,
                                    CallKind call_kind) {
  // ...
  Register expected_reg = r2;
  Register code_reg = r3;

  // ...
  ldr(expected_reg,
      FieldMemOperand(code_reg,
                      SharedFunctionInfo::kFormalParameterCountOffset));
  // ...
  ParameterCount expected(expected_reg);
  InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
}

void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                    const ParameterCount& actual,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper,
                                    CallKind call_kind) {
  // Get the function and set up the context.
  LoadHeapObject(r1, function);
  // ...
  ParameterCount expected(function->shared()->formal_parameter_count());
  // ...
  InvokeCode(r3, expected, actual, flag, call_wrapper, call_kind);
}
void MacroAssembler::IsObjectJSObjectType(Register heap_object,
                                          Register map,
                                          Register scratch,
                                          Label* fail) {
  ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
  IsInstanceJSObjectType(map, scratch, fail);
}

void MacroAssembler::IsInstanceJSObjectType(Register map,
                                            Register scratch,
                                            Label* fail) {
  // ...
}

void MacroAssembler::IsObjectJSStringType(Register object,
                                          Register scratch,
                                          Label* fail) {
  // ...
}

#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
  // ...
  mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
  CEntryStub ces(1);
  ASSERT(AllowThisStubCall(&ces));
  // ...
}
#endif

void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
                                    int handler_index) {
  // ...
  // Set up the code object (r5) and the state (r6) for pushing.
  unsigned state =
      StackHandler::IndexField::encode(handler_index) |
      StackHandler::KindField::encode(kind);
  mov(r5, Operand(CodeObject()));
  mov(r6, Operand(state));

  // Push the frame pointer, context, state, and code object.
  if (kind == StackHandler::JS_ENTRY) {
    mov(r7, Operand(Smi::FromInt(0)));  // Indicates no context.
    // ...
  }
  // ...
  // Link the current handler as the next handler.
  mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  // ...
}

void MacroAssembler::PopTryHandler() {
  // ...
  mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  // ...
}

void MacroAssembler::JumpToHandlerEntry() {
  // Compute the handler entry address and jump to it.
  // ...
  mov(r2, Operand(r2, LSR, StackHandler::kKindWidth));  // Handler index.
  // ...
}
void MacroAssembler::Throw(Register value) {
  // ...
  // The exception is expected in r0.
  if (!value.is(r0)) {
    mov(r0, value);
  }
  // Drop the stack pointer to the top of the top handler.
  mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  // ...
  JumpToHandlerEntry();
}

void MacroAssembler::ThrowUncatchable(Register value) {
  // ...
  // The exception is expected in r0.
  if (!value.is(r0)) {
    mov(r0, value);
  }
  // Drop the stack pointer to the top of the top stack handler.
  mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  // ...
  // Unwind the handlers until a JS_ENTRY handler is found.
  Label fetch_next, check_kind;
  // ...
  tst(r2, Operand(StackHandler::KindField::kMask));
  // ...
  JumpToHandlerEntry();
}
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch,
                                            Label* miss) {
  Label same_contexts;

  ASSERT(!holder_reg.is(scratch));

  // Load current lexical context from the stack frame.
  ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
  // In debug mode, make sure the lexical context is set.
#ifdef DEBUG
  cmp(scratch, Operand(0, RelocInfo::NONE));
  Check(ne, "we should not have an empty lexical context");
#endif

  // Load the native context of the current context.
  int offset =
      Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
  // ...
  ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));

  // Check that the context is a native context.
  if (emit_debug_code()) {
    // ...
    LoadRoot(ip, Heap::kNativeContextMapRootIndex);
    cmp(holder_reg, ip);
    Check(eq, "JSGlobalObject::native_context should be a native context.");
    // ...
  }

  // Check if both contexts are the same.
  // ...
  cmp(scratch, Operand(ip));
  b(eq, &same_contexts);

  // Check that the holder's context is a native context.
  if (emit_debug_code()) {
    // ...
    mov(holder_reg, ip);  // Move ip to its holding place.
    LoadRoot(ip, Heap::kNullValueRootIndex);
    cmp(holder_reg, ip);
    Check(ne, "JSGlobalProxy::context() should not be null.");

    // ...
    LoadRoot(ip, Heap::kNativeContextMapRootIndex);
    cmp(holder_reg, ip);
    Check(eq, "JSGlobalObject::native_context should be a native context.");
    // ...
  }

  // Check that the security token in the calling global object is compatible
  // with the security token in the receiving global object.
  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
  // ...
  cmp(scratch, Operand(ip));
  b(ne, miss);

  bind(&same_contexts);
}
void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
  // First of all we assign the hash seed to scratch.
  LoadRoot(scratch, Heap::kHashSeedRootIndex);
  SmiUntag(scratch);

  // Xor original key with a seed.
  eor(t0, t0, Operand(scratch));

  // Compute the hash code from the untagged key.
  // hash = ~hash + (hash << 15);
  mvn(scratch, Operand(t0));
  add(t0, scratch, Operand(t0, LSL, 15));
  // hash = hash ^ (hash >> 12);
  eor(t0, t0, Operand(t0, LSR, 12));
  // hash = hash + (hash << 2);
  add(t0, t0, Operand(t0, LSL, 2));
  // hash = hash ^ (hash >> 4);
  eor(t0, t0, Operand(t0, LSR, 4));
  // hash = hash * 2057;
  mov(scratch, Operand(t0, LSL, 11));
  add(t0, t0, Operand(t0, LSL, 3));
  add(t0, t0, scratch);
  // hash = hash ^ (hash >> 16);
  eor(t0, t0, Operand(t0, LSR, 16));
}
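// Illustrative sketch (not part of the original file): the same
// shift-and-xor integer hash that GetNumberHash computes in registers,
// after mixing in the heap's hash seed. Hypothetical helper name; assumes
// 32-bit unsigned arithmetic.
static inline unsigned ComputeNumberHash(unsigned key, unsigned seed) {
  unsigned hash = key ^ seed;
  hash = ~hash + (hash << 15);  // mvn + add(..., LSL 15)
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // hash + (hash << 3) + (hash << 11)
  hash = hash ^ (hash >> 16);
  return hash;
}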
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
                                              Register elements,
                                              Register key,
                                              Register result,
                                              Register t0,
                                              Register t1,
                                              Register t2) {
  Label done;

  GetNumberHash(t0, t1);

  // Compute the capacity mask.
  ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
  // ...
  sub(t1, t1, Operand(1));

  // Generate an unrolled loop that performs a few probes before giving up.
  static const int kProbes = 4;
  for (int i = 0; i < kProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    // ...
    add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
    // ...
    and_(t2, t2, Operand(t1));

    // Scale the index by multiplying by the element size.
    ASSERT(SeededNumberDictionary::kEntrySize == 3);
    add(t2, t2, Operand(t2, LSL, 1));  // t2 = t2 * 3

    // Check if the key is identical to the name.
    // ...
    cmp(key, Operand(ip));
    if (i != kProbes - 1) {
      b(eq, &done);
    } else {
      b(ne, miss);
    }
  }

  bind(&done);
  // Check that the value is a normal property.
  const int kDetailsOffset =
      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
  // ...
  tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
  b(ne, miss);

  // Get the value at the masked, scaled index and return.
  const int kValueOffset =
      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
  ldr(result, FieldMemOperand(t2, kValueOffset));
}
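// Illustrative sketch (not part of the original file): the open-addressing
// probe the unrolled loop above performs, assuming kEntrySize == 3 fields
// per entry and a power-of-two capacity. probe_offset stands in for
// SeededNumberDictionary::GetProbeOffset(i); names are hypothetical.
static inline int ProbeEntryIndex(unsigned hash, unsigned probe_offset,
                                  unsigned capacity) {
  unsigned mask = capacity - 1;                  // capacity is a power of two
  unsigned slot = (hash + probe_offset) & mask;  // wrap within the table
  return static_cast<int>(slot * 3);             // == slot + (slot << 1)
}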
void MacroAssembler::AllocateInNewSpace(int object_size,
                                        Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Label* gc_required,
                                        AllocationFlags flags) {
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      mov(result, Operand(0x7091));
      mov(scratch1, Operand(0x7191));
      mov(scratch2, Operand(0x7291));
    }
    jmp(gc_required);
    return;
  }

  ASSERT(!result.is(scratch1));
  ASSERT(!result.is(scratch2));
  ASSERT(!scratch1.is(scratch2));
  // ...

  // Check relative positions of allocation top and limit addresses.
  // The values must be adjacent in memory to allow the use of LDM.
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address(isolate());
  intptr_t top =
      reinterpret_cast<intptr_t>(new_space_allocation_top.address());
  intptr_t limit =
      reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
  ASSERT((limit - top) == kPointerSize);

  // Set up allocation top address and object size registers.
  Register topaddr = scratch1;
  Register obj_size_reg = scratch2;
  mov(topaddr, Operand(new_space_allocation_top));
  Operand obj_size_operand = Operand(object_size);
  if (!obj_size_operand.is_single_instruction(this)) {
    // We are about to steal ip, so we need to load this value first.
    mov(obj_size_reg, obj_size_operand);
  }

  // Load allocation top into result and allocation limit into ip.
  ldm(ia, topaddr, result.bit() | ip.bit());
  // ...
  if (emit_debug_code()) {
    // ...
    Check(eq, "Unexpected allocation top");
  }
  // ...

  // Calculate new top and bail out if new space is exhausted.
  if (obj_size_operand.is_single_instruction(this)) {
    // The size fits in a single instruction as an immediate.
    add(scratch2, result, obj_size_operand, SetCC);
  } else {
    // Doesn't fit in an instruction; use the pre-loaded size register.
    add(scratch2, result, obj_size_reg, SetCC);
  }
  b(cs, gc_required);
  cmp(scratch2, Operand(ip));
  b(hi, gc_required);
  // ...
}
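// Illustrative sketch (not part of the original file): the bump-pointer
// check performed above with ldm/add/cmp. `top` and `limit` are the
// new-space allocation top and limit loaded through `topaddr`; the object
// fits if top + size does not overflow (the SetCC/carry check) and does not
// exceed limit. Hypothetical helper name; assumes 32-bit addresses.
static inline bool TryBumpAllocate(unsigned* top, unsigned limit,
                                   unsigned object_size, unsigned* result) {
  unsigned new_top = *top + object_size;
  if (new_top < *top) return false;   // overflow -> go to gc_required
  if (new_top > limit) return false;  // past the limit -> go to gc_required
  *result = *top;                     // result points at the old top
  *top = new_top;                     // publish the new allocation top
  return true;
}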
void MacroAssembler::AllocateInNewSpace(Register object_size,
                                        Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Label* gc_required,
                                        AllocationFlags flags) {
  if (!FLAG_inline_new) {
    if (emit_debug_code()) {
      // Trash the registers to simulate an allocation failure.
      mov(result, Operand(0x7091));
      mov(scratch1, Operand(0x7191));
      mov(scratch2, Operand(0x7291));
    }
    jmp(gc_required);
    return;
  }

  ASSERT(!result.is(scratch1));
  ASSERT(!result.is(scratch2));
  ASSERT(!scratch1.is(scratch2));
  // ...

  // Check relative positions of allocation top and limit addresses.
  // The values must be adjacent in memory to allow the use of LDM.
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address(isolate());
  intptr_t top =
      reinterpret_cast<intptr_t>(new_space_allocation_top.address());
  intptr_t limit =
      reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
  ASSERT((limit - top) == kPointerSize);

  // Set up allocation top address.
  Register topaddr = scratch1;
  mov(topaddr, Operand(new_space_allocation_top));

  // This code stores a temporary value in ip.
  if ((flags & RESULT_CONTAINS_TOP) == 0) {
    // Load allocation top into result and allocation limit into ip.
    ldm(ia, topaddr, result.bit() | ip.bit());
  } else {
    if (emit_debug_code()) {
      // ...
      Check(eq, "Unexpected allocation top");
    }
    // ...
  }

  // Calculate new top and bail out if new space is exhausted. The object
  // size may be given in words, so a shift is required to get bytes.
  if ((flags & SIZE_IN_WORDS) != 0) {
    // ...
  } else {
    add(scratch2, result, Operand(object_size), SetCC);
  }
  b(cs, gc_required);
  cmp(scratch2, Operand(ip));
  b(hi, gc_required);

  // Update allocation top. result temporarily holds the new top.
  if (emit_debug_code()) {
    // ...
    Check(eq, "Unaligned allocation in new space");
  }
  // ...

  // Tag object if requested.
  if ((flags & TAG_OBJECT) != 0) {
    add(result, result, Operand(kHeapObjectTag));
  }
}
void MacroAssembler::UndoAllocationInNewSpace(Register object,
                                              Register scratch) {
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  // ...
#ifdef DEBUG
  // Check that the object to un-allocate is below the current top.
  mov(scratch, Operand(new_space_allocation_top));
  ldr(scratch, MemOperand(scratch));
  cmp(object, scratch);
  Check(lt, "Undo allocation of non allocated memory");
#endif
  // Write the address of the object to un-allocate as the current top.
  mov(scratch, Operand(new_space_allocation_top));
  str(object, MemOperand(scratch));
}
void MacroAssembler::AllocateTwoByteString(Register result,
                                           Register length,
                                           Register scratch1,
                                           Register scratch2,
                                           Register scratch3,
                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  mov(scratch1, Operand(length, LSL, 1));  // Length in bytes, not chars.
  add(scratch1, scratch1, /* ... */);
  // ...
  AllocateInNewSpace(scratch1, /* ... */);
  // Set the map, length and hash field.
  InitializeNewString(result, length, Heap::kStringMapRootIndex,
                      scratch1, scratch2);
}

void MacroAssembler::AllocateAsciiString(Register result,
                                         Register length,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
                                         Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
  add(scratch1, length, /* ... */);
  // ...
  AllocateInNewSpace(scratch1, /* ... */);
  // Set the map, length and hash field.
  InitializeNewString(result, length, Heap::kAsciiStringMapRootIndex,
                      scratch1, scratch2);
}

void MacroAssembler::AllocateTwoByteConsString(Register result,
                                               Register length,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  AllocateInNewSpace(ConsString::kSize, /* ... */);
  InitializeNewString(result, length, Heap::kConsStringMapRootIndex,
                      scratch1, scratch2);
}

void MacroAssembler::AllocateAsciiConsString(Register result,
                                             Register length,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* gc_required) {
  AllocateInNewSpace(ConsString::kSize, /* ... */);
  InitializeNewString(result, length, Heap::kConsAsciiStringMapRootIndex,
                      scratch1, scratch2);
}

void MacroAssembler::AllocateTwoByteSlicedString(Register result,
                                                 Register length,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Label* gc_required) {
  AllocateInNewSpace(SlicedString::kSize, /* ... */);
  InitializeNewString(result, length, Heap::kSlicedStringMapRootIndex,
                      scratch1, scratch2);
}

void MacroAssembler::AllocateAsciiSlicedString(Register result,
                                               Register length,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required) {
  AllocateInNewSpace(SlicedString::kSize, /* ... */);
  InitializeNewString(result, length, Heap::kSlicedAsciiStringMapRootIndex,
                      scratch1, scratch2);
}
void MacroAssembler::CompareObjectType(Register object,
                                       Register map,
                                       Register type_reg,
                                       InstanceType type) {
  ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
  CompareInstanceType(map, type_reg, type);
}

void MacroAssembler::CompareInstanceType(Register map,
                                         Register type_reg,
                                         InstanceType type) {
  ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
  cmp(type_reg, Operand(type));
}

void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
  LoadRoot(ip, index);
  cmp(obj, ip);
}

void MacroAssembler::CheckFastElements(Register map, Register scratch,
                                       Label* fail) {
  // ...
  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
  b(hi, fail);
}

void MacroAssembler::CheckFastObjectElements(Register map, Register scratch,
                                             Label* fail) {
  // ...
  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
  b(ls, fail);
  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
  b(hi, fail);
}

void MacroAssembler::CheckFastSmiElements(Register map, Register scratch,
                                          Label* fail) {
  // ...
  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
  b(hi, fail);
}
void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
                                                 Register key_reg,
                                                 Register receiver_reg,
                                                 Register elements_reg,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Register scratch3,
                                                 Register scratch4,
                                                 Label* fail) {
  Label smi_value, maybe_nan, have_double_value, is_nan, done;
  Register mantissa_reg = scratch2;
  Register exponent_reg = scratch3;

  // Handle smi values specially.
  JumpIfSmi(value_reg, &smi_value);

  // Ensure that the object is a heap number.
  CheckMap(value_reg,
           scratch1,
           isolate()->factory()->heap_number_map(),
           fail,
           DONT_DO_SMI_CHECK);

  // Check for NaN or +/-Infinity by inspecting the exponent word.
  // ...
  ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
  cmp(exponent_reg, scratch1);
  b(ge, &maybe_nan);

  ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));

  bind(&have_double_value);
  add(scratch1, elements_reg, /* ... */);
  str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
  str(exponent_reg, FieldMemOperand(scratch1, offset));
  jmp(&done);

  bind(&maybe_nan);
  // Could be NaN or Infinity. If the fraction is not zero it is NaN,
  // otherwise it is Infinity and the normal path above applies.
  b(gt, &is_nan);
  ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
  cmp(mantissa_reg, Operand(0));
  b(eq, &have_double_value);
  bind(&is_nan);
  // Load the canonical NaN for storing into the double array.
  uint64_t nan_int64 = BitCast<uint64_t>(
      FixedDoubleArray::canonical_not_the_hole_nan_as_double());
  mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
  mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
  jmp(&have_double_value);

  bind(&smi_value);
  add(scratch1, elements_reg, /* ... */);
  add(scratch1, scratch1, /* ... */);
  // scratch1 is now the effective address of the double element.

  FloatingPointHelper::Destination destination;
  if (CpuFeatures::IsSupported(VFP2)) {
    destination = FloatingPointHelper::kVFPRegisters;
  } else {
    destination = FloatingPointHelper::kCoreRegisters;
  }

  Register untagged_value = elements_reg;
  SmiUntag(untagged_value, value_reg);
  FloatingPointHelper::ConvertIntToDouble(this, /* ... */);
  if (destination == FloatingPointHelper::kVFPRegisters) {
    CpuFeatures::Scope scope(VFP2);
    vstr(d0, scratch1, 0);
  } else {
    // ...
    str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
  }
  bind(&done);
}
void MacroAssembler::CompareMap(Register obj,
                                Register scratch,
                                Handle<Map> map,
                                Label* early_success,
                                CompareMapMode mode) {
  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  CompareMap(scratch, map, early_success, mode);
}

void MacroAssembler::CompareMap(Register obj_map,
                                Handle<Map> map,
                                Label* early_success,
                                CompareMapMode mode) {
  cmp(obj_map, Operand(map));
  // When element transitions are allowed, also accept any map in the chain
  // of more general element-transition maps.
  // ...
  Map* current_map = *map;
  // ...
  current_map = current_map->LookupElementsTransitionMap(kind);
  if (!current_map) break;
  b(eq, early_success);
  cmp(obj_map, Operand(Handle<Map>(current_map)));
  // ...
}

void MacroAssembler::CheckMap(Register obj,
                              Register scratch,
                              Handle<Map> map,
                              Label* fail,
                              SmiCheckType smi_check_type,
                              CompareMapMode mode) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }
  Label success;
  CompareMap(obj, scratch, map, &success, mode);
  b(ne, fail);
  bind(&success);
}

void MacroAssembler::CheckMap(Register obj,
                              Register scratch,
                              Heap::RootListIndex index,
                              Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }
  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  LoadRoot(ip, index);
  cmp(scratch, ip);
  b(ne, fail);
}

void MacroAssembler::DispatchMap(Register obj,
                                 Register scratch,
                                 Handle<Map> map,
                                 Handle<Code> success,
                                 SmiCheckType smi_check_type) {
  Label fail;
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, &fail);
  }
  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  mov(ip, Operand(map));
  cmp(scratch, ip);
  Jump(success, RelocInfo::CODE_TARGET, eq);
  bind(&fail);
}
void MacroAssembler::TryGetFunctionPrototype(Register function,
                                             Register result,
                                             Register scratch,
                                             Label* miss,
                                             bool miss_on_bound_function) {
  // Check that the receiver isn't a smi.
  JumpIfSmi(function, miss);
  // ...
  if (miss_on_bound_function) {
    // ...
    tst(scratch,
        Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
    b(ne, miss);
  }

  // Make sure that the function has an instance prototype.
  Label non_instance;
  // ...
  tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
  b(ne, &non_instance);

  // If the prototype or initial map is the hole, miss the cache instead.
  // ...
  LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  cmp(result, ip);
  b(eq, miss);

  // If the function does not have an initial map, we're done.
  CompareObjectType(result, scratch, scratch, MAP_TYPE);
  // ...

  // Non-instance prototype: fetch prototype from constructor field
  // in the initial map.
  bind(&non_instance);
  // ...
}
void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
  ASSERT(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
  Call(stub->GetCode(), RelocInfo::CODE_TARGET, TypeFeedbackId::None(), cond);
}

void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
  ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}

static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
  return ref0.address() - ref1.address();
}
void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
                                              int stack_space) {
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address();
  const int kNextOffset = 0;
  const int kLimitOffset = AddressOffset(
      ExternalReference::handle_scope_limit_address(),
      next_address);
  const int kLevelOffset = AddressOffset(
      ExternalReference::handle_scope_level_address(),
      next_address);

  // Allocate HandleScope in callee-save registers.
  mov(r7, Operand(next_address));
  // ...
  add(r6, r6, Operand(1));
  str(r6, MemOperand(r7, kLevelOffset));

  // The native call returns to the DirectCEntry stub, which redirects to the
  // return address pushed on the stack (it could have moved after GC).
  DirectCEntryStub stub;
  stub.GenerateCall(this, function);

  Label promote_scheduled_exception;
  Label delete_allocated_handles;
  Label leave_exit_frame;

  // If the result is non-zero, dereference it to get the result value,
  // otherwise set it to undefined.
  cmp(r0, Operand(0));
  LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
  // ...

  // No more valid handles (the result handle was the last one). Restore the
  // previous handle scope.
  str(r4, MemOperand(r7, kNextOffset));
  if (emit_debug_code()) {
    ldr(r1, MemOperand(r7, kLevelOffset));
    cmp(r1, r6);
    Check(eq, "Unexpected level after return from api call");
  }
  sub(r6, r6, Operand(1));
  str(r6, MemOperand(r7, kLevelOffset));
  // ...
  b(ne, &delete_allocated_handles);

  // Check if the function scheduled an exception.
  bind(&leave_exit_frame);
  LoadRoot(r4, Heap::kTheHoleValueRootIndex);
  mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate())));
  // ...
  b(ne, &promote_scheduled_exception);

  // LeaveExitFrame expects the unwind space to be in a register.
  mov(r4, Operand(stack_space));
  LeaveExitFrame(false, r4);
  mov(pc, lr);

  bind(&promote_scheduled_exception);
  TailCallExternalReference(
      ExternalReference(Runtime::kPromoteScheduledException, isolate()),
      0,
      1);

  // HandleScope limit has changed. Delete allocated extensions.
  bind(&delete_allocated_handles);
  // ...
  PrepareCallCFunction(1, r5);
  mov(r0, Operand(ExternalReference::isolate_address()));
  CallCFunction(
      ExternalReference::delete_handle_scope_extensions(isolate()), 1);
  // ...
  jmp(&leave_exit_frame);
}
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
}

void MacroAssembler::IllegalOperation(int num_arguments) {
  if (num_arguments > 0) {
    add(sp, sp, Operand(num_arguments * kPointerSize));
  }
  LoadRoot(r0, Heap::kUndefinedValueRootIndex);
}

void MacroAssembler::IndexFromHash(Register hash, Register index) {
  // If the hash field contains an array index, pick it out. The assert
  // checks that the constants for the maximum number of cached array-index
  // digits and the number of bits reserved for it do not conflict.
  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
         (1 << String::kArrayIndexValueBits));
  // ...
  Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
  // ...
}
void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
                                                       Register outHighReg,
                                                       Register outLowReg) {
  // ARMv7 VFP3 instructions to implement integer to double conversion.
  vmov(s15, inReg);
  vcvt_f64_s32(d7, s15);
  vmov(outLowReg, outHighReg, d7);
}

void MacroAssembler::ObjectToDoubleVFPRegister(Register object,
                                               DwVfpRegister result,
                                               Register scratch1,
                                               Register scratch2,
                                               Register heap_number_map,
                                               SwVfpRegister scratch3,
                                               Label* not_number,
                                               ObjectToDoubleFlags flags) {
  Label done, not_smi;
  JumpIfNotSmi(object, &not_smi);
  // Remove the smi tag and convert the value to a double.
  // ...
  vmov(scratch3, scratch1);
  vcvt_f64_s32(result, scratch3);
  b(&done);
  bind(&not_smi);
  // Check for a heap number and load the double value from it.
  // ...
  cmp(scratch1, heap_number_map);
  b(ne, not_number);
  // ...
  // An all-one exponent means NaN or Infinity.
  Sbfx(scratch1,
       scratch1,
       HeapNumber::kExponentShift,
       HeapNumber::kExponentBits);
  cmp(scratch1, Operand(-1));
  b(eq, not_number);
  // ...
  vldr(result, scratch2, HeapNumber::kValueOffset);
  bind(&done);
}

void MacroAssembler::SmiToDoubleVFPRegister(Register smi,
                                            DwVfpRegister value,
                                            Register scratch1,
                                            SwVfpRegister scratch2) {
  mov(scratch1, Operand(smi, ASR, kSmiTagSize));
  vmov(scratch2, scratch1);
  vcvt_f64_s32(value, scratch2);
}
void MacroAssembler::ConvertToInt32(Register source,
                                    Register dest,
                                    Register scratch,
                                    Register scratch2,
                                    DwVfpRegister double_scratch,
                                    Label* not_int32) {
  if (CpuFeatures::IsSupported(VFP2)) {
    CpuFeatures::Scope scope(VFP2);
    // ...
    vldr(double_scratch, scratch, HeapNumber::kValueOffset);
    vcvt_s32_f64(double_scratch.low(), double_scratch);
    vmov(dest, double_scratch.low());
    // A signed vcvt saturates to 0x80000000 / 0x7fffffff when the double is
    // out of range; subtracting one maps both saturated values at or above
    // LONG_MAX - 1.
    sub(scratch, dest, Operand(1));
    cmp(scratch, Operand(LONG_MAX - 1));
    b(ge, not_int32);
  } else {
    // Software conversion via the exponent and mantissa words.
    Label right_exponent, done;
    // ...
    Ubfx(scratch2,
         scratch,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);
    // Exponents above bias + 30 don't fit in a signed int32; this also
    // catches infinities and NaNs.
    const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
    // The ARM cmp can only encode small immediates, so subtract a "fudge
    // factor" first and compare against the adjusted value.
    int fudge_factor = 0x400;
    sub(scratch2, scratch2, Operand(fudge_factor));
    cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
    b(eq, &right_exponent);
    // ...
    // Exponents below the bias round to zero.
    const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
    sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
    // ...
    // Subtract the unbiased exponent from 30 to get the shift distance.
    rsb(dest, scratch2, Operand(30));

    bind(&right_exponent);
    // Get the top bits of the mantissa and put back the implicit 1.
    and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
    orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
    // Shift the mantissa up into the space the exponent occupied, keeping
    // the sign bit clear.
    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
    mov(scratch2, Operand(scratch2, LSL, shift_distance));
    // ...
    // Or in the most significant bits of the low mantissa word, then shift
    // down according to the exponent.
    orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
    mov(dest, Operand(scratch, LSR, dest));
    // ...
  }
}
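// Illustrative sketch (not part of the original file): the integer part of
// the exponent/mantissa manipulation performed by the software path above.
// Assumes the double is positive and 1.0 <= value < 2^31; the real code also
// handles the sign, zero, and out-of-range cases. Hypothetical helper name;
// hi_word/lo_word are the IEEE-754 high and low 32-bit words.
static inline int DoubleBitsToInt31(unsigned hi_word, unsigned lo_word) {
  const int kExponentBias = 1023;
  int exponent = (hi_word >> 20) & 0x7FF;             // 11 exponent bits
  unsigned mantissa = ((hi_word & 0xFFFFFu) << 10)    // top 20 mantissa bits
                      | (lo_word >> 22)               // next 10 bits
                      | (1u << 30);                   // implicit leading 1
  int shift = 30 - (exponent - kExponentBias);        // how far to shift down
  return static_cast<int>(mantissa >> shift);
}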
void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
                                     Register result,
                                     DwVfpRegister double_input,
                                     Register scratch,
                                     DwVfpRegister double_scratch,
                                     CheckForInexactConversion check_inexact) {
  ASSERT(!result.is(scratch));
  ASSERT(!double_input.is(double_scratch));

  CpuFeatures::Scope scope(VFP2);
  Register prev_fpscr = result;

  // Test for values that can be exactly represented as a signed 32-bit
  // integer.
  vcvt_s32_f64(double_scratch.low(), double_input);
  vmov(result, double_scratch.low());
  vcvt_f64_s32(double_scratch, double_scratch.low());
  VFPCompareAndSetFlags(double_input, double_scratch);
  // ...

  // Otherwise convert with an explicit rounding mode, checking the VFP
  // exception flags afterwards.
  int32_t check_inexact_conversion = /* ... */;
  // Set a custom FPSCR: select the rounding mode and clear the cumulative
  // exception flags (including check_inexact_conversion).
  // ...
  orr(scratch, scratch, Operand(rounding_mode));
  // ...
  // Convert the argument to an integer.
  vcvt_s32_f64(double_scratch.low(), double_input /* ... */);
  // ...
  // Retrieve the result.
  vmov(result, double_scratch.low());
  // ...
}
void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
                                                 Register input_high,
                                                 Register input_low,
                                                 Register scratch) {
  Label done, normal_exponent, restore_sign;

  // Extract the biased exponent into result.
  Ubfx(result,
       input_high,
       HeapNumber::kExponentShift,
       HeapNumber::kExponentBits);

  // Infinities and NaNs truncate to 0.
  cmp(result, Operand(HeapNumber::kExponentMask));
  // ...

  // Express the exponent as a delta to (number of mantissa bits + 31).
  sub(result,
      result,
      Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31),
      SetCC);
  // If the delta is strictly positive, all bits would be shifted away,
  // which means we can return 0.
  b(le, &normal_exponent);
  mov(result, Operand(0));
  b(&done);

  bind(&normal_exponent);
  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
  // Calculate the shift.
  add(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits), SetCC);

  // Save the sign.
  Register sign = result;
  result = no_reg;
  // ...

  // Set the implicit 1 before the mantissa part in input_high.
  orr(input_high,
      input_high,
      Operand(1 << HeapNumber::kMantissaBitsInTopWord));
  // Shift the mantissa bits into the correct position.
  mov(input_high, Operand(input_high, LSL, scratch));

  // Replace the shifted-out bits with bits from the lower mantissa word.
  Label pos_shift, shift_done;
  rsb(scratch, scratch, Operand(32), SetCC);
  b(ge, &pos_shift);

  // Negate scratch.
  rsb(scratch, scratch, Operand(0));
  mov(input_low, Operand(input_low, LSL, scratch));
  b(&shift_done);

  bind(&pos_shift);
  mov(input_low, Operand(input_low, LSR, scratch));

  bind(&shift_done);
  orr(input_high, input_high, Operand(input_low));
  // Restore the sign if necessary.
  cmp(sign, Operand(0));
  result = sign;
  sign = no_reg;
  rsb(result, input_high, Operand(0), LeaveCC, ne);
  // ...
  bind(&done);
}
void MacroAssembler::EmitECMATruncate(Register result,
                                      DwVfpRegister double_input,
                                      SwVfpRegister single_scratch,
                                      Register scratch,
                                      Register input_high,
                                      Register input_low) {
  CpuFeatures::Scope scope(VFP2);
  ASSERT(!input_high.is(result));
  ASSERT(!input_low.is(result));
  ASSERT(!input_low.is(input_high));
  ASSERT(!scratch.is(result) &&
         !scratch.is(input_high) &&
         !scratch.is(input_low));
  ASSERT(!single_scratch.is(double_input.low()) &&
         !single_scratch.is(double_input.high()));

  Label done;

  // Try a straightforward conversion to a signed integer.
  vcvt_s32_f64(single_scratch, double_input);
  vmov(result, single_scratch);
  // Check the VFP exception flags; if no exception occurred we are done.
  // ...

  // Load the double value words and perform a manual truncation.
  vmov(input_low, input_high, double_input);
  EmitOutOfInt32RangeTruncate(result,
                              input_high,
                              input_low,
                              scratch);
  bind(&done);
}
void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                         Register src,
                                         int num_least_bits) {
  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
    ubfx(dst, src, kSmiTagSize, num_least_bits);
  } else {
    mov(dst, Operand(src, ASR, kSmiTagSize));
    and_(dst, dst, Operand((1 << num_least_bits) - 1));
  }
}

void MacroAssembler::GetLeastBitsFromInt32(Register dst,
                                           Register src,
                                           int num_least_bits) {
  and_(dst, src, Operand((1 << num_least_bits) - 1));
}
void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                 int num_arguments) {
  // All parameters are on the stack. r0 has the return value after the call.

  // If the expected number of arguments of the runtime function is constant,
  // check that the actual number of arguments matches the expectation.
  if (f->nargs >= 0 && f->nargs != num_arguments) {
    IllegalOperation(num_arguments);
    return;
  }

  mov(r0, Operand(num_arguments));
  mov(r1, Operand(ExternalReference(f, isolate())));
  CEntryStub stub(1);
  CallStub(&stub);
}

void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
  CallRuntime(Runtime::FunctionForId(fid), num_arguments);
}

void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
  const Runtime::Function* function = Runtime::FunctionForId(id);
  mov(r0, Operand(function->nargs));
  mov(r1, Operand(ExternalReference(function, isolate())));
  CEntryStub stub(1, kSaveFPRegs);
  CallStub(&stub);
}

void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                           int num_arguments) {
  mov(r0, Operand(num_arguments));
  mov(r1, Operand(ext));
  CEntryStub stub(1);
  CallStub(&stub);
}

void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
                                               int num_arguments,
                                               int result_size) {
  mov(r0, Operand(num_arguments));
  JumpToExternalReference(ext);
}

void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                     int num_arguments,
                                     int result_size) {
  TailCallExternalReference(ExternalReference(fid, isolate()),
                            num_arguments,
                            result_size);
}
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
#if defined(__thumb__)
  // Thumb mode builtin.
  ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
#endif
  mov(r1, Operand(builtin));
  CEntryStub stub(1);
  Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}

void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   InvokeFlag flag,
                                   const CallWrapper& call_wrapper) {
  // ...
  GetBuiltinEntry(r2, id);
  if (flag == CALL_FUNCTION) {
    call_wrapper.BeforeCall(CallSize(r2));
    SetCallKind(r5, CALL_AS_METHOD);
    Call(r2);
    call_wrapper.AfterCall();
  } else {
    SetCallKind(r5, CALL_AS_METHOD);
    Jump(r2);
  }
}

void MacroAssembler::GetBuiltinFunction(Register target,
                                        Builtins::JavaScript id) {
  // Load the builtins object into the target register.
  ldr(target,
      MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  // ...
  // Load the JavaScript builtin function from the builtins object.
  ldr(target,
      FieldMemOperand(target,
                      JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}

void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  ASSERT(!target.is(r1));
  GetBuiltinFunction(r1, id);
  // Load the code entry point from the builtins object.
  ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
}
void MacroAssembler::SetCounter(StatsCounter* counter, int value,
                                Register scratch1, Register scratch2) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(scratch1, Operand(value));
    mov(scratch2, Operand(ExternalReference(counter)));
    // ...
  }
}

void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(scratch2, Operand(ExternalReference(counter)));
    // ...
    add(scratch1, scratch1, Operand(value));
    // ...
  }
}

void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(scratch2, Operand(ExternalReference(counter)));
    // ...
    sub(scratch1, scratch1, Operand(value));
    // ...
  }
}
void MacroAssembler::Assert(Condition cond, const char* msg) {
  if (emit_debug_code())
    Check(cond, msg);
}

void MacroAssembler::AssertRegisterIsRoot(Register reg,
                                          Heap::RootListIndex index) {
  if (emit_debug_code()) {
    LoadRoot(ip, index);
    cmp(reg, ip);
    Check(eq, "Register did not match expected root");
  }
}

void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    Label ok;
    // ...
    LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
    cmp(elements, ip);
    b(eq, &ok);
    LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
    cmp(elements, ip);
    b(eq, &ok);
    LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
    cmp(elements, ip);
    b(eq, &ok);
    Abort("JSObject with fast elements map has slow elements");
    bind(&ok);
    // ...
  }
}

void MacroAssembler::Check(Condition cond, const char* msg) {
  Label L;
  b(cond, &L);
  Abort(msg);
  // Will not return here.
  bind(&L);
}

void MacroAssembler::Abort(const char* msg) {
  Label abort_start;
  bind(&abort_start);
  // Pass the message as an aligned pointer that is a proper v8 smi, plus the
  // alignment difference from the real pointer as a smi.
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
  if (msg != NULL) {
    RecordComment("Abort message: ");
    RecordComment(msg);
  }
#endif

  mov(r0, Operand(p0));
  push(r0);
  mov(r0, Operand(Smi::FromInt(p1 - p0)));
  push(r0);
  if (!has_frame_) {
    // Claim a stack frame without generating one, to allow the runtime call.
    FrameScope scope(this, StackFrame::NONE);
    CallRuntime(Runtime::kAbort, 2);
  } else {
    CallRuntime(Runtime::kAbort, 2);
  }
  // Flush the constant pool if it is blocked.
  if (is_const_pool_blocked()) {
    // If the calling code cares about the exact number of instructions
    // generated, insert padding to keep the size of Abort constant.
    static const int kExpectedAbortInstructions = 10;
    int abort_instructions = InstructionsGeneratedSince(&abort_start);
    ASSERT(abort_instructions <= kExpectedAbortInstructions);
    while (abort_instructions++ < kExpectedAbortInstructions) {
      nop();
    }
  }
}
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    // Move up the chain of contexts to the context containing the slot.
    ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    for (int i = 1; i < context_chain_length; i++) {
      ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    }
  } else {
    // The slot is in the current function context.
    // ...
  }
}

void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch,
    Label* no_map_match) {
  // Load the global or builtins object from the current context.
  ldr(scratch,
      MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));

  // Check that the function's map is the same as the expected cached map.
  ldr(scratch,
      MemOperand(scratch,
                 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
  size_t offset = expected_kind * kPointerSize +
      FixedArrayBase::kHeaderSize;
  // ...
  cmp(map_in_out, ip);
  b(ne, no_map_match);

  // Use the transitioned cached map.
  offset = transitioned_kind * kPointerSize +
      FixedArrayBase::kHeaderSize;
  ldr(map_in_out, FieldMemOperand(scratch, offset));
}

void MacroAssembler::LoadInitialArrayMap(
    Register function_in, Register scratch,
    Register map_out, bool can_have_holes) {
  ASSERT(!function_in.is(map_out));
  Label done;
  ldr(map_out, FieldMemOperand(function_in,
                               JSFunction::kPrototypeOrInitialMapOffset));
  if (!FLAG_smi_only_arrays) {
    LoadTransitionedArrayMapConditional(/* ... */);
  } else if (can_have_holes) {
    LoadTransitionedArrayMapConditional(/* ... */);
  }
  bind(&done);
}

void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global or builtins object from the current context.
  ldr(function,
      MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  // Load the native context from the global or builtins object.
  ldr(function, FieldMemOperand(function,
                                GlobalObject::kNativeContextOffset));
  // Load the function from the native context.
  ldr(function, MemOperand(function, Context::SlotOffset(index)));
}

void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map,
                                                  Register scratch) {
  // Load the initial map. The global functions all have initial maps.
  ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
    b(&ok);
    bind(&fail);
    Abort("Global functions must have initial map");
    bind(&ok);
  }
}
void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
    Register reg,
    Register scratch,
    Label* not_power_of_two_or_zero) {
  sub(scratch, reg, Operand(1), SetCC);
  b(mi, not_power_of_two_or_zero);
  tst(scratch, reg);
  b(ne, not_power_of_two_or_zero);
}

void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
    Register reg,
    Register scratch,
    Label* zero_and_neg,
    Label* not_power_of_two) {
  sub(scratch, reg, Operand(1), SetCC);
  b(mi, zero_and_neg);
  tst(scratch, reg);
  b(ne, not_power_of_two);
}

void MacroAssembler::JumpIfNotBothSmi(Register reg1,
                                      Register reg2,
                                      Label* on_not_both_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  tst(reg1, Operand(kSmiTagMask));
  tst(reg2, Operand(kSmiTagMask), eq);
  b(ne, on_not_both_smi);
}

void MacroAssembler::UntagAndJumpIfSmi(
    Register dst, Register src, Label* smi_case) {
  STATIC_ASSERT(kSmiTag == 0);
  mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
  b(cc, smi_case);  // Shifter carry is not set for a smi.
}

void MacroAssembler::UntagAndJumpIfNotSmi(
    Register dst, Register src, Label* non_smi_case) {
  STATIC_ASSERT(kSmiTag == 0);
  mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
  b(cs, non_smi_case);  // Shifter carry is set for a non-smi.
}

void MacroAssembler::JumpIfEitherSmi(Register reg1,
                                     Register reg2,
                                     Label* on_either_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  tst(reg1, Operand(kSmiTagMask));
  tst(reg2, Operand(kSmiTagMask), ne);
  b(eq, on_either_smi);
}
void MacroAssembler::AssertNotSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    tst(object, Operand(kSmiTagMask));
    Check(ne, "Operand is a smi");
  }
}

void MacroAssembler::AssertSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    tst(object, Operand(kSmiTagMask));
    Check(eq, "Operand is not smi");
  }
}

void MacroAssembler::AssertString(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    tst(object, Operand(kSmiTagMask));
    Check(ne, "Operand is a smi and not a string");
    push(object);
    ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
    pop(object);
    Check(lo, "Operand is not a string");
  }
}

void MacroAssembler::AssertRootValue(Register src,
                                     Heap::RootListIndex root_value_index,
                                     const char* message) {
  if (emit_debug_code()) {
    CompareRoot(src, root_value_index);
    Check(eq, message);
  }
}

void MacroAssembler::JumpIfNotHeapNumber(Register object,
                                         Register heap_number_map,
                                         Register scratch,
                                         Label* on_not_heap_number) {
  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  cmp(scratch, heap_number_map);
  b(ne, on_not_heap_number);
}
void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
    Register first, Register second,
    Register scratch1, Register scratch2,
    Label* failure) {
  // Test that both first and second are sequential ASCII strings.
  // Assume that they are non-smis.
  // ...
  JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
                                               scratch2,
                                               scratch1,
                                               scratch2,
                                               failure);
}

void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
                                                         Register second,
                                                         Register scratch1,
                                                         Register scratch2,
                                                         Label* failure) {
  // Check that neither is a smi.
  and_(scratch1, first, Operand(second));
  JumpIfSmi(scratch1, failure);
  JumpIfNonSmisNotBothSequentialAsciiStrings(first,
                                             second,
                                             scratch1,
                                             scratch2,
                                             failure);
}

void MacroAssembler::AllocateHeapNumber(Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Register heap_number_map,
                                        Label* gc_required) {
  // Allocate an object in the heap for the heap number and tag it as a heap
  // object.
  AllocateInNewSpace(HeapNumber::kSize, result, scratch1, scratch2,
                     gc_required, TAG_OBJECT);

  // Store the heap number map in the allocated object.
  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  // ...
  str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
  // ...
  str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
  // ...
}

void MacroAssembler::AllocateHeapNumberWithValue(Register result,
                                                 DwVfpRegister value,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Register heap_number_map,
                                                 Label* gc_required) {
  AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
  sub(scratch1, result, Operand(kHeapObjectTag));
  vstr(value, scratch1, HeapNumber::kValueOffset);
}
void MacroAssembler::CopyFields(Register dst, Register src,
                                RegList temps, int field_count) {
  // At least one bit set in the first 15 registers.
  ASSERT((temps & ((1 << 15) - 1)) != 0);
  ASSERT((temps & dst.bit()) == 0);
  ASSERT((temps & src.bit()) == 0);
  // Pick the first available temp register from temps.
  Register tmp = no_reg;
  for (int i = 0; i < 15; i++) {
    if ((temps & (1 << i)) != 0) {
      tmp.set_code(i);
      break;
    }
  }
  // ...
  for (int i = 0; i < field_count; i++) {
    ldr(tmp, FieldMemOperand(src, i * kPointerSize));
    str(tmp, FieldMemOperand(dst, i * kPointerSize));
  }
}

void MacroAssembler::CopyBytes(Register src, Register dst,
                               Register length, Register scratch) {
  Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;

  // Align src before copying in word-sized chunks.
  bind(&align_loop);
  cmp(length, Operand(0));
  b(eq, &done);
  bind(&align_loop_1);
  tst(src, Operand(kPointerSize - 1));
  b(eq, &word_loop);
  // ...
  sub(length, length, Operand(1), SetCC);
  b(ne, &byte_loop_1);

  // Copy bytes in word-sized chunks.
  bind(&word_loop);
  if (emit_debug_code()) {
    tst(src, Operand(kPointerSize - 1));
    Assert(eq, "Expecting alignment for CopyBytes");
  }
  cmp(length, Operand(kPointerSize));
  b(lt, &byte_loop);
  // ...
  // Store the loaded word one byte at a time.
  mov(scratch, Operand(scratch, LSR, 8));
  // ...
  mov(scratch, Operand(scratch, LSR, 8));
  // ...
  mov(scratch, Operand(scratch, LSR, 8));
  // ...
  sub(length, length, Operand(kPointerSize));
  b(&word_loop);

  // Copy the last bytes if any are left.
  bind(&byte_loop);
  cmp(length, Operand(0));
  b(eq, &done);
  bind(&byte_loop_1);
  // ...
  sub(length, length, Operand(1), SetCC);
  b(ne, &byte_loop_1);
  bind(&done);
}
void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
                                                Register end_offset,
                                                Register filler) {
  Label loop, entry;
  b(&entry);
  bind(&loop);
  str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
  bind(&entry);
  cmp(start_offset, end_offset);
  b(lt, &loop);
}

void MacroAssembler::CountLeadingZeros(Register zeros,   // Answer.
                                       Register source,  // Input.
                                       Register scratch) {
  ASSERT(!zeros.is(source) || !source.is(scratch));
  ASSERT(!zeros.is(scratch));
#ifdef CAN_USE_ARMV5_INSTRUCTIONS
  clz(zeros, source);  // This instruction is only supported after ARM5.
#else
  // Order of the next two lines is important: the zeros register
  // can be the same as the source register.
  Move(scratch, source);
  mov(zeros, Operand(0, RelocInfo::NONE));
  // Top 16.
  tst(scratch, Operand(0xffff0000));
  add(zeros, zeros, Operand(16), LeaveCC, eq);
  mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
  // Top 8.
  tst(scratch, Operand(0xff000000));
  add(zeros, zeros, Operand(8), LeaveCC, eq);
  mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
  // Top 4.
  tst(scratch, Operand(0xf0000000));
  add(zeros, zeros, Operand(4), LeaveCC, eq);
  mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
  // Top 2.
  tst(scratch, Operand(0xc0000000));
  add(zeros, zeros, Operand(2), LeaveCC, eq);
  mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
  // Top bit.
  tst(scratch, Operand(0x80000000u));
  add(zeros, zeros, Operand(1), LeaveCC, eq);
#endif
}
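// Illustrative sketch (not part of the original file): the binary-search
// count-leading-zeros that the non-ARMv5 path above builds out of tst and
// conditional add/shift pairs (the ARMv5+ path simply uses clz).
// Hypothetical helper name; assumes a 32-bit unsigned input. Note that, like
// the fallback above, it yields 31 (not 32) for an input of 0.
static inline int CountLeadingZeros32(unsigned x) {
  int zeros = 0;
  if ((x & 0xFFFF0000u) == 0) { zeros += 16; x <<= 16; }
  if ((x & 0xFF000000u) == 0) { zeros += 8;  x <<= 8; }
  if ((x & 0xF0000000u) == 0) { zeros += 4;  x <<= 4; }
  if ((x & 0xC0000000u) == 0) { zeros += 2;  x <<= 2; }
  if ((x & 0x80000000u) == 0) { zeros += 1; }
  return zeros;
}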
void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
    Register first, Register second,
    Register scratch1, Register scratch2,
    Label* failure) {
  int kFlatAsciiStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  // ...
  and_(scratch1, first, Operand(kFlatAsciiStringMask));
  and_(scratch2, second, Operand(kFlatAsciiStringMask));
  cmp(scratch1, Operand(kFlatAsciiStringTag));
  // Ignore the second test if the first test failed.
  cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
  b(ne, failure);
}

void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
                                                            Register scratch,
                                                            Label* failure) {
  int kFlatAsciiStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  // ...
  and_(scratch, type, Operand(kFlatAsciiStringMask));
  cmp(scratch, Operand(kFlatAsciiStringTag));
  b(ne, failure);
}
static const int kRegisterPassedArguments = 4;

int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
                                              int num_double_arguments) {
  int stack_passed_words = 0;
  if (use_eabi_hardfloat()) {
    // In the hard floating point calling convention, we can use all double
    // registers to pass doubles; only the overflow goes on the stack.
    if (num_double_arguments > DoubleRegister::kNumRegisters) {
      stack_passed_words +=
          2 * (num_double_arguments - DoubleRegister::kNumRegisters);
    }
  } else {
    // In the soft floating point calling convention, every double argument
    // is passed using two registers.
    num_reg_arguments += 2 * num_double_arguments;
  }
  // Up to four simple arguments are passed in registers r0..r3.
  if (num_reg_arguments > kRegisterPassedArguments) {
    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
  }
  return stack_passed_words;
}
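// Illustrative sketch (not part of the original file): the soft-float side
// of the calculation above. With the soft-float ABI every double argument
// simply counts as two integer arguments, and only the first four integer
// arguments (r0-r3) travel in registers; the rest spill to the stack.
// Hypothetical helper name.
static inline int StackPassedWordsSoftFloat(int num_reg_arguments,
                                            int num_double_arguments) {
  const int kRegistersAvailable = 4;  // r0-r3
  num_reg_arguments += 2 * num_double_arguments;
  if (num_reg_arguments > kRegistersAvailable) {
    return num_reg_arguments - kRegistersAvailable;
  }
  return 0;
}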
void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          int num_double_arguments,
                                          Register scratch) {
  int frame_alignment = ActivationFrameAlignment();
  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);
  if (frame_alignment > kPointerSize) {
    // Make the stack end at the alignment and make room for the stack
    // arguments and the original value of sp.
    mov(scratch, sp);
    sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
    ASSERT(IsPowerOf2(frame_alignment));
    and_(sp, sp, Operand(-frame_alignment));
    str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}

void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          Register scratch) {
  PrepareCallCFunction(num_reg_arguments, 0, scratch);
}
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
  if (use_eabi_hardfloat()) {
    Move(d0, dreg);
  } else {
    vmov(r0, r1, dreg);
  }
}

void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
                                             DoubleRegister dreg2) {
  if (use_eabi_hardfloat()) {
    // ...
  } else {
    vmov(r0, r1, dreg1);
    vmov(r2, r3, dreg2);
  }
}

void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
                                             Register reg) {
  if (use_eabi_hardfloat()) {
    Move(d0, dreg);
    Move(r0, reg);
  } else {
    Move(r2, reg);
    vmov(r0, r1, dreg);
  }
}
void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  mov(ip, Operand(function));
  CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
}

void MacroAssembler::CallCFunction(Register function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}

void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}

void MacroAssembler::CallCFunction(Register function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}

void MacroAssembler::CallCFunctionHelper(Register function,
                                         int num_reg_arguments,
                                         int num_double_arguments) {
  // Make sure that the stack is aligned before calling a C function unless
  // running in the simulator, which has its own alignment check.
#if defined(V8_HOST_ARCH_ARM)
  if (emit_debug_code()) {
    int frame_alignment = OS::ActivationFrameAlignment();
    int frame_alignment_mask = frame_alignment - 1;
    if (frame_alignment > kPointerSize) {
      ASSERT(IsPowerOf2(frame_alignment));
      Label alignment_as_expected;
      tst(sp, Operand(frame_alignment_mask));
      b(eq, &alignment_as_expected);
      // Don't use Check here, as it will call Runtime_Abort and possibly
      // re-enter here.
      stop("Unexpected alignment");
      bind(&alignment_as_expected);
    }
  }
#endif

  // Just call directly. The called function cannot cause a GC or allow
  // preemption, so the return address in the link register stays correct.
  Call(function);
  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);
  if (ActivationFrameAlignment() > kPointerSize) {
    ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
  }
}
void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
                                               Register result) {
  const uint32_t kLdrOffsetMask = (1 << 12) - 1;
  const int32_t kPCRegOffset = 2 * kPointerSize;
  ldr(result, MemOperand(ldr_location));
  if (emit_debug_code()) {
    // Check that the instruction is a ldr reg, [pc + offset].
    // ...
    Check(eq, "The instruction to patch should be a load from pc.");
    // Result was clobbered; restore it.
    ldr(result, MemOperand(ldr_location));
  }
  // Get the address of the constant.
  and_(result, result, Operand(kLdrOffsetMask));
  add(result, ldr_location, Operand(result));
  add(result, result, Operand(kPCRegOffset));
}

void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,
    int mask,
    Condition cc,
    Label* condition_met) {
  and_(scratch, object, Operand(~Page::kPageAlignmentMask));
  ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  tst(scratch, Operand(mask));
  b(cc, condition_met);
}
void MacroAssembler::JumpIfBlack(Register object,
                                 Register scratch0,
                                 Register scratch1,
                                 Label* on_black) {
  HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
}

void MacroAssembler::HasColor(Register object,
                              Register bitmap_scratch,
                              Register mask_scratch,
                              Label* has_color,
                              int first_bit,
                              int second_bit) {
  GetMarkBits(object, bitmap_scratch, mask_scratch);

  Label other_color, word_boundary;
  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  tst(ip, Operand(mask_scratch));
  b(first_bit == 1 ? eq : ne, &other_color);
  // Shift left 1 by adding.
  add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
  b(eq, &word_boundary);
  tst(ip, Operand(mask_scratch));
  b(second_bit == 1 ? ne : eq, has_color);
  jmp(&other_color);

  bind(&word_boundary);
  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
  tst(ip, Operand(1));
  b(second_bit == 1 ? ne : eq, has_color);
  bind(&other_color);
}

void MacroAssembler::JumpIfDataObject(Register value,
                                      Register scratch,
                                      Label* not_data_object) {
  Label is_data_object;
  ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
  b(eq, &is_data_object);
  // A string that is not a cons or sliced string contains no GC pointers.
  // ...
  b(ne, not_data_object);
  bind(&is_data_object);
}
void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
  Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
  Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
  add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
  mov(ip, Operand(1));
  mov(mask_reg, Operand(ip, LSL, mask_reg));
}
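// Illustrative sketch (not part of the original file): how an address is
// mapped to a cell and a bit in its page's marking bitmap, assuming one mark
// bit per 4-byte word and 32-bit bitmap cells. The constants and helper name
// are hypothetical stand-ins for the MemoryChunk/Bitmap layout used above.
static inline void AddressToMarkBit(unsigned addr,
                                    unsigned page_alignment_mask,
                                    unsigned* page,
                                    unsigned* cell_index,
                                    unsigned* bit_mask) {
  *page = addr & ~page_alignment_mask;                      // page start
  unsigned word_index = (addr & page_alignment_mask) >> 2;  // 1 bit per word
  *cell_index = word_index >> 5;                            // 32 bits per cell
  *bit_mask = 1u << (word_index & 31);                      // mask_reg
}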
void MacroAssembler::EnsureNotWhite(
    Register value,
    Register bitmap_scratch,
    Register mask_scratch,
    Register load_scratch,
    Label* value_is_white_and_not_data) {
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  Label done;

  // Since both black and grey have a 1 in the first position and white does
  // not, we only need to check one bit.
  ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  tst(mask_scratch, load_scratch);
  b(ne, &done);

  if (emit_debug_code()) {
    // Check for the impossible bit pattern.
    Label ok;
    tst(load_scratch, Operand(mask_scratch, LSL, 1));
    b(eq, &ok);
    stop("Impossible marking bit pattern");
    bind(&ok);
  }

  // The value is white. Check whether it is data that doesn't need scanning;
  // currently only heap numbers and strings qualify.
  Register map = load_scratch;     // Holds map while checking type.
  Register length = load_scratch;  // Holds length of object after type check.
  Label is_data_object;

  // Check for heap numbers.
  ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
  mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
  b(eq, &is_data_object);

  // Check for strings: indirect (cons/sliced) strings and non-strings are
  // not data objects.
  Register instance_type = load_scratch;
  // ...
  b(ne, value_is_white_and_not_data);

  // External strings have a fixed size; sequential strings depend on the
  // encoding and length.
  // ...
  mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
  b(ne, &is_data_object);
  // ...

  bind(&is_data_object);
  // The value is a white data object: mark it black by flipping one bit,
  // and account for its size in the page's live-bytes counter.
  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  orr(ip, ip, Operand(mask_scratch));
  str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));

  and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
  add(ip, ip, Operand(length));
  str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));

  bind(&done);
}
void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
  Usat(output_reg, 8, Operand(input_reg));
}

void MacroAssembler::ClampDoubleToUint8(Register result_reg,
                                        DoubleRegister input_reg,
                                        DoubleRegister temp_double_reg) {
  Label above_zero, done, in_bounds;

  Vmov(temp_double_reg, 0.0);
  VFPCompareAndSetFlags(input_reg, temp_double_reg);
  b(gt, &above_zero);

  // The double value is less than zero, NaN or Inf: return 0.
  mov(result_reg, Operand(0));
  b(al, &done);

  // The double value is >= 255: return 255.
  bind(&above_zero);
  Vmov(temp_double_reg, 255.0, result_reg);
  VFPCompareAndSetFlags(input_reg, temp_double_reg);
  b(le, &in_bounds);
  mov(result_reg, Operand(255));
  b(al, &done);

  // In the 0-255 range: round and truncate.
  bind(&in_bounds);
  // ...
  vmov(result_reg, input_reg.low());
  // ...
  bind(&done);
}
void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
}

void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
  ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}

void MacroAssembler::EnumLength(Register dst, Register map) {
  ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
  and_(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
}

void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
  Register empty_fixed_array_value = r6;
  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  Label next, start;
  // ...

  // Check that the enum length field is properly initialized, indicating
  // that there is an enum cache.
  EnumLength(r3, r1);
  cmp(r3, Operand(Smi::FromInt(Map::kInvalidEnumCache)));
  b(eq, call_runtime);

  jmp(&start);

  bind(&next);
  // For all objects but the receiver, check that the cache is empty.
  EnumLength(r3, r1);
  cmp(r3, Operand(Smi::FromInt(0)));
  b(ne, call_runtime);

  bind(&start);
  // Check that there are no elements. r2 contains the current JS object we
  // have reached through the prototype chain.
  ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
  cmp(r2, empty_fixed_array_value);
  b(ne, call_runtime);

  // Walk up the prototype chain until we hit null.
  ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
  cmp(r2, null_value);
  b(ne, &next);
}
bool AreAliased(Register reg1,
                Register reg2,
                Register reg3,
                Register reg4,
                Register reg5,
                Register reg6) {
  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
      reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid();

  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();
  int n_of_non_aliasing_regs = NumRegs(regs);

  return n_of_valid_regs != n_of_non_aliasing_regs;
}
3836 : address_(address),
3837 instructions_(instructions),
3838 size_(instructions * Assembler::kInstrSize),
3839 masm_(
NULL, address, size_ + Assembler::kGap) {
3843 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
3847 CodePatcher::~CodePatcher() {
3849 CPU::FlushICache(address_, size_);
3852 ASSERT(masm_.pc_ == address_ + size_);
3853 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
3857 void CodePatcher::Emit(Instr instr) {
3858 masm()->emit(instr);
3862 void CodePatcher::Emit(Address addr) {
3863 masm()->emit(reinterpret_cast<Instr>(addr));
3867 void CodePatcher::EmitCondition(Condition cond) {
3868 Instr instr = Assembler::instr_at(masm_.pc_);
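// Emit(Address) plants a raw pointer as if it were an instruction word, and
// EmitCondition reads the instruction at the current pc so that its condition
// field can be rewritten in place.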
3876 #endif // V8_TARGET_ARCH_ARM