#if V8_TARGET_ARCH_IA32
bool CpuFeatures::initialized_ = false;
uint64_t CpuFeatures::supported_ = 0;
uint64_t CpuFeatures::found_by_runtime_probing_only_ = 0;
uint64_t CpuFeatures::cross_compile_ = 0;


ExternalReference ExternalReference::cpu_features() {
  ASSERT(CpuFeatures::initialized_);
  return ExternalReference(&CpuFeatures::supported_);
}
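// Runtime probing: the host CPU is queried once at startup and every detected
// capability is recorded as one bit in a 64-bit feature mask, which generated
// code can then read through the ExternalReference above.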
  uint64_t probed_features = 0;
  if (cpu.has_sse41()) {
    probed_features |= static_cast<uint64_t>(1) << SSE4_1;
  }
  if (cpu.has_sse3()) {
    probed_features |= static_cast<uint64_t>(1) << SSE3;
  }
  if (cpu.has_sse2()) {
    probed_features |= static_cast<uint64_t>(1) << SSE2;
  }
  if (cpu.has_cmov()) {
    probed_features |= static_cast<uint64_t>(1) << CMOV;
  }
  probed_features |= static_cast<uint64_t>(1) << SAHF;

  uint64_t platform_features = OS::CpuFeaturesImpliedByPlatform();
  supported_ = probed_features | platform_features;
  found_by_runtime_probing_only_ = probed_features & ~platform_features;
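// A minimal sketch (not part of this file) of how a probed bit is read back
// later, assuming the CpuFeature enum values used above:
//
//   bool have_sse2 =
//       (CpuFeatures::supported_ & (static_cast<uint64_t>(1) << SSE2)) != 0;
//
// In practice callers go through CpuFeatures::IsSupported(SSE2) rather than
// touching the mask directly.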
void Displacement::init(Label* L, Type type) {
  ASSERT(!L->is_bound());
  int next = 0;
  if (L->is_linked()) {
    next = L->pos();
  }
  data_ = NextField::encode(next) | TypeField::encode(type);
}
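// Unresolved uses of a label are chained through the displacement fields of
// the emitted instructions themselves: each displacement stores the position
// of the next use, so no side table is needed while the label is unbound.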
const int RelocInfo::kApplyMask =
    1 << RelocInfo::JS_RETURN | 1 << RelocInfo::INTERNAL_REFERENCE |
    1 << RelocInfo::DEBUG_BREAK_SLOT | 1 << RelocInfo::CODE_AGE_SEQUENCE;


bool RelocInfo::IsCodedSpecially() {
  return (1 << rmode_) & kApplyMask;
}


bool RelocInfo::IsInConstantPool() {
  return false;  // IA-32 has no constant pool.
}
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  for (int i = 0; i < instruction_count; i++) {
    *(pc_ + i) = *(instructions + i);
  }
  CPU::FlushICache(pc_, instruction_count);
}
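// After patching, the instruction cache must be notified that code changed.
// On IA-32 the flush is effectively a no-op (the architecture keeps data and
// instruction views coherent), but the call documents the requirement that
// matters on other ports.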
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // A call instruction takes 5 bytes; each int3 guard byte takes one.
  static const int kCallCodeSize = 5;
  int code_size = kCallCodeSize + guard_bytes;

  CodePatcher patcher(pc_, code_size);

  Label check_codesize;
  patcher.masm()->bind(&check_codesize);

  patcher.masm()->call(target, RelocInfo::NONE32);

  ASSERT_EQ(kCallCodeSize,
            patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));

  for (int i = 0; i < guard_bytes; i++) {
    patcher.masm()->int3();
  }
}
Operand::Operand(Register base, int32_t disp, RelocInfo::Mode rmode) {
  if (disp == 0 && RelocInfo::IsNone(rmode) && !base.is(ebp)) {
    set_modrm(0, base);  // [base]
    if (base.is(esp)) set_sib(times_1, esp, base);
  } else if (is_int8(disp) && RelocInfo::IsNone(rmode)) {
    set_modrm(1, base);  // [base + disp8]
    if (base.is(esp)) set_sib(times_1, esp, base);
    set_disp8(disp);
  } else {
    set_modrm(2, base);  // [base + disp32/r]
    if (base.is(esp)) set_sib(times_1, esp, base);
    set_dispr(disp, rmode);
  }
}


Operand::Operand(Register base, Register index, ScaleFactor scale,
                 int32_t disp, RelocInfo::Mode rmode) {
  if (disp == 0 && RelocInfo::IsNone(rmode) && !base.is(ebp)) {
    set_modrm(0, esp);  // [base + index*scale]
    set_sib(scale, index, base);
  } else if (is_int8(disp) && RelocInfo::IsNone(rmode)) {
    set_modrm(1, esp);  // [base + index*scale + disp8]
    set_sib(scale, index, base);
    set_disp8(disp);
  } else {
    set_modrm(2, esp);  // [base + index*scale + disp32/r]
    set_sib(scale, index, base);
    set_dispr(disp, rmode);
  }
}


Operand::Operand(Register index, ScaleFactor scale, int32_t disp,
                 RelocInfo::Mode rmode) {
  set_modrm(0, esp);  // [index*scale + disp32/r]
  set_sib(scale, index, ebp);
  set_dispr(disp, rmode);
}
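// Each Operand stores a raw IA-32 ModR/M byte, an optional SIB byte, and an
// optional displacement in buf_. As a worked example (using the standard
// register codes eax=0, ecx=1, ..., ebx=3): Operand(ebx, ecx, times_4, 0x1000)
// takes the final branch of the second constructor and yields ModR/M 0x84
// (mod=10, rm=100 meaning "SIB follows"), SIB 0x8B (scale=10, index=ecx,
// base=ebx), and the little-endian disp32 00 10 00 00 -- the address
// expression [ebx + ecx*4 + 0x1000].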
bool Operand::is_reg(Register reg) const {
  return ((buf_[0] & 0xF8) == 0xC0)
      && ((buf_[0] & 0x07) == reg.code());
}


bool Operand::is_reg_only() const {
  return (buf_[0] & 0xF8) == 0xC0;
}
#ifdef GENERATED_CODE_COVERAGE
static void InitCoverageLog();
#endif


Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      positions_recorder_(this) {
#ifdef DEBUG
  if (own_buffer_) {
    memset(buffer_, 0xCC, buffer_size_);  // int3
  }
#endif

  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
#ifdef GENERATED_CODE_COVERAGE
  InitCoverageLog();
#endif
}
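// Machine code grows forward from the start of the buffer while relocation
// info is written backward from the end, so a single allocation serves both
// until the two regions meet (see buffer_overflow() and GrowBuffer() below).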
  Nop((m - (addr & mask)) & mask);
  while (*a == 0x66) a++;
  if (*a == 0x90) return true;
  if (a[0] == 0xf && a[1] == 0x1f) return true;
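// This recognizes the canonical IA-32 nop forms: any number of 0x66
// operand-size prefixes followed by 0x90 (xchg eax,eax), or the multi-byte
// 0F 1F nop.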
  for (; bytes > 0; bytes--) {
  EMIT(0x50 | src.code());

  emit_operand(esi, src);

  EMIT(0x58 | dst.code());

  emit_operand(eax, dst);

  CHECK(dst.is_byte_register());
  emit_operand(dst, src);

  emit_operand(eax, dst);

  CHECK(src.is_byte_register());
  emit_operand(src, dst);

  emit_operand(dst, src);

  emit_operand(src, dst);

  emit_operand(eax, dst);
  EMIT(static_cast<int8_t>(imm16 & 0xff));
  EMIT(static_cast<int8_t>(imm16 >> 8));
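// 16-bit immediates are emitted least-significant byte first, matching the
// little-endian byte order of the target.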
  EMIT(0xB8 | dst.code());

  EMIT(0xB8 | dst.code());

  EMIT(0xB8 | dst.code());

  emit_operand(dst, src);

  EMIT(0xC0 | src.code() << 3 | dst.code());

  emit_operand(eax, dst);

  emit_operand(eax, dst);

  emit_operand(src, dst);

  emit_operand(dst, src);

  emit_operand(dst, src);

  emit_operand(dst, src);

  emit_operand(dst, src);

  emit_operand(dst, src);
  if (src.is(eax) || dst.is(eax)) {
    EMIT(0x90 | (src.is(eax) ? dst.code() : src.code()));
  } else {
    EMIT(0x87);
    EMIT(0xC0 | src.code() << 3 | dst.code());
  }
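// xchg with eax has a dedicated one-byte form (0x90 | reg); note that
// xchg eax,eax is exactly 0x90, i.e. the classic nop.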
  emit_arith(2, Operand(dst), Immediate(imm32));

  emit_operand(dst, src);

  emit_operand(dst, src);

  emit_operand(src, dst);

  emit_arith(0, dst, x);

  and_(dst, Immediate(imm32));

  emit_arith(4, Operand(dst), x);

  emit_operand(dst, src);

  emit_arith(4, dst, x);

  emit_operand(src, dst);

  if (op.is_reg(eax)) {

  emit_operand(edi, op);

  CHECK(reg.is_byte_register());
  emit_operand(reg, op);

  CHECK(reg.is_byte_register());
  emit_operand(reg, op);

  emit_operand(edi, op);

  emit_arith(7, Operand(reg), Immediate(imm32));

  emit_arith(7, Operand(reg), Immediate(handle));

  emit_operand(reg, op);

  emit_arith(7, op, imm);

  emit_arith(7, op, Immediate(handle));

  emit_operand(eax, op);

  emit_operand(eax, op);

  CHECK(dst.is_byte_register());
  EMIT(0xC8 | dst.code());

  emit_operand(ecx, dst);

  EMIT(0x48 | dst.code());

  emit_operand(ecx, dst);

  EMIT(0xF8 | src.code());

  EMIT(0xE8 | reg.code());

  emit_operand(dst, src);
  if (is_int8(imm32)) {
    EMIT(0xC0 | dst.code() << 3 | src.code());
  } else {
    EMIT(0xC0 | dst.code() << 3 | src.code());
  }
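// Two encodings of imul-with-immediate exist: opcode 0x6B with a
// sign-extended 8-bit immediate when it fits, otherwise 0x69 with a full
// 32-bit immediate -- hence the is_int8 test above.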
  EMIT(0x40 | dst.code());

  emit_operand(eax, dst);

  emit_operand(dst, src);

  EMIT(0xE0 | src.code());

  EMIT(0xD8 | dst.code());

  EMIT(0xD0 | dst.code());

  emit_arith(1, Operand(dst), Immediate(imm32));

  emit_operand(dst, src);

  emit_arith(1, dst, x);

  emit_operand(src, dst);

  EMIT(0xD0 | dst.code());
  EMIT(0xD0 | dst.code());

  EMIT(0xD8 | dst.code());
  EMIT(0xD8 | dst.code());

  EMIT(0xC8 | dst.code());
  EMIT(0xC8 | dst.code());

  EMIT(0xC8 | dst.code());

  EMIT(0xF8 | dst.code());
  EMIT(0xF8 | dst.code());

  EMIT(0xF8 | dst.code());

  emit_operand(dst, src);

  emit_operand(dst, src);

  EMIT(0xE0 | dst.code());
  EMIT(0xE0 | dst.code());

  EMIT(0xE0 | dst.code());

  emit_operand(dst, src);

  EMIT(0xE8 | dst.code());
  EMIT(0xE8 | dst.code());

  EMIT(0xE8 | dst.code());

  emit_arith(5, dst, x);

  emit_operand(dst, src);

  emit_operand(src, dst);
  if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_)) {

  EMIT(0xC0 | reg.code());

  emit_operand(reg, op);

  CHECK(reg.is_byte_register());
  emit_operand(reg, op);

  if (op.is_reg_only()) {
    test(op.reg(), imm);
    return;
  }
  if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_)) {
    return test_b(op, imm.x_);
  }
  emit_operand(eax, op);
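// When the immediate fits in a byte, the shorter byte-sized test encoding is
// used instead of the full 32-bit form, saving three bytes of immediate.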
1203 }
else if (reg.is_byte_register()) {
1204 emit_arith_b(0xF6, 0xC0, reg, imm8);
1207 EMIT(0xC0 | reg.code());
1214 if (op.is_reg_only()) {
1220 emit_operand(
eax, op);
1227 emit_arith(6, Operand(dst), Immediate(imm32));
1234 emit_operand(dst, src);
1241 emit_operand(src, dst);
1247 emit_arith(6, dst, x);
1255 emit_operand(src, dst);
1263 emit_operand(src, dst);
1271 emit_operand(dst, src);
1295 ASSERT(is_uint16(imm16));
1301 EMIT((imm16 >> 8) & 0xFF);
void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      Displacement disp = disp_at(&l);
      PrintF("@ %d ", l.pos());
      disp.next(&l);
    }
    PrintF("\n");
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}
void Assembler::bind_to(Label* L, int pos) {
  while (L->is_linked()) {
    Displacement disp = disp_at(L);
    int fixup_pos = L->pos();
    if (disp.type() == Displacement::CODE_RELATIVE) {
      long_at_put(fixup_pos, pos + Code::kHeaderSize - kHeapObjectTag);
    } else {
      if (disp.type() == Displacement::UNCONDITIONAL_JUMP) {
        ASSERT(byte_at(fixup_pos - 1) == 0xE9);  // jmp expected
      }
      // Relative address, relative to the point after the displacement.
      int imm32 = pos - (fixup_pos + sizeof(int32_t));
      long_at_put(fixup_pos, imm32);
    }
    disp.next(L);
  }
  while (L->is_near_linked()) {
    int fixup_pos = L->near_link_pos();
    int offset_to_next =
        static_cast<int>(*reinterpret_cast<int8_t*>(addr_at(fixup_pos)));
    ASSERT(offset_to_next <= 0);
    int disp = pos - fixup_pos - sizeof(int8_t);
    CHECK(0 <= disp && disp <= 127);
    set_byte_at(fixup_pos, disp);
    if (offset_to_next < 0) {
      L->link_to(fixup_pos + offset_to_next, Label::kNear);
    }
  }
  L->bind_to(pos);
}
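// Binding walks two chains of unresolved references: far uses linked through
// 32-bit displacement fields, and near uses linked through 8-bit offsets
// (hence the 0..127 range check), patching each site to point at `pos`.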
  if (L->is_bound()) {
    const int long_size = 5;
    int offs = L->pos() - pc_offset();
    EMIT(0xE8);  // call rel32
    emit(offs - long_size);
  }

  ASSERT(!RelocInfo::IsCodeTarget(rmode));
  if (RelocInfo::IsRuntimeEntry(rmode)) {
    emit(reinterpret_cast<uint32_t>(entry), rmode);
  } else {
    emit(entry - (pc_ + sizeof(int32_t)), rmode);
  }
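// Call and jump targets are pc-relative: the stored displacement is measured
// from the end of the instruction, hence the sizeof(int32_t) past pc_.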
  return 1 + adr.len_;

  emit_operand(edx, adr);

  return 1 + sizeof(uint32_t);


void Assembler::call(Handle<Code> code,
                     RelocInfo::Mode rmode,
                     TypeFeedbackId ast_id) {
  ASSERT(RelocInfo::IsCodeTarget(rmode)
      || rmode == RelocInfo::CODE_AGE_SEQUENCE);
  emit(code, rmode, ast_id);
  if (L->is_bound()) {
    const int short_size = 2;
    const int long_size = 5;
    int offs = L->pos() - pc_offset();
    if (is_int8(offs - short_size)) {
      EMIT(0xEB);
      EMIT((offs - short_size) & 0xFF);
    } else {
      EMIT(0xE9);
      emit(offs - long_size);
    }
  } else if (distance == Label::kNear) {
    EMIT(0xEB);
    emit_near_disp(L);
  } else {
    EMIT(0xE9);
    emit_disp(L, Displacement::UNCONDITIONAL_JUMP);
  }
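// Bound targets get the tightest encoding that still reaches: EB rel8 (two
// bytes) when the offset fits in a signed byte, otherwise E9 rel32 (five
// bytes). Unbound labels fall back to the requested distance hint.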
  ASSERT(!RelocInfo::IsCodeTarget(rmode));
  if (RelocInfo::IsRuntimeEntry(rmode)) {
    emit(reinterpret_cast<uint32_t>(entry), rmode);
  } else {
    emit(entry - (pc_ + sizeof(int32_t)), rmode);
  }

  emit_operand(esp, adr);

  ASSERT(RelocInfo::IsCodeTarget(rmode));
  ASSERT(0 <= cc && static_cast<int>(cc) < 16);
  if (L->is_bound()) {
    const int short_size = 2;
    const int long_size = 6;
    int offs = L->pos() - pc_offset();
    if (is_int8(offs - short_size)) {
      EMIT(0x70 | cc);
      EMIT((offs - short_size) & 0xFF);
    } else {
      EMIT(0x0F);
      EMIT(0x80 | cc);
      emit(offs - long_size);
    }
  } else if (distance == Label::kNear) {
    EMIT(0x70 | cc);
    emit_near_disp(L);
  }

  ASSERT((0 <= cc) && (static_cast<int>(cc) < 16));
  if (RelocInfo::IsRuntimeEntry(rmode)) {
    emit(reinterpret_cast<uint32_t>(entry), rmode);
  } else {
    emit(entry - (pc_ + sizeof(int32_t)), rmode);
  }
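// Conditional jumps mirror the unconditional case: 0x70|cc with a rel8 for
// short hops, or the two-byte opcode 0F 80|cc with a rel32 otherwise; cc is
// the 4-bit condition code, which is why it must lie in [0, 16).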
  emit(code, RelocInfo::CODE_TARGET);
  emit_farith(0xD9, 0xC0, i);

  emit_farith(0xDD, 0xD8, i);

  emit_operand(eax, adr);

  emit_operand(eax, adr);

  emit_operand(ebx, adr);

  emit_operand(edx, adr);

  emit_operand(ebx, adr);

  emit_operand(edx, adr);

  emit_operand(eax, adr);

  emit_operand(ebp, adr);

  emit_operand(ebx, adr);

  emit_operand(ecx, adr);

  emit_operand(ecx, adr);

  emit_operand(edx, adr);

  emit_operand(edi, adr);

  emit_farith(0xDC, 0xC0, i);

  emit_farith(0xD8, 0xC0, i);

  emit_farith(0xDC, 0xE8, i);

  emit_farith(0xD8, 0xE0, i);

  emit_operand(esp, adr);

  emit_farith(0xD8, 0xC8, i);

  emit_farith(0xDC, 0xC8, i);

  emit_farith(0xDC, 0xF8, i);

  emit_farith(0xD8, 0xF0, i);

  emit_farith(0xDE, 0xC0, i);

  emit_farith(0xDE, 0xE8, i);

  emit_farith(0xDE, 0xE0, i);

  emit_farith(0xDE, 0xC8, i);

  emit_farith(0xDE, 0xF8, i);

  emit_farith(0xD9, 0xC8, i);

  emit_farith(0xDD, 0xC0, i);

  emit_farith(0xDD, 0xE8, i);
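// All the x87 stack operations above funnel through emit_farith(): the first
// byte selects the instruction group and the low bits of the second byte
// carry the FP stack index i, addressing st(i).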
  ASSERT(reg.is_byte_register());
  EMIT(0xC0 | reg.code());

  emit_operand(dst, src);

  emit_operand(dst, src);

  EMIT(static_cast<byte>(mode) | 0x8);

  Register ireg = { reg.code() };
  emit_operand(ireg, adr);

  EMIT(0xC0 | dst.code() << 3 | src.code());

  EMIT(0xC0 | dst.code() << 3 | src.code());

  EMIT(0xC0 | (dst.code() << 3) | src.code());
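// Register-to-register SSE forms encode both XMM registers in a single
// ModR/M byte with mod=11: 0xC0 | dst << 3 | src.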
  RecordRelocInfo(RelocInfo::JS_RETURN);

  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);

  if (FLAG_code_comments || force) {
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
void Assembler::GrowBuffer() {
  // Compute the new buffer size: minimum 4 KB, then doubling.
  CodeDesc desc;
  desc.buffer_size = (buffer_size_ < 4*KB) ? 4*KB : 2*buffer_size_;
  if ((desc.buffer_size > kMaximalBufferSize) ||
      (desc.buffer_size > isolate()->heap()->MaxOldGenerationSize())) {
    V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
  }

  // Set up the new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);
  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
  memset(desc.buffer, 0xCC, desc.buffer_size);  // int3

  // Copy the data: code to the front, relocation info to the back.
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  OS::MemMove(desc.buffer, buffer_, desc.instr_size);
  OS::MemMove(rc_delta + reloc_info_writer.pos(),
              reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers, keeping one spare buffer cached on the isolate.
  if (isolate()->assembler_spare_buffer() == NULL &&
      buffer_size_ == kMinimalBufferSize) {
    isolate()->set_assembler_spare_buffer(buffer_);
  } else {
    DeleteArray(buffer_);
  }
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Relocate internal references (absolute positions within the buffer).
  for (RelocIterator it(desc); !it.done(); it.next()) {
    RelocInfo::Mode rmode = it.rinfo()->rmode();
    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
      int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
      if (*p != 0) *p += pc_delta;  // 0 means uninitialized.
    }
  }
}
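// Note that pc-relative references (the common case on IA-32) survive the
// move unchanged; only absolute INTERNAL_REFERENCE entries need fixing up.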
void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
  ASSERT(is_uint8(op1) && is_uint8(op2));
  ASSERT((op1 & 0x01) == 0);  // should be an 8-bit operation
  EMIT(op1);
  EMIT(op2 | dst.code());
  EMIT(imm8);
}
void Assembler::emit_arith(int sel, Operand dst, const Immediate& x) {
  ASSERT((0 <= sel) && (sel <= 7));
  Register ireg = { sel };
  if (x.is_int8()) {
    EMIT(0x83);  // sign-extended 8-bit immediate
    emit_operand(ireg, dst);
    EMIT(x.x_ & 0xFF);
  } else if (dst.is_reg(eax)) {
    EMIT((sel << 3) | 0x05);  // short form when the destination is eax
    emit(x);
  } else {
    EMIT(0x81);  // full 32-bit immediate
    emit_operand(ireg, dst);
    emit(x);
  }
}
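// `sel` is the x86 opcode extension, smuggled into the ModR/M reg field via a
// fake Register struct. It selects among add(0), or(1), adc(2), sbb(3),
// and(4), sub(5), xor(6) and cmp(7) -- which is why the arithmetic helpers
// above call emit_arith with exactly those constants.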
void Assembler::emit_operand(Register reg, const Operand& adr) {
  const unsigned length = adr.len_;
  // Emit an updated ModR/M byte containing the given register.
  pc_[0] = (adr.buf_[0] & ~0x38) | (reg.code() << 3);
  // Emit the rest of the encoded operand.
  for (unsigned i = 1; i < length; i++) pc_[i] = adr.buf_[i];
  pc_ += length;
  // Emit relocation information if necessary.
  if (length >= sizeof(int32_t) && !RelocInfo::IsNone(adr.rmode_)) {
    pc_ -= sizeof(int32_t);  // pc_ must point at the disp32 being relocated
    RecordRelocInfo(adr.rmode_);
    pc_ += sizeof(int32_t);
  }
}
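// The Operand carries a placeholder reg field; every instruction that takes a
// memory operand overwrites bits 3..5 of the ModR/M byte here, which is also
// how the opcode extensions from emit_arith() are injected.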
void Assembler::emit_farith(int b1, int b2, int i) {
  ASSERT(is_uint8(b1) && is_uint8(b2));
  ASSERT(0 <= i && i < 8);  // illegal FP stack offset
  EMIT(b1);
  EMIT(b2 + i);
}
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  ASSERT(!RelocInfo::IsNone(rmode));
  // Don't record external references unless the heap will be serialized.
  if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
    if (!Serializer::enabled() && !emit_debug_code()) return;
  }
  RelocInfo rinfo(pc_, rmode, data, NULL);
  reloc_info_writer.Write(&rinfo);
}
#ifdef GENERATED_CODE_COVERAGE
static FILE* coverage_log = NULL;


static void InitCoverageLog() {
  char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
  if (file_name != NULL) {
    coverage_log = fopen(file_name, "aw+");
  }
}


void LogGeneratedCodeCoverage(const char* file_line) {
  const char* return_address = (&file_line)[-1];
  char* push_insn = const_cast<char*>(return_address - 12);
  push_insn[0] = 0xeb;  // relative branch instruction
  if (coverage_log != NULL) {
    fprintf(coverage_log, "%s\n", file_line);
    fflush(coverage_log);
  }
}
#endif  // V8_TARGET_ARCH_IA32