#ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
#define V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
#define LS_MACRO_LIST(V)                                     \
  V(Ldrb, Register&, rt, LDRB_w)                             \
  V(Strb, Register&, rt, STRB_w)                             \
  V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
  V(Ldrh, Register&, rt, LDRH_w)                             \
  V(Strh, Register&, rt, STRH_w)                             \
  V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
  V(Ldr, CPURegister&, rt, LoadOpFor(rt))                    \
  V(Str, CPURegister&, rt, StoreOpFor(rt))                   \
  V(Ldrsw, Register&, rt, LDRSW_x)
class MacroAssembler : public Assembler {
 public:
  MacroAssembler(Isolate* isolate, void* buffer, int size);

  // Logical macros.
  inline void And(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Ands(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Bic(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Bics(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Orr(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Orn(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Eor(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Eon(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Tst(const Register& rn, const Operand& operand);
  void LogicalMacro(const Register& rd,
                    const Register& rn,
                    const Operand& operand,
                    LogicalOp op);

  // Add and sub macros.
  inline void Add(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Adds(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Sub(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Subs(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Cmn(const Register& rn, const Operand& operand);
  inline void Cmp(const Register& rn, const Operand& operand);
  inline void Neg(const Register& rd, const Operand& operand);
  inline void Negs(const Register& rd, const Operand& operand);
  void AddSubMacro(const Register& rd,
                   const Register& rn,
                   const Operand& operand,
                   FlagsUpdate S,
                   AddSubOp op);

  // Add/sub with carry macros.
  inline void Adc(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Adcs(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Sbc(const Register& rd,
                  const Register& rn,
                  const Operand& operand);
  inline void Sbcs(const Register& rd,
                   const Register& rn,
                   const Operand& operand);
  inline void Ngc(const Register& rd, const Operand& operand);
  inline void Ngcs(const Register& rd, const Operand& operand);
  void AddSubWithCarryMacro(const Register& rd,
                            const Register& rn,
                            const Operand& operand,
                            FlagsUpdate S,
                            AddSubWithCarryOp op);

  // Move macros.
  void Mov(const Register& rd,
           const Operand& operand,
           DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
  void Mov(const Register& rd, uint64_t imm);
  inline void Mvn(const Register& rd, uint64_t imm);
  void Mvn(const Register& rd, const Operand& operand);
  static bool IsImmMovn(uint64_t imm, unsigned reg_size);
  static bool IsImmMovz(uint64_t imm, unsigned reg_size);
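  // Illustrative usage (not part of the original header; assumes a
  // MacroAssembler `masm`). Mov picks the cheapest sequence for an immediate,
  // using the IsImmMovz/IsImmMovn predicates above to detect values that fit
  // a single move-wide instruction:
  //
  //   masm.Mov(x0, 0x1234);              // one MOVZ
  //   masm.Mov(x1, ~0x1234UL);           // one MOVN
  //   masm.Mov(x2, 0x1234567800000000);  // MOVZ plus MOVK(s)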
  // Conditional compare macros.
  inline void Ccmp(const Register& rn,
                   const Operand& operand,
                   StatusFlags nzcv,
                   Condition cond);
  inline void Ccmn(const Register& rn,
                   const Operand& operand,
                   StatusFlags nzcv,
                   Condition cond);
  void ConditionalCompareMacro(const Register& rn,
                               const Operand& operand,
                               StatusFlags nzcv,
                               Condition cond,
                               ConditionalCompareOp op);
  void Csel(const Register& rd,
            const Register& rn,
            const Operand& operand,
            Condition cond);
  // Load/store macros.
#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
  inline void FN(const REGTYPE REG, const MemOperand& addr);
  LS_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  void LoadStoreMacro(const CPURegister& rt,
                      const MemOperand& addr,
                      LoadStoreOp op);
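  // For illustration (not from the original header): LS_MACRO_LIST is an
  // X-macro, so the invocation above stamps out one declaration per entry.
  // The V(Ldrb, Register&, rt, LDRB_w) entry, for example, expands to:
  //
  //   inline void Ldrb(const Register& rt, const MemOperand& addr);
  //
  // The fourth argument (the LoadStoreOp) is unused in the declaration but
  // lets the matching definitions pass the right opcode to LoadStoreMacro,
  // including width-dependent choices such as
  // rt.Is64Bits() ? LDRSB_x : LDRSB_w.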
  // V8-specific load/store helpers.
  void Load(const Register& rt, const MemOperand& addr, Representation r);
  void Store(const Register& rt, const MemOperand& addr, Representation r);
  inline void Adr(const Register& rd, Label* label);
  inline void Asr(const Register& rd, const Register& rn, unsigned shift);
  inline void Asr(const Register& rd, const Register& rn, const Register& rm);
  void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);
  inline void B(Label* label);
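  // Illustrative usage (not from the original header): the BranchType form
  // folds compare-and-branch and test-and-branch into one interface. Given
  // the BranchType values (reg_zero, reg_not_zero, reg_bit_clear,
  // reg_bit_set), these are equivalent:
  //
  //   masm.B(&target, reg_zero, x0);        // same as Cbz(x0, &target)
  //   masm.B(&target, reg_bit_set, x1, 3);  // same as Tbnz(x1, 3, &target)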
  inline void Bfi(const Register& rd,
                  const Register& rn,
                  unsigned lsb,
                  unsigned width);
  inline void Bfxil(const Register& rd,
                    const Register& rn,
                    unsigned lsb,
                    unsigned width);
  inline void Bind(Label* label);
  inline void Bl(Label* label);
  inline void Blr(const Register& xn);
  inline void Br(const Register& xn);
  void Cbnz(const Register& rt, Label* label);
  void Cbz(const Register& rt, Label* label);
  inline void Cinc(const Register& rd, const Register& rn, Condition cond);
  inline void Cinv(const Register& rd, const Register& rn, Condition cond);
  inline void Cls(const Register& rd, const Register& rn);
  inline void Clz(const Register& rd, const Register& rn);
  inline void Cneg(const Register& rd, const Register& rn, Condition cond);
  inline void CmovX(const Register& rd, const Register& rn, Condition cond);
  inline void Csinc(const Register& rd,
                    const Register& rn,
                    const Register& rm,
                    Condition cond);
  inline void Csinv(const Register& rd,
                    const Register& rn,
                    const Register& rm,
                    Condition cond);
  inline void Csneg(const Register& rd,
                    const Register& rn,
                    const Register& rm,
                    Condition cond);
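  // Illustrative usage (not from the original header): the conditional-select
  // family computes branch-free selections from the current flags:
  //
  //   masm.Cmp(x0, x1);
  //   masm.Csel(x2, x0, x1, hi);   // x2 = (x0 > x1) ? x0 : x1, unsigned
  //   masm.Cset(x3, eq);           // x3 = (x0 == x1) ? 1 : 0
  //   masm.Csinc(x4, x0, x1, ne);  // x4 = ne ? x0 : (x1 + 1)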
  inline void Extr(const Register& rd,
                   const Register& rn,
                   const Register& rm,
                   unsigned lsb);
  inline void Fabs(const FPRegister& fd, const FPRegister& fn);
  inline void Fadd(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fccmp(const FPRegister& fn,
                    const FPRegister& fm,
                    StatusFlags nzcv,
                    Condition cond);
  inline void Fcmp(const FPRegister& fn, const FPRegister& fm);
  inline void Fcmp(const FPRegister& fn, double value);
  inline void Fcsel(const FPRegister& fd,
                    const FPRegister& fn,
                    const FPRegister& fm,
                    Condition cond);
  inline void Fcvt(const FPRegister& fd, const FPRegister& fn);
  inline void Fcvtas(const Register& rd, const FPRegister& fn);
  inline void Fcvtau(const Register& rd, const FPRegister& fn);
  inline void Fcvtms(const Register& rd, const FPRegister& fn);
  inline void Fcvtmu(const Register& rd, const FPRegister& fn);
  inline void Fcvtns(const Register& rd, const FPRegister& fn);
  inline void Fcvtnu(const Register& rd, const FPRegister& fn);
  inline void Fcvtzs(const Register& rd, const FPRegister& fn);
  inline void Fcvtzu(const Register& rd, const FPRegister& fn);
  inline void Fdiv(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fmadd(const FPRegister& fd,
                    const FPRegister& fn,
                    const FPRegister& fm,
                    const FPRegister& fa);
  inline void Fmax(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fmaxnm(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm);
  inline void Fmin(const FPRegister& fd,
                   const FPRegister& fn,
                   const FPRegister& fm);
  inline void Fminnm(const FPRegister& fd,
                     const FPRegister& fn,
                     const FPRegister& fm);
  inline void Fmov(FPRegister fd, FPRegister fn);
  inline void Fmov(FPRegister fd, Register rn);
  inline void Fmov(FPRegister fd, double imm);
  inline void Fmov(FPRegister fd, float imm);
  // Provide a template to allow other types to be converted automatically.
  template<typename T>
  void Fmov(FPRegister fd, T imm) {
    ASSERT(allow_macro_instructions_);
    Fmov(fd, static_cast<double>(imm));
  }
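  // For example, Fmov(d0, 1) resolves to this template and emits the same
  // code as Fmov(d0, 1.0). Routing every other type through
  // static_cast<double> avoids ambiguous-overload errors between the double
  // and float immediate forms (illustrative note, not from the original
  // header).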
  inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
  void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
  void Tbz(const Register& rt, unsigned bit_pos, Label* label);
  void Abs(const Register& rd,
           const Register& rm,
           Label* is_not_representable = NULL,
           Label* is_representable = NULL);
  void Queue(const CPURegister& rt) {
    queued_.push_back(rt);
  }

  std::vector<CPURegister> queued_;
  inline void Claim(uint64_t count, uint64_t unit_size = kXRegSize);
  inline void Drop(uint64_t count, uint64_t unit_size = kXRegSize);
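  // Illustrative usage (not from the original header; assumes a
  // MacroAssembler `masm`): Claim and Drop move the current stack pointer in
  // units of unit_size bytes and pair with Poke/Peek for slot access:
  //
  //   masm.Claim(2);             // reserve two 8-byte slots
  //   masm.Poke(x0, 0);          // [sp + 0] = x0
  //   masm.Poke(x1, kXRegSize);  // [sp + 8] = x1
  //   masm.Drop(2);              // release both slots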
  inline void TestAndBranchIfAnySet(const Register& reg,
                                    const uint64_t bit_pattern,
                                    Label* label);
  inline void TestAndBranchIfAllClear(const Register& reg,
                                      const uint64_t bit_pattern,
                                      Label* label);
    ASSERT(sp_alignment >= 16);
  void LoadObject(Register result, Handle<Object> object) {
    if (object->IsHeapObject()) {
      LoadHeapObject(result, Handle<HeapObject>::cast(object));
    } else {
      Mov(result, Operand(object));
    }
  }
  static int SafepointRegisterStackIndex(int reg_code);
  template<typename Field>
  void DecodeField(Register reg) {
    static const uint64_t shift = Field::kShift + kSmiShift;
    static const uint64_t setbits = CountSetBits(Field::kMask, 32);
    Ubfx(reg, reg, shift, setbits);
  }
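  // Worked example (not from the original header): for a hypothetical Field
  // with kShift = 3 and kMask = 0xf8, the field occupies bits [7:3] of the
  // untagged value. CountSetBits(0xf8, 32) is 5, so on a smi-tagged input the
  // Ubfx above extracts 5 bits starting at bit (3 + kSmiShift), leaving the
  // raw field value in `reg`.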
  inline void JumpIfSmi(Register value,
                        Label* smi_label,
                        Label* not_smi_label = NULL);
  inline void JumpIfNotSmi(Register value, Label* not_smi_label);
  inline void JumpIfBothSmi(Register value1,
                            Register value2,
                            Label* both_smi_label,
                            Label* not_smi_label = NULL);
  inline void JumpIfEitherSmi(Register value1,
                              Register value2,
                              Label* either_smi_label,
                              Label* not_smi_label = NULL);
  inline void JumpIfEitherNotSmi(Register value1,
                                 Register value2,
                                 Label* not_smi_label);
  inline void JumpIfBothNotSmi(Register value1,
                               Register value2,
                               Label* not_smi_label);
  void JumpForHeapNumber(Register object,
                         Register heap_number_map,
                         Label* on_heap_number,
                         Label* on_not_heap_number = NULL);
  void JumpIfHeapNumber(Register object,
                        Label* on_heap_number,
                        Register heap_number_map = NoReg);
  void JumpIfNotHeapNumber(Register object,
                           Label* on_not_heap_number,
                           Register heap_number_map = NoReg);
  // Try to convert a double to a signed 32-bit integer.
  void TryConvertDoubleToInt32(Register as_int,
                               FPRegister value,
                               FPRegister scratch_d,
                               Label* on_successful_conversion = NULL,
                               Label* on_failed_conversion = NULL) {
    ASSERT(as_int.Is32Bits());
    TryConvertDoubleToInt(as_int, value, scratch_d, on_successful_conversion,
                          on_failed_conversion);
  }

  // Try to convert a double to a signed 64-bit integer.
  void TryConvertDoubleToInt64(Register as_int,
                               FPRegister value,
                               FPRegister scratch_d,
                               Label* on_successful_conversion = NULL,
                               Label* on_failed_conversion = NULL) {
    ASSERT(as_int.Is64Bits());
    TryConvertDoubleToInt(as_int, value, scratch_d, on_successful_conversion,
                          on_failed_conversion);
  }
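  // Illustrative usage (not from the original header; `deopt` is a
  // hypothetical bailout label): convert d0 to a 32-bit integer, branching
  // away if the double is not exactly representable:
  //
  //   masm.TryConvertDoubleToInt32(w0, d0, d1, NULL, &deopt);
  //   // Reaching here, w0 holds the integer value of d0.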
  void JumpIfEitherInstanceTypeIsNotSequentialAscii(
      Register first_object_instance_type,
      Register second_object_instance_type,
      Register scratch1,
      Register scratch2,
      Label* failure);
  void JumpIfBothInstanceTypesAreNotSequentialAscii(
      Register first_object_instance_type,
      Register second_object_instance_type,
      Register scratch1,
      Register scratch2,
      Label* failure);
  void CallCFunction(ExternalReference function,
                     int num_reg_arguments);
  void CallCFunction(ExternalReference function,
                     int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function,
                     int num_reg_arguments,
                     int num_double_arguments);
  void CallApiFunctionAndReturn(Register function_address,
                                ExternalReference thunk_ref,
                                int stack_space,
                                MemOperand return_value_operand,
                                MemOperand* context_restore_operand);
  void Jump(intptr_t target, RelocInfo::Mode rmode);
  void Call(Label* target);
  void Call(Handle<Code> code,
            RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            TypeFeedbackId ast_id = TypeFeedbackId::None());

  // For every Call variant, a matching CallSize function returns the size
  // (in bytes) of the call sequence.
  static int CallSize(Label* target);
  static int CallSize(Handle<Code> code,
                      RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
                      TypeFeedbackId ast_id = TypeFeedbackId::None());
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Handle<Code> code_constant,
                      Register code_reg,
                      Label* done,
                      InvokeFlag flag,
                      bool* definitely_mismatches,
                      const CallWrapper& call_wrapper);
  void InvokeCode(Register code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag,
                  const CallWrapper& call_wrapper);
  // Invoke the JavaScript function in the given register. Changes the current
  // context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);
  void InvokeFunction(Register function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);
  void InvokeFunction(Handle<JSFunction> function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);
  void set_allow_macro_instructions(bool value) {
    allow_macro_instructions_ = value;
  }
  bool allow_macro_instructions() const { return allow_macro_instructions_; }
  class NoUseRealAbortsScope {
   public:
    explicit NoUseRealAbortsScope(MacroAssembler* masm)
        : saved_(masm->use_real_aborts_), masm_(masm) {
      masm_->use_real_aborts_ = false;
    }
    ~NoUseRealAbortsScope() {
      masm_->use_real_aborts_ = saved_;
    }
   private:
    bool saved_;
    MacroAssembler* masm_;
  };
#ifdef ENABLE_DEBUGGER_SUPPORT
  void AllocateTwoByteString(Register result,
                             Register length,
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
                             Label* gc_required);
  void AllocateAsciiString(Register result,
                           Register length,
                           Register scratch1,
                           Register scratch2,
                           Register scratch3,
                           Label* gc_required);
  void AllocateTwoByteConsString(Register result,
                                 Register length,
                                 Register scratch1,
                                 Register scratch2,
                                 Label* gc_required);
  void AllocateAsciiConsString(Register result,
                               Register length,
                               Register scratch1,
                               Register scratch2,
                               Label* gc_required);
  void AllocateTwoByteSlicedString(Register result,
                                   Register length,
                                   Register scratch1,
                                   Register scratch2,
                                   Label* gc_required);
  void AllocateAsciiSlicedString(Register result,
                                 Register length,
                                 Register scratch1,
                                 Register scratch2,
                                 Label* gc_required);
  void JumpIfObjectType(Register object,
                        Register map,
                        Register type_reg,
                        InstanceType type,
                        Label* if_cond_pass,
                        Condition cond = eq);
  void JumpIfNotObjectType(Register object,
                           Register map,
                           Register type_reg,
                           InstanceType type,
                           Label* if_not_object);
  void JumpIfNotRoot(const Register& obj,
                     Heap::RootListIndex index,
                     Label* if_not_equal);
  void IsObjectJSStringType(Register object,
                            Register type,
                            Label* not_string,
                            Label* string = NULL);
  void CompareAndSplit(const Register& lhs,
                       const Operand& rhs,
                       Condition cond,
                       Label* if_true,
                       Label* if_false,
                       Label* fall_through);
  void TestAndSplit(const Register& reg,
                    uint64_t bit_pattern,
                    Label* if_all_clear,
                    Label* if_any_set,
                    Label* fall_through);
  void StoreNumberToDoubleElements(Register value_reg,
                                   Register key_reg,
                                   Register elements_reg,
                                   Register scratch1,
                                   FPRegister fpscratch1,
                                   Label* fail,
                                   int elements_offset = 0);
  void EmitSeqStringSetCharCheck(Register string,
                                 Register index,
                                 SeqStringSetCharCheckIndexType index_type,
                                 Register scratch,
                                 uint32_t encoding_mask);
  void CheckEnumCache(Register object,
                      Register null_value,
                      Register scratch0,
                      Register scratch1,
                      Register scratch2,
                      Register scratch3,
                      Label* call_runtime);
  void TestJSArrayForAllocationMemento(Register receiver,
                                       Register scratch1,
                                       Register scratch2,
                                       Label* no_memento_found);

  void JumpIfJSArrayHasAllocationMemento(Register receiver,
                                         Register scratch1,
                                         Register scratch2,
                                         Label* memento_found) {
    Label no_memento_found;
    TestJSArrayForAllocationMemento(receiver, scratch1, scratch2,
                                    &no_memento_found);
    B(eq, memento_found);
    Bind(&no_memento_found);
  }
  void EnterExitFrame(bool save_doubles,
                      const Register& scratch,
                      int extra_space = 0);
  void LeaveExitFrame(bool save_doubles,
                      Register argument_count,
                      bool restore_context);
  void CheckPageFlagClear(const Register& object,
                          const Register& scratch,
                          int mask,
                          Label* if_all_clear);

  void CheckMapDeprecated(Handle<Map> map,
                          Register scratch,
                          Label* if_deprecated);
  void JumpIfNotInNewSpace(Register object, Label* branch) {
    InNewSpace(object, ne, branch);
  }

  void JumpIfInNewSpace(Register object, Label* branch) {
    InNewSpace(object, eq, branch);
  }
  void RecordWrite(
      Register object,
      Register address,
      Register value,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK);
  void EnsureNotWhite(Register object,
                      Register scratch1,
                      Register scratch2,
                      Register scratch3,
                      Label* object_is_white_and_not_data);

  void JumpIfDataObject(Register value,
                        Register scratch,
                        Label* not_data_object);
  inline void GetMarkBits(Register addr_reg,
                          Register bitmap_reg,
                          Register mask_reg);
  void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
                                           ElementsKind transitioned_kind,
                                           Register map_in_out,
                                           Register scratch,
                                           Label* no_map_match);
  void Printf(const char* format,
              const CPURegister& arg0 = NoCPUReg,
              const CPURegister& arg1 = NoCPUReg,
              const CPURegister& arg2 = NoCPUReg,
              const CPURegister& arg3 = NoCPUReg);
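  // Illustrative usage (not from the original header): the arguments are
  // registers rather than C values, so a debugging trace over generated code
  // might look like:
  //
  //   masm.Printf("x0 = 0x%" PRIx64 "\n", x0);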
  static bool IsCodeAgeSequence(byte* sequence);
  void CopyFieldsUnrolledPairsHelper(Register dst,
                                     Register src,
                                     unsigned count,
  void PushHelper(int count, int size,
                  const CPURegister& src0, const CPURegister& src1,
                  const CPURegister& src2, const CPURegister& src3);
  void PopHelper(int count, int size,
                 const CPURegister& dst0, const CPURegister& dst1,
                 const CPURegister& dst2, const CPURegister& dst3);
  void PrepareForPush(Operand total_size);
  void PrepareForPop(Operand total_size);

  void PrepareForPush(int count, int size) { PrepareForPush(count * size); }
  void PrepareForPop(int count, int size) { PrepareForPop(count * size); }
  void JumpToHandlerEntry(Register exception,
                          Register object,
                          Register state,
                          Register scratch1,
                          Register scratch2);
  void InNewSpace(Register object, Condition cond, Label* branch);
  void TryConvertDoubleToInt(Register as_int,
                             FPRegister value,
                             FPRegister scratch_d,
                             Label* on_successful_conversion = NULL,
                             Label* on_failed_conversion = NULL);
  bool generating_stub_;
  bool allow_macro_instructions_;
  bool use_real_aborts_;

  Handle<Object> code_object_;

  CPURegList tmp_list_;
  CPURegList fptmp_list_;
  void InitializeNewString(Register string,
                           Register length,
                           Heap::RootListIndex map_index,
                           Register scratch1,
                           Register scratch2);
class InstructionAccurateScope {
 public:
  explicit InstructionAccurateScope(MacroAssembler* masm, size_t count = 0)
      : masm_(masm)
#ifdef DEBUG
        , size_(count * kInstructionSize)
#endif
  {
    // If the const or veneer pools need to be emitted, do it now, before they
    // are blocked for the duration of the scope.
    masm_->CheckConstPool(false, true);
    masm_->CheckVeneerPool(false, true);

    masm_->StartBlockPools();
#ifdef DEBUG
    if (count != 0) {
      masm_->bind(&start_);
    }
    previous_allow_macro_instructions_ = masm_->allow_macro_instructions();
    masm_->set_allow_macro_instructions(false);
#endif
  }

  ~InstructionAccurateScope() {
    masm_->EndBlockPools();
#ifdef DEBUG
    if (start_.is_bound()) {
      ASSERT(masm_->SizeOfCodeGeneratedSince(&start_) == size_);
    }
    masm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
#endif
  }
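  // Illustrative usage (not from the original header): the scope blocks pool
  // emission and, in debug builds, checks that exactly `count` instructions
  // were generated, so raw (lower-case) assembler calls are used inside it:
  //
  //   {
  //     InstructionAccurateScope scope(&masm, 2);
  //     masm.adr(x10, &target);
  //     masm.br(x10);
  //   }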
 private:
  MacroAssembler* masm_;
#ifdef DEBUG
  size_t size_;
  Label start_;
  bool previous_allow_macro_instructions_;
#endif
};
class UseScratchRegisterScope {
 public:
  explicit UseScratchRegisterScope(MacroAssembler* masm)
      : available_(masm->TmpList()),
        availablefp_(masm->FPTmpList()),
        old_available_(available_->list()),
        old_availablefp_(availablefp_->list()) {
    ASSERT(available_->type() == CPURegister::kRegister);
    ASSERT(availablefp_->type() == CPURegister::kFPRegister);
  }
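  // Illustrative usage (not from the original header; AcquireX is assumed
  // from the scratch-register pattern): temporaries come from the masm's
  // TmpList and are handed back when the scope is destroyed:
  //
  //   {
  //     UseScratchRegisterScope temps(&masm);
  //     Register scratch = temps.AcquireX();
  //     masm.Mov(scratch, 0xdeadbeef);
  //   }  // scratch is available for reuse here.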
class InlineSmiCheckInfo {
 public:
  explicit InlineSmiCheckInfo(Address info);

  bool HasSmiCheck() const {
    return smi_check_ != NULL;
  }

  static void Emit(MacroAssembler* masm,
                   const Register& reg,
                   const Label* smi_check);
  static void EmitNotInlined(MacroAssembler* masm) {
    Label unbound;
    Emit(masm, NoReg, &unbound);
  }
  class RegisterBits : public BitField<unsigned, 0, 5> {};
  class DeltaBits : public BitField<uint32_t, 5, 32 - 5> {};
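  // For illustration (not from the original header): BitField packs a value
  // into a fixed bit range of a word, so the smi-check info can hold a 5-bit
  // register code and a 27-bit delta in one 32-bit payload:
  //
  //   uint32_t info = RegisterBits::encode(reg_code) |
  //                   DeltaBits::encode(delta);
  //   unsigned code = RegisterBits::decode(info);  // bits [4:0]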
#ifdef GENERATED_CODE_COVERAGE
#error "Unsupported option"
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
#else
#define ACCESS_MASM(masm) masm->
#endif

#endif  // V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
void AssertRegisterIsClear(Register reg, BailoutReason reason)
void Csetm(const Register &rd, Condition cond)
void Neg(const Register &rd, const Operand &operand)
void Poke(const CPURegister &src, const Operand &offset)
void SmiUntag(Register reg, SBit s=LeaveCC)
void SmiAbs(const Register &smi, Label *slow)
void TestMapBitfield(Register object, uint64_t mask)
void Mvn(const Register &rd, uint64_t imm)
Isolate * isolate() const
void Adr(const Register &rd, Label *label)
void ClaimBySMI(const Register &count_smi, uint64_t unit_size=kXRegSize)
void TestAndBranchIfAllClear(const Register &reg, const uint64_t bit_pattern, Label *label)
void Cmn(const Register &rn, const Operand &operand)
void PushSafepointRegisters()
void Frintn(const FPRegister &fd, const FPRegister &fn)
void Adcs(const Register &rd, const Register &rn, const Operand &operand)
void Ands(const Register &rd, const Register &rn, const Operand &operand)
bool generating_stub() const
void Udiv(const Register &rd, const Register &rn, const Register &rm)
void Fminnm(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void FillFields(Register dst, Register field_count, Register filler)
void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index, BailoutReason reason=kRegisterDidNotMatchExpectedRoot)
static int SlotOffset(int index)
void Orr(const Register &rd, const Register &rn, const Operand &operand)
void GetRelocatedValueLocation(Register ldr_location, Register result)
void InvokeFunction(Register function, const ParameterCount &actual, InvokeFlag flag, const CallWrapper &call_wrapper)
void Fsqrt(const FPRegister &fd, const FPRegister &fn)
void CmovX(const Register &rd, const Register &rn, Condition cond)
void PrintfNoPreserve(const char *format, const CPURegister &arg0=NoCPUReg, const CPURegister &arg1=NoCPUReg, const CPURegister &arg2=NoCPUReg, const CPURegister &arg3=NoCPUReg)
void LoadElementsKindFromMap(Register result, Register map)
void SmiTag(Register reg, SBit s=LeaveCC)
void B(Label *label, BranchType type, Register reg=NoReg, int bit=-1)
void Ldp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &src)
void Lsr(const Register &rd, const Register &rn, unsigned shift)
void Tbz(const Register &rt, unsigned bit_pos, Label *label)
void ClampInt32ToUint8(Register in_out)
void CompareAndBranch(const Register &lhs, const Operand &rhs, Condition cond, Label *label)
void Fcvtas(const Register &rd, const FPRegister &fn)
void AllocateTwoByteSlicedString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
void Debug(const char *message, uint32_t code, Instr params=BREAK)
bool use_real_aborts() const
void Ubfx(Register dst, Register src, int lsb, int width, Condition cond=al)
void Smaddl(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
const unsigned kDRegSizeInBits
void AssertString(Register object)
void SmiUntagToFloat(FPRegister dst, Register src, UntagMode mode=kNotSpeculativeUntag)
void ExitFramePreserveFPRegs()
void IsObjectJSStringType(Register object, Register scratch, Label *fail)
void ConditionalCompareMacro(const Register &rn, const Operand &operand, StatusFlags nzcv, Condition cond, ConditionalCompareOp op)
static TypeFeedbackId None()
void Ubfiz(const Register &rd, const Register &rn, unsigned lsb, unsigned width)
void AlignAndSetCSPForFrame()
void Fabs(const FPRegister &fd, const FPRegister &fn)
void JumpForHeapNumber(Register object, Register heap_number_map, Label *on_heap_number, Label *on_not_heap_number=NULL)
void PushSRegList(RegList regs)
void JumpToExternalReference(const ExternalReference &builtin)
void JumpIfEitherInstanceTypeIsNotSequentialAscii(Register first_object_instance_type, Register second_object_instance_type, Register scratch1, Register scratch2, Label *failure)
#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP)
void LoadInstanceDescriptors(Register map, Register descriptors)
InstructionAccurateScope(MacroAssembler *masm, size_t count=0)
void Cneg(const Register &rd, const Register &rn, Condition cond)
void AllocateAsciiString(Register result, Register length, Register scratch1, Register scratch2, Register scratch3, Label *gc_required)
void PushCalleeSavedRegisters()
STATIC_ASSERT((reg_zero==(reg_not_zero^1))&&(reg_bit_clear==(reg_bit_set^1))&&(always==(never^1)))
void Sdiv(const Register &rd, const Register &rn, const Register &rm)
const unsigned kXRegSizeInBits
void Fcvtnu(const Register &rd, const FPRegister &fn)
void LoadFromNumberDictionary(Label *miss, Register elements, Register key, Register result, Register t0, Register t1, Register t2)
void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0, Register scratch1, Label *found)
void CzeroX(const Register &rd, Condition cond)
void Fccmp(const FPRegister &fn, const FPRegister &fm, StatusFlags nzcv, Condition cond)
void Fmul(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void CheckMap(Register obj, Register scratch, Handle< Map > map, Label *fail, SmiCheckType smi_check_type)
void Store(Register src, const MemOperand &dst, Representation r)
void GetBuiltinEntry(Register target, Builtins::JavaScript id)
void JumpIfSmi(Register value, Label *smi_label)
void Ldr(const FPRegister &ft, double imm)
void DispatchMap(Register obj, Register scratch, Handle< Map > map, Handle< Code > success, SmiCheckType smi_check_type)
TypeImpl< ZoneTypeConfig > Type
bool AllowThisStubCall(CodeStub *stub)
void DisableInstrumentation()
void EnterFrame(StackFrame::Type type, bool load_constant_pool=false)
void Peek(const CPURegister &dst, const Operand &offset)
void StoreToSafepointRegisterSlot(Register src, Register dst)
void PopSafepointRegistersAndDoubles()
void Adc(const Register &rd, const Register &rn, const Operand &operand)
void LeaveExitFrame(bool save_doubles, Register argument_count, bool restore_context)
const Register & StackPointer() const
static void Emit(MacroAssembler *masm, const Register &reg, const Label *smi_check)
void CheckFastObjectElements(Register map, Register scratch, Label *fail)
static void EmitNotInlined(MacroAssembler *masm)
void EnumLengthSmi(Register dst, Register map)
static const Function * FunctionForId(FunctionId id)
void Bfi(Register dst, Register src, Register scratch, int lsb, int width, Condition cond=al)
void PopSizeRegList(RegList registers, unsigned reg_size, CPURegister::RegisterType type=CPURegister::kRegister)
NoUseRealAbortsScope(MacroAssembler *masm)
void Fcsel(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, Condition cond)
void InvokeCode(Register code, const ParameterCount &expected, const ParameterCount &actual, InvokeFlag flag, const CallWrapper &call_wrapper)
void ThrowIf(Condition cc, BailoutReason reason)
#define ASSERT(condition)
void PushMultipleTimes(CPURegister src, Register count)
void JumpIfBothNotSmi(Register value1, Register value2, Label *not_smi_label)
void CompareMap(Register obj, Register scratch, Handle< Map > map, Label *early_success)
void AssertNotSmi(Register object)
void RecordWriteField(Register object, int offset, Register value, Register scratch, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action=EMIT_REMEMBERED_SET, SmiCheck smi_check=INLINE_SMI_CHECK)
void Fcvtau(const Register &rd, const FPRegister &fn)
void Subs(const Register &rd, const Register &rn, const Operand &operand)
void PushTryHandler(StackHandler::Kind kind, int handler_index)
void LoadTransitionedArrayMapConditional(ElementsKind expected_kind, ElementsKind transitioned_kind, Register map_in_out, Register scratch, Label *no_map_match)
void And(Register dst, Register src1, const Operand &src2, Condition cond=al)
void NumberOfOwnDescriptors(Register dst, Register map)
void JumpIfMinusZero(DoubleRegister input, Label *on_negative_zero)
void CompareAndSplit(const Register &lhs, const Operand &rhs, Condition cond, Label *if_true, Label *if_false, Label *fall_through)
void Ldnp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &src)
void JumpIfRoot(const Register &obj, Heap::RootListIndex index, Label *if_equal)
void IncrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void PopXRegList(RegList regs)
void Bic(const Register &rd, const Register &rn, const Operand &operand)
void Mul(const Register &rd, const Register &rn, const Register &rm)
void PushXRegList(RegList regs)
void Sbfx(Register dst, Register src, int lsb, int width, Condition cond=al)
void Negs(const Register &rd, const Operand &operand)
void Umsubl(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
void Load(Register dst, const MemOperand &src, Representation r)
void Extr(const Register &rd, const Register &rn, const Register &rm, unsigned lsb)
void PopSRegList(RegList regs)
void LoadStoreMacro(const CPURegister &rt, const MemOperand &addr, LoadStoreOp op)
void CompareRoot(Register obj, Heap::RootListIndex index)
void AssertSmi(Register object)
void JumpIfNotUniqueName(Register reg, Label *not_unique_name)
void DecrementCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void LoadTrueFalseRoots(Register true_root, Register false_root)
SeqStringSetCharCheckIndexType
void TruncatingDiv(Register result, Register dividend, int32_t divisor)
void Hint(SystemHint code)
void EmitSeqStringSetCharCheck(Register string, Register index, Register value, uint32_t encoding_mask)
void Msub(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
void Csinv(const Register &rd, const Register &rn, const Register &rm, Condition cond)
void EnumLengthUntagged(Register dst, Register map)
Handle< Object > CodeObject()
void Abort(BailoutReason msg)
void IsObjectNameType(Register object, Register scratch, Label *fail)
int CountSetBits(uint64_t value, int width)
void Eor(const Register &rd, const Register &rn, const Operand &operand)
const unsigned kWRegSizeInBits
void Csinc(const Register &rd, const Register &rn, const Register &rm, Condition cond)
Instruction * SmiCheck() const
void JumpIfInstanceTypeIsNotSequentialAscii(Register type, Register scratch, Label *failure)
void CompareInstanceType(Register map, Register type_reg, InstanceType type)
static bool IsImmMovz(uint64_t imm, unsigned reg_size)
void JumpIfNotHeapNumber(Register object, Register heap_number_map, Register scratch, Label *on_not_heap_number)
MemOperand UntagSmiFieldMemOperand(Register object, int offset)
void Sbcs(const Register &rd, const Register &rn, const Operand &operand)
MemOperand UntagSmiMemOperand(Register object, int offset)
void IsObjectJSObjectType(Register heap_object, Register map, Register scratch, Label *fail)
void CompareObjectType(Register heap_object, Register map, Register type_reg, InstanceType type)
void PushSizeRegList(RegList registers, unsigned reg_size, CPURegister::RegisterType type=CPURegister::kRegister)
void Fadd(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void RecordWriteContextSlot(Register context, int offset, Register value, Register scratch, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action=EMIT_REMEMBERED_SET, SmiCheck smi_check=INLINE_SMI_CHECK)
void ExitFrameRestoreFPRegs()
void Cbnz(const Register &rt, Label *label)
void TestJSArrayForAllocationMemento(Register receiver_reg, Register scratch_reg, Label *no_memento_found)
void AssertHasValidColor(const Register &reg)
void Sxtb(const Register &rd, const Register &rn)
void Uxtw(const Register &rd, const Register &rn)
static int ActivationFrameAlignment()
void TruncateNumberToI(Register object, Register result, Register heap_number_map, Register scratch1, Label *not_int32)
void CheckFastElements(Register map, Register scratch, Label *fail)
void TestForMinusZero(DoubleRegister input)
void TryConvertDoubleToInt32(Register as_int, FPRegister value, FPRegister scratch_d, Label *on_successful_conversion=NULL, Label *on_failed_conversion=NULL)
Condition InvertCondition(Condition cond)
void AddSubWithCarryMacro(const Register &rd, const Register &rn, const Operand &operand, FlagsUpdate S, AddSubWithCarryOp op)
void LoadGlobalFunction(int index, Register function)
void TryGetFunctionPrototype(Register function, Register result, Register scratch, Label *miss, bool miss_on_bound_function=false)
InlineSmiCheckInfo(Address info)
static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size)
void SetStackPointer(const Register &stack_pointer)
void PushWRegList(RegList regs)
void TestAndBranchIfAnySet(const Register &reg, const uint64_t bit_pattern, Label *label)
void Sbfiz(const Register &rd, const Register &rn, unsigned lsb, unsigned width)
void CallStub(CodeStub *stub, TypeFeedbackId ast_id=TypeFeedbackId::None(), Condition cond=al)
void CallCFunction(ExternalReference function, int num_arguments)
void Fcmp(const FPRegister &fn, const FPRegister &fm)
void Dmb(BarrierDomain domain, BarrierType type)
void DecodeField(Register reg)
const unsigned kInstructionSize
void AllocateAsciiConsString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
void Fsub(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void Fcvtns(const Register &rd, const FPRegister &fn)
void IsInstanceJSObjectType(Register map, Register scratch, Label *fail)
void Eon(const Register &rd, const Register &rn, const Operand &operand)
void JumpIfHeapNumber(Register object, Label *on_heap_number, Register heap_number_map=NoReg)
void Rev32(const Register &rd, const Register &rn)
void Jump(Register target, Condition cond=al)
void RecordWrite(Register object, Register address, Register value, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action=EMIT_REMEMBERED_SET, SmiCheck smi_check=INLINE_SMI_CHECK)
void JumpIfDataObject(Register value, Register scratch, Label *not_data_object)
void TruncateHeapNumberToI(Register result, Register object)
void Bfxil(const Register &rd, const Register &rn, unsigned lsb, unsigned width)
const unsigned kSRegSizeInBits
void Allocate(int object_size, Register result, Register scratch1, Register scratch2, Label *gc_required, AllocationFlags flags)
void Fmov(FPRegister fd, FPRegister fn)
void AllocateHeapNumber(Register result, Register scratch1, Register scratch2, Register heap_number_map, Label *gc_required, TaggingMode tagging_mode=TAG_RESULT)
void CopyBytes(Register src, Register dst, Register length, Register scratch)
void EnableInstrumentation()
void LoadHeapObject(Register dst, Handle< HeapObject > object)
void Uxtb(const Register &rd, const Register &rn)
void Throw(Register value)
void Fcvtzs(const Register &rd, const FPRegister &fn)
void JumpIfEitherNotSmi(Register value1, Register value2, Label *not_smi_label)
void ThrowIfSmi(const Register &value, BailoutReason reason)
void Sxtw(const Register &rd, const Register &rn)
void Ngc(const Register &rd, const Operand &operand)
void JumpIfNotObjectType(Register object, Register map, Register type_reg, InstanceType type, Label *if_not_object)
void PushDRegList(RegList regs)
void Bics(const Register &rd, const Register &rn, const Operand &operand)
void set_has_frame(bool value)
void SetCounter(StatsCounter *counter, int value, Register scratch1, Register scratch2)
void Fmin(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void Cinc(const Register &rd, const Register &rn, Condition cond)
bool NeedExtraInstructionsOrRegisterBranch(Label *label, ImmBranchType branch_type)
void Mneg(const Register &rd, const Register &rn, const Register &rm)
void PopCPURegList(CPURegList registers)
void SmiUntagToDouble(FPRegister dst, Register src, UntagMode mode=kNotSpeculativeUntag)
MacroAssembler(Isolate *isolate, void *buffer, int size)
void PopWRegList(RegList regs)
void Csneg(const Register &rd, const Register &rn, const Register &rm, Condition cond)
void LoadContext(Register dst, int context_chain_length)
void Cset(const Register &rd, Condition cond)
static int CallSize(Register target, Condition cond=al)
void CallExternalReference(const ExternalReference &ext, int num_arguments)
void Fnmadd(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, const FPRegister &fa)
void CallApiFunctionAndReturn(Register function_address, ExternalReference thunk_ref, int stack_space, MemOperand return_value_operand, MemOperand *context_restore_operand)
void AssertFastElements(Register elements)
void Fneg(const FPRegister &fd, const FPRegister &fn)
void CheckMapDeprecated(Handle< Map > map, Register scratch, Label *if_deprecated)
void PushSafepointRegistersAndDoubles()
void JumpIfBlack(Register object, Register scratch0, Register scratch1, Label *on_black)
void Fcvtzu(const Register &rd, const FPRegister &fn)
void ClampDoubleToUint8(Register result_reg, DwVfpRegister input_reg, LowDwVfpRegister double_scratch)
void AllocateTwoByteConsString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
void JumpIfJSArrayHasAllocationMemento(Register receiver, Register scratch1, Register scratch2, Label *memento_found)
void AnnotateInstrumentation(const char *marker_name)
void Cbz(const Register &rt, Label *label)
void Drop(int count, Condition cond=al)
void Ngcs(const Register &rd, const Operand &operand)
void JumpIfNotInNewSpace(Register object, Label *branch)
void Sxth(const Register &rd, const Register &rn)
void Cls(const Register &rd, const Register &rn)
void JumpIfInNewSpace(Register object, Label *branch)
void GetBuiltinFunction(Register target, Builtins::JavaScript id)
void Stp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &dst)
void CheckAccessGlobalProxy(Register holder_reg, Register scratch, Label *miss)
void CopyFields(Register dst, Register src, LowDwVfpRegister double_scratch, int field_count)
void Orn(const Register &rd, const Register &rn, const Operand &operand)
void Abs(const Register &rd, const Register &rm, Label *is_not_representable=NULL, Label *is_representable=NULL)
void LookupNumberStringCache(Register object, Register result, Register scratch1, Register scratch2, Register scratch3, Label *not_found)
void TruncateDoubleToI(Register result, DwVfpRegister double_input)
void LoadObject(Register result, Handle< Object > object)
static bool IsImmMovn(uint64_t imm, unsigned reg_size)
void Add(const Register &rd, const Register &rn, const Operand &operand)
void Tbnz(const Register &rt, unsigned bit_pos, Label *label)
void Rev(const Register &rd, const Register &rn)
void Fmsub(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, const FPRegister &fa)
void Printf(const char *format, const CPURegister &arg0=NoCPUReg, const CPURegister &arg1=NoCPUReg, const CPURegister &arg2=NoCPUReg, const CPURegister &arg3=NoCPUReg)
void Clz(const Register &rd, const Register &rn)
void Ldpsw(const Register &rt, const Register &rt2, const MemOperand &src)
void CallRuntimeSaveDoubles(Runtime::FunctionId id)
UseScratchRegisterScope(MacroAssembler *masm)
Handle< T > handle(T *t, Isolate *isolate)
void ThrowUncatchable(Register value)
void Fmov(FPRegister fd, T imm)
void StoreRoot(Register source, Heap::RootListIndex index, Condition cond=al)
MemOperand FieldMemOperand(Register object, int offset)
~InstructionAccurateScope()
void Smulh(const Register &rd, const Register &rn, const Register &rm)
void LoadGlobalFunctionInitialMap(Register function, Register map, Register scratch)
void GetNumberHash(Register t0, Register scratch)
void hint(SystemHint code)
void CallRuntime(const Runtime::Function *f, int num_arguments, SaveFPRegsMode save_doubles=kDontSaveFPRegs)
void Rbit(const Register &rd, const Register &rn)
void Frintz(const FPRegister &fd, const FPRegister &fn)
void TryConvertDoubleToInt64(Register as_int, FPRegister value, FPRegister scratch_d, Label *on_successful_conversion=NULL, Label *on_failed_conversion=NULL)
void Msr(SystemRegister sysreg, const Register &rt)
~UseScratchRegisterScope()
void Mrs(const Register &rt, SystemRegister sysreg)
void Ror(const Register &rd, const Register &rs, unsigned shift)
Register UnsafeAcquire(const Register &reg)
void BumpSystemStackPointer(const Operand &space)
void PokePair(const CPURegister &src1, const CPURegister &src2, int offset)
CPURegister::RegisterType type() const
void JumpIfNotRoot(const Register &obj, Heap::RootListIndex index, Label *if_not_equal)
void JumpIfBothInstanceTypesAreNotSequentialAscii(Register first_object_instance_type, Register second_object_instance_type, Register scratch1, Register scratch2, Label *failure)
void Frinta(const FPRegister &fd, const FPRegister &fn)
void Fmadd(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, const FPRegister &fa)
void Cmp(const Register &rn, const Operand &operand)
void Tst(const Register &rn, const Operand &operand)
void UndoAllocationInNewSpace(Register object, Register scratch)
void PeekPair(const CPURegister &dst1, const CPURegister &dst2, int offset)
void Prologue(PrologueFrameMode frame_mode)
const Register & SmiRegister() const
void Lsl(const Register &rd, const Register &rn, unsigned shift)
void Fnmsub(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm, const FPRegister &fa)
void LoadFromSafepointRegisterSlot(Register dst, Register src)
void JumpIfObjectType(Register object, Register map, Register type_reg, InstanceType type, Label *if_cond_pass, Condition cond=eq)
void Call(Register target, Condition cond=al)
void Scvtf(const FPRegister &fd, const Register &rn, unsigned fbits=0)
void AllocateAsciiSlicedString(Register result, Register length, Register scratch1, Register scratch2, Label *gc_required)
void AssertStackConsistency()
void Ccmp(const Register &rn, const Operand &operand, StatusFlags nzcv, Condition cond)
void set_generating_stub(bool value)
void CheckPageFlagClear(const Register &object, const Register &scratch, int mask, Label *if_all_clear)
void CheckRegisterIsClear(Register reg, BailoutReason reason)
void Check(Condition cond, BailoutReason reason)
void CallRuntime(Runtime::FunctionId id, int num_arguments, SaveFPRegsMode save_doubles=kDontSaveFPRegs)
void InlineData(uint64_t data)
void JumpIfNotSmi(Register value, Label *not_smi_label)
void Smsubl(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
void PushCPURegList(CPURegList registers)
void Assert(Condition cond, BailoutReason reason)
void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, const CallWrapper &call_wrapper=NullCallWrapper())
void Claim(uint64_t count, uint64_t unit_size=kXRegSize)
void Br(const Register &xn)
void Adds(const Register &rd, const Register &rn, const Operand &operand)
void Fcvtms(const Register &rd, const FPRegister &fn)
void StoreNumberToDoubleElements(Register value_reg, Register key_reg, Register elements_reg, Register scratch1, LowDwVfpRegister double_scratch, Label *fail, int elements_offset=0)
void Fmax(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void TailCallStub(CodeStub *stub, Condition cond=al)
void Fcvtmu(const Register &rd, const FPRegister &fn)
void Smull(const Register &rd, const Register &rn, const Register &rm)
void TestAndSplit(const Register &reg, uint64_t bit_pattern, Label *if_all_clear, Label *if_any_set, Label *fall_through)
void Rev16(const Register &rd, const Register &rn)
void PopDRegList(RegList regs)
void Movk(const Register &rd, uint64_t imm, int shift=-1)
void Stnp(const CPURegister &rt, const CPURegister &rt2, const MemOperand &dst)
void AssertName(Register object)
void Csel(const Register &rd, const Register &rn, const Operand &operand, Condition cond)
void EnsureNotWhite(Register object, Register scratch1, Register scratch2, Register scratch3, Label *object_is_white_and_not_data)
static void EmitCodeAgeSequence(Assembler *assm, Code *stub)
void InitializeRootRegister()
void JumpIfEitherIsNotSequentialAsciiStrings(Register first, Register second, Register scratch1, Register scratch2, Label *failure, SmiCheckType smi_check=DO_SMI_CHECK)
void Ccmn(const Register &rn, const Operand &operand, StatusFlags nzcv, Condition cond)
void DropBySMI(const Register &count_smi, uint64_t unit_size=kXRegSize)
void PopCalleeSavedRegisters()
int LeaveFrame(StackFrame::Type type)
void JumpIfEitherSmi(Register reg1, Register reg2, Label *on_either_smi)
void Madd(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
void Asr(const Register &rd, const Register &rn, unsigned shift)
MemOperand ContextMemOperand(Register context, int index)
static bool IsYoungSequence(byte *sequence)
void IndexFromHash(Register hash, Register index)
void TailCallExternalReference(const ExternalReference &ext, int num_arguments, int result_size)
void Queue(const CPURegister &rt)
void JumpIfBothSmi(Register value1, Register value2, Label *both_smi_label, Label *not_smi_label=NULL)
void EnterExitFrame(bool save_doubles, int stack_space=0)
void Sbc(const Register &rd, const Register &rn, const Operand &operand)
void AddSubMacro(const Register &rd, const Register &rn, const Operand &operand, FlagsUpdate S, AddSubOp op)
void AssertIsString(const Register &object)
void CheckPageFlagSet(const Register &object, const Register &scratch, int mask, Label *if_any_set)
void Umaddl(const Register &rd, const Register &rn, const Register &rm, const Register &ra)
BranchType InvertBranchType(BranchType type)
void EmitFrameSetupForCodeAgePatching()
void TailCallRuntime(Runtime::FunctionId fid, int num_arguments, int result_size)
void AllocateTwoByteString(Register result, Register length, Register scratch1, Register scratch2, Register scratch3, Label *gc_required)
PushPopQueue(MacroAssembler *masm)
void Blr(const Register &xn)
Register AcquireSameSizeAs(const Register &reg)
void Sub(const Register &rd, const Register &rn, const Operand &operand)
void Fdiv(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void LoadRoot(Register destination, Heap::RootListIndex index, Condition cond=al)
void RememberedSetHelper(Register object, Register addr, Register scratch, SaveFPRegsMode save_fp, RememberedSetFinalAction and_then)
void HasColor(Register object, Register scratch0, Register scratch1, Label *has_color, int first_bit, int second_bit)
void Fcvt(const FPRegister &fd, const FPRegister &fn)
void Dsb(BarrierDomain domain, BarrierType type)
MemOperand GlobalObjectMemOperand()
void Cinv(const Register &rd, const Register &rn, Condition cond)
void Fmaxnm(const FPRegister &fd, const FPRegister &fn, const FPRegister &fm)
void CheckEnumCache(Register null_value, Label *call_runtime)
void PopSafepointRegisters()
void AssertUndefinedOrAllocationSite(Register object, Register scratch)
void AllocateHeapNumberWithValue(Register result, DwVfpRegister value, Register scratch1, Register scratch2, Register heap_number_map, Label *gc_required)
void LogicalMacro(const Register &rd, const Register &rn, const Operand &operand, LogicalOp op)
void Ucvtf(const FPRegister &fd, const Register &rn, unsigned fbits=0)
void Uxth(const Register &rd, const Register &rn)
static const int kCallApiFunctionSpillSpace
void Move(Register dst, Register src)
void Mov(const Register &rd, const Operand &operand, DiscardMoveMode discard_mode=kDontDiscardForSameWReg)