const int kCallInstructionSizeInWords = 4;
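
// Patches the optimized code so that every recorded lazy-deopt pc calls the
// matching deoptimization entry. The relocation info is invalidated first,
// since it no longer describes the patched code.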
void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
  Address code_start_address = code->instruction_start();
  // The relocation info becomes invalid once the code is patched below.
  code->InvalidateRelocation();

  if (FLAG_zap_code_space) {
    // Fail hard and early if we enter this code object again.
    byte* pointer = code->FindCodeAgeSequence();
    if (pointer != NULL) {
      pointer += kNoCodeAgeSequenceLength * Assembler::kInstrSize;
    } else {
      pointer = code->instruction_start();
    }
    CodePatcher patcher(pointer, 1);
    patcher.masm()->break_(0xCC);

    DeoptimizationInputData* data =
        DeoptimizationInputData::cast(code->deoptimization_data());
    int osr_offset = data->OsrPcOffset()->value();
    if (osr_offset > 0) {
      CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
      osr_patcher.masm()->break_(0xCC);
    }
  }
  DeoptimizationInputData* deopt_data =
      DeoptimizationInputData::cast(code->deoptimization_data());
  SharedFunctionInfo* shared =
      SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
  shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
  Address prev_call_address = NULL;
  // For each LLazyBailout instruction insert a call to the corresponding
  // deoptimization entry.
  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
    if (deopt_data->Pc(i)->value() == -1) continue;
    Address call_address = code_start_address + deopt_data->Pc(i)->value();
    Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
    int call_size_in_bytes =
        MacroAssembler::CallSize(deopt_entry, RelocInfo::NONE32);
    int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
    CodePatcher patcher(call_address, call_size_in_words);
    patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
    ASSERT(prev_call_address == NULL ||
           call_address >= prev_call_address + patch_size());
    prev_call_address = call_address;
  }
}
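
// Captures the live machine state of the optimized frame into the
// deoptimizer's input FrameDescription.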
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
  // Set the register values. The values are not important as there are no
  // callee saved registers in JavaScript frames, so all registers are
  // spilled. Registers fp and sp are set to the correct values though.
  for (int i = 0; i < Register::kNumRegisters; i++) {
    input_->SetRegister(i, i * 4);
  }
  input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
  input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
    input_->SetDoubleRegister(i, 0.0);
  }

  // Fill the frame content from the actual data on the frame.
  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
    input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
  }
}
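
// The stub failure trampoline expects the handler parameter count in s0 and
// the address of the C++ deoptimization handler in s2.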
void Deoptimizer::SetPlatformCompiledStubRegisters(
    FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
  ApiFunction function(descriptor->deoptimization_handler_);
  ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
  intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
  int params = descriptor->GetHandlerParameterCount();
  output_frame->SetRegister(s0.code(), params);
  output_frame->SetRegister(s2.code(), handler);
}
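
// Double registers are forwarded unchanged from the input frame to the
// output frame.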
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
  for (int i = 0; i < DoubleRegister::kMaxNumAllocatableRegisters; ++i) {
    double double_value = input_->GetDoubleRegister(i);
    output_frame->SetDoubleRegister(i, double_value);
  }
}
bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
  // There is no dynamic alignment padding on MIPS in the input frame.
  return false;
}
Code* Deoptimizer::NotifyStubFailureBuiltin() {
  return isolate_->builtins()->builtin(
      Builtins::kNotifyStubFailureSaveDoubles);
}
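
#define __ masm()->

// Generates the common deoptimization entry code: the register state is
// saved, Deoptimizer::New() and ComputeOutputFrames() are called in C++, and
// the output frames are then built on the stack before jumping to the
// continuation.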
void Deoptimizer::EntryGenerator::Generate() {
  GeneratePrologue();
  const int kNumberOfRegisters = Register::kNumRegisters;

  RegList restored_regs = kJSCallerSaved | kCalleeSaved;
  RegList saved_regs = restored_regs | sp.bit() | ra.bit();

  const int kDoubleRegsSize =
      kDoubleSize * FPURegister::kMaxNumAllocatableRegisters;
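  // Reserve space on the stack for saving the FPU registers.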
  __ Subu(sp, sp, Operand(kDoubleRegsSize));
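  // Store the general-purpose registers in saved_regs on the stack; slots for
  // the other registers are left as gaps so that the offsets stay uniform.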
  for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
    if ((saved_regs & (1 << i)) != 0) {
      __ sw(ToRegister(i), MemOperand(sp, kPointerSize * i));
    }
  }
  const int kSavedRegistersAreaSize =
      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
  __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
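  // Allocate a new Deoptimizer object via a C call: the bailout type is
  // passed in a1 and the isolate as the last argument; the call cannot
  // trigger a GC.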
  __ PrepareCallCFunction(6, t1);
  __ li(a1, Operand(type()));  // Bailout type.
  __ li(t1, Operand(ExternalReference::isolate_address(isolate())));
  // Call Deoptimizer::New().
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
  }
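  // Copy the core registers saved above into FrameDescription::registers_;
  // in debug builds the unsaved slots are filled with a zap value.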
  ASSERT(Register::kNumRegisters == kNumberOfRegisters);
  for (int i = 0; i < kNumberOfRegisters; i++) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    if ((saved_regs & (1 << i)) != 0) {
      __ lw(a2, MemOperand(sp, i * kPointerSize));
      __ sw(a2, MemOperand(a1, offset));
    } else if (FLAG_debug_code) {
      __ li(a2, kDebugZapValue);
      __ sw(a2, MemOperand(a1, offset));
    }
  }
    int dst_offset = i * kDoubleSize + double_regs_offset;
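  // Remove the bailout id and the saved registers from the stack.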
  __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
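  // Copy the remaining frame contents into the input FrameDescription,
  // popping words until sp reaches the unwinding limit held in a2.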
  Label pop_loop;
  Label pop_loop_header;
  __ BranchShort(&pop_loop_header);
  __ bind(&pop_loop);
  __ pop(t0);
  __ sw(t0, MemOperand(a3, 0));
  __ addiu(a3, a3, sizeof(uint32_t));
  __ bind(&pop_loop_header);
  __ BranchShort(&pop_loop, ne, a2, Operand(sp));
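  // Call Deoptimizer::ComputeOutputFrames() to build the output frame
  // descriptions.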
  __ PrepareCallCFunction(1, a1);
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(
        ExternalReference::compute_output_frames_function(isolate()), 1);
  }
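  // Replace the current (input) frame with the output frames.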
  Label outer_push_loop, inner_push_loop,
      outer_loop_header, inner_loop_header;
  __ jmp(&outer_loop_header);
  __ bind(&outer_push_loop);
  // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
  __ lw(a2, MemOperand(t0, 0));  // output_[ix]
  __ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
  __ jmp(&inner_loop_header);
  __ bind(&inner_push_loop);
  __ Subu(a3, a3, Operand(sizeof(uint32_t)));
  __ Addu(t2, a2, Operand(a3));
  __ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
  __ push(t3);
  __ bind(&inner_loop_header);
  __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));

  __ Addu(t0, t0, Operand(kPointerSize));
  __ bind(&outer_loop_header);
  __ BranchShort(&outer_push_loop, lt, t0, Operand(a1));
    int src_offset = i * kDoubleSize + double_regs_offset;
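  // 'at' is used below as the base register while restoring, so it must not
  // be among the registers that get restored.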
  ASSERT(!(at.bit() & restored_regs));
  __ mov(at, a2);
  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    if ((restored_regs & (1 << i)) != 0) {
      __ lw(ToRegister(i), MemOperand(at, offset));
    }
  }
  __ InitializeRootRegister();
  __ stop("Unreachable.");
}
359 __ bind(&table_start);
360 for (
int i = 0; i < count(); i++) {
367 const int remaining_entries = (count() - i) * table_entry_size_;
368 __ Addu(t9, t9, remaining_entries);
375 while (table_entry_size_ > (masm()->SizeOfCodeGeneratedSince(&start))) {
379 ASSERT_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
382 ASSERT_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
383 count() * table_entry_size_);
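
// On MIPS the caller's pc and fp are stored as ordinary frame slots.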
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}


void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}