void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
  // The relocation information becomes invalid once the code is patched below.
  code->InvalidateRelocation();

  DeoptimizationInputData* deopt_data =
      DeoptimizationInputData::cast(code->deoptimization_data());
  SharedFunctionInfo* shared =
      SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
  shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
  Address code_start_address = code->instruction_start();
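  // For each deoptimization point recorded in the input data, patch the code
  // at its return address to call the matching lazy deoptimization entry.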
  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
    if (deopt_data->Pc(i)->value() == -1) continue;
    Address call_address = code_start_address + deopt_data->Pc(i)->value();
    Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
    // The patched call sequence ends with the 64-bit deoptimization entry
    // address stored as an inline literal.
    patcher.dc64(reinterpret_cast<intptr_t>(deopt_entry));
    ASSERT((prev_call_address == NULL) ||
           (call_address >= prev_call_address + patch_size()));
    prev_call_address = call_address;
  }
}
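
// Capture the register state and stack contents of the optimized frame into
// the input FrameDescription.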
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
  input_->SetRegister(jssp.code(), reinterpret_cast<intptr_t>(frame->sp()));
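
// ARM64 optimized frames never require dynamic alignment padding.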
bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
  return false;
}
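
// x0 carries the number of parameters expected by the stub failure handler
// and x1 carries the handler's entry address.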
void Deoptimizer::SetPlatformCompiledStubRegisters(
    FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
  ApiFunction function(descriptor->deoptimization_handler_);
  ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
  intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
  int params = descriptor->GetHandlerParameterCount();
  output_frame->SetRegister(x0.code(), params);
  output_frame->SetRegister(x1.code(), handler);
}
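
// Copy the double registers recorded in the input frame into the output
// frame so their values survive the deoptimization.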
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
  for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
    double double_value = input_->GetDoubleRegister(i);
    output_frame->SetDoubleRegister(i, double_value);
  }
}
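
// Stub failures must preserve the double registers, so the SaveDoubles
// variant of the NotifyStubFailure builtin is used.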
Code* Deoptimizer::NotifyStubFailureBuiltin() {
  return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
}
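
// Generate() emits the common deoptimization entry code. In outline: save the
// full register state, construct a Deoptimizer object describing the
// optimized frame, copy that frame into the input FrameDescription, let the
// deoptimizer compute the unoptimized output frames, materialize those frames
// on the stack, restore registers from the last output frame and jump to the
// continuation.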
void Deoptimizer::EntryGenerator::Generate() {
  GeneratePrologue();

  // Save all allocatable floating point registers.
  CPURegList saved_fp_registers(CPURegister::kFPRegister, kDRegSizeInBits,
                                FPRegister::kAllocatableFPRegisters);
  __ PushCPURegList(saved_fp_registers);

  // Save all the core registers except the stack pointers and lr; fp is added
  // back explicitly.
  CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 27);
  saved_registers.Combine(fp);
  __ PushCPURegList(saved_registers);

  const int kSavedRegistersAreaSize =
      (saved_registers.Count() * kXRegSize) +
      (saved_fp_registers.Count() * kDRegSize);

  // Floating point registers are saved on the stack above the core registers.
  const int kFPRegistersOffset = saved_registers.Count() * kXRegSize;
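  // Collect the arguments for Deoptimizer::New(): the bailout id pushed by
  // the entry table, the return address into the optimized code and the
  // fp-to-sp delta of the optimized frame.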
  Register bailout_id = x2;
  __ Peek(bailout_id, kSavedRegistersAreaSize);

  Register code_object = x3;
  Register fp_to_sp = x4;
  // lr still holds the return address into the optimized code, which is the
  // lazy deoptimization point.
  __ Mov(code_object, lr);
  // Compute the fp-to-sp delta, skipping the saved registers and the bailout
  // id slot.
  __ Add(fp_to_sp, masm()->StackPointer(),
         kSavedRegistersAreaSize + (1 * kPointerSize));
  __ Sub(fp_to_sp, fp, fp_to_sp);
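  // Call Deoptimizer::New() with six arguments: function, bailout type,
  // bailout id, return address, fp-to-sp delta and isolate.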
  __ Mov(x5, ExternalReference::isolate_address(isolate()));

  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
  }

  // The new Deoptimizer object is returned in x0.
  Register deoptimizer = x0;
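  // Copy the saved core registers, then the saved floating point registers,
  // from the stack into the input FrameDescription.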
  CPURegList copy_to_input = saved_registers;
  for (int i = 0; i < saved_registers.Count(); i++) {
    CPURegister current_reg = copy_to_input.PopLowestIndex();
  for (int i = 0; i < saved_fp_registers.Count(); i++) {
    int src_offset = kFPRegistersOffset + (i * kDoubleSize);
    __ Peek(x2, src_offset);
  // Remove the bailout id and the saved registers from the stack.
  __ Drop(1 + (kSavedRegistersAreaSize / kXRegSize));
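  // Unwind the rest of the optimized frame, copying its contents into the
  // input FrameDescription, until the unwinding limit (the first slot that is
  // not part of the input frame) is reached.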
  Register unwind_limit = x2;
  __ Ldr(unwind_limit, MemOperand(x1, FrameDescription::frame_size_offset()));
  __ Add(unwind_limit, unwind_limit, __ StackPointer());

  __ Add(x3, x1, FrameDescription::frame_content_offset());
  Label pop_loop;
  Label pop_loop_header;
  __ B(&pop_loop_header);
  __ Bind(&pop_loop);
  __ Pop(x4);
  __ Str(x4, MemOperand(x3, kPointerSize, PostIndex));
  __ Bind(&pop_loop_header);
  __ Cmp(unwind_limit, __ StackPointer());
  __ B(ne, &pop_loop);
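  // With the input frame captured, call Deoptimizer::ComputeOutputFrames() to
  // build the unoptimized frame descriptions.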
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(
        ExternalReference::compute_output_frames_function(isolate()), 1);
  }
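  // Replace the optimized (input) frame on the stack with the output frames:
  // the outer loop walks the array of output FrameDescriptions, the inner
  // loop pushes one frame's contents.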
  Label outer_push_loop, inner_push_loop,
      outer_loop_header, inner_loop_header;
  __ B(&outer_loop_header);

  __ Bind(&outer_push_loop);
  Register current_frame = x2;
  __ Ldr(current_frame, MemOperand(x0, 0));
  __ Ldr(x3, MemOperand(current_frame, FrameDescription::frame_size_offset()));
  __ B(&inner_loop_header);

  __ Bind(&inner_push_loop);
  __ Sub(x3, x3, kPointerSize);
  __ Add(x6, current_frame, x3);
  __ Ldr(x7, MemOperand(x6, FrameDescription::frame_content_offset()));
  __ Push(x7);
  __ Bind(&inner_loop_header);
  __ Cbnz(x3, &inner_push_loop);

  __ Add(x0, x0, kPointerSize);
  __ Bind(&outer_loop_header);
  __ Cmp(x0, x1);
  __ B(lt, &outer_push_loop);
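  // Restore the saved floating point registers from the frame description
  // pointed to by x1. The deoptimizer's FP scratch registers must not be in
  // the saved set.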
  ASSERT(!saved_fp_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
         !saved_fp_registers.IncludesAliasOf(fp_zero) &&
         !saved_fp_registers.IncludesAliasOf(fp_scratch));
  int src_offset = FrameDescription::double_registers_offset();
  while (!saved_fp_registers.IsEmpty()) {
    const CPURegister reg = saved_fp_registers.PopLowestIndex();
    __ Ldr(reg, MemOperand(x1, src_offset));
    src_offset += kDoubleSize;
  }
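  // Restore the core registers from the last output frame. lr is not in the
  // saved set, so it can temporarily hold the frame pointer; x7 is skipped
  // because it will hold the continuation address.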
  ASSERT(!saved_registers.IncludesAliasOf(lr));
  Register last_output_frame = lr;
  __ Mov(last_output_frame, current_frame);

  Register continuation = x7;
  saved_registers.Remove(continuation);
  while (!saved_registers.IsEmpty()) {
    CPURegister current_reg = saved_registers.PopLowestIndex();
    int offset = (current_reg.code() * kPointerSize) +
        FrameDescription::registers_offset();
    __ Ldr(current_reg, MemOperand(last_output_frame, offset));
  }

  __ InitializeRootRegister();
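
// GeneratePrologue() emits the deoptimization entry table: every entry is a
// fixed-size pair of instructions that loads its entry id and branches to the
// shared tail, which pushes the id for the common entry code above to read.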
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
  UseScratchRegisterScope temps(masm());
  Register entry_id = temps.AcquireX();

  Label done;
  {
    InstructionAccurateScope scope(masm());

    // Each entry id must fit in a 16-bit movz immediate.
    ASSERT(is_uint16(Deoptimizer::kMaxNumberOfEntries));

    for (int i = 0; i < count(); i++) {
      int start = masm()->pc_offset();
      __ movz(entry_id, i);
      __ b(&done);
      ASSERT(masm()->pc_offset() - start == table_entry_size_);
    }
  }
  __ Bind(&done);
  __ Push(entry_id);
}
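
// On ARM64 the caller's pc and fp are stored as ordinary frame slots.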
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}

void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}