#include "v8.h"

#if V8_TARGET_ARCH_IA32

#include "codegen.h"
#include "deoptimizer.h"
#include "full-codegen.h"
#include "safepoint-table.h"

namespace v8 {
namespace internal {
const int Deoptimizer::table_entry_size_ = 10;
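

// patch_size() is used below when patching lazy-deopt call sites; a minimal
// sketch, assuming it is simply the length of the call instruction written at
// each site (Assembler::kCallInstructionLength, as referenced by this file).
int Deoptimizer::patch_size() {
  return Assembler::kCallInstructionLength;
}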


void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
  Isolate* isolate = code->GetIsolate();
  HandleScope scope(isolate);
  // Compute the size of relocation information needed for the code
  // patching in Deoptimizer::PatchCodeForDeoptimization.
  int min_reloc_size = 0;
  int prev_pc_offset = 0;
  DeoptimizationInputData* deopt_data =
      DeoptimizationInputData::cast(code->deoptimization_data());
  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
    int pc_offset = deopt_data->Pc(i)->value();
    if (pc_offset == -1) continue;
    int pc_delta = pc_offset - prev_pc_offset;
    // A RUNTIME_ENTRY entry needs 2 bytes with the small pc-delta encoding
    // and up to 6 bytes otherwise.
    if (pc_delta <= RelocInfo::kMaxSmallPCDelta) {
      min_reloc_size += 2;
    } else {
      min_reloc_size += 6;
    }
    prev_pc_offset = pc_offset;
  }
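
  // If the relocation information is not big enough, pad it with comment
  // entries so that the lazy deoptimization patching can later overwrite it
  // in place.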
  int reloc_length = code->relocation_info()->length();
  if (min_reloc_size > reloc_length) {
    int comment_reloc_size = RelocInfo::kMinRelocCommentSize;
    int min_padding = min_reloc_size - reloc_length;
    int additional_comments =
        (min_padding + comment_reloc_size - 1) / comment_reloc_size;
    int padding = additional_comments * comment_reloc_size;
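
    // Allocate new relocation info and copy the old relocation info to the
    // end of the new array; relocation info is written and read backwards.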
    Factory* factory = isolate->factory();
    Handle<ByteArray> new_reloc =
        factory->NewByteArray(reloc_length + padding, TENURED);
    OS::MemCopy(new_reloc->GetDataStartAddress() + padding,
                code->relocation_info()->GetDataStartAddress(),
                reloc_length);
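
    // Write the filler comment entries into the padding space.  Position 0
    // is used for every entry to guarantee the short encoding.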
    RelocInfoWriter reloc_info_writer(
        new_reloc->GetDataStartAddress() + padding, 0);
    intptr_t comment_string =
        reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString);
    RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string, NULL);
    for (int i = 0; i < additional_comments; ++i) {
#ifdef DEBUG
      byte* pos_before = reloc_info_writer.pos();
#endif
      reloc_info_writer.Write(&rinfo);
      ASSERT(RelocInfo::kMinRelocCommentSize ==
             pos_before - reloc_info_writer.pos());
    }
    // Install the padded relocation information on the code object.
    code->set_relocation_info(*new_reloc);
  }
}


void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
  Address code_start_address = code->instruction_start();
  if (FLAG_zap_code_space) {
    // Fail hard and early if we enter this code object again.
    byte* pointer = code->FindCodeAgeSequence();
    if (pointer != NULL) {
      pointer += kNoCodeAgeSequenceLength;
    } else {
      pointer = code->instruction_start();
    }
    CodePatcher patcher(pointer, 1);
    patcher.masm()->int3();

    DeoptimizationInputData* data =
        DeoptimizationInputData::cast(code->deoptimization_data());
    int osr_offset = data->OsrPcOffset()->value();
    if (osr_offset > 0) {
      CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
      osr_patcher.masm()->int3();
    }
  }
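
  // The code's relocation info is overwritten in place below.  Relocation
  // info is written backwards, starting from the end of the byte array; the
  // result is later slid to the start of the array and the leftover space is
  // turned into a filler object.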
  ByteArray* reloc_info = code->relocation_info();
  Address reloc_end_address = reloc_info->address() + reloc_info->Size();
  RelocInfoWriter reloc_info_writer(reloc_end_address, code_start_address);

  // Evict the code from the optimized code map of its SharedFunctionInfo
  // before patching in the lazy deoptimization calls.
  DeoptimizationInputData* deopt_data =
      DeoptimizationInputData::cast(code->deoptimization_data());
  SharedFunctionInfo* shared =
      SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
  shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
#ifdef DEBUG
  Address prev_call_address = NULL;
#endif
  // For each LLazyBailout instruction, insert a call to the corresponding
  // deoptimization entry.
  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
    if (deopt_data->Pc(i)->value() == -1) continue;
    // Patch the lazy deoptimization entry.
    Address call_address = code_start_address + deopt_data->Pc(i)->value();
    CodePatcher patcher(call_address, patch_size());
    Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
    patcher.masm()->call(deopt_entry, RelocInfo::NONE32);
    // RUNTIME_ENTRY reloc info is used for the deoptimization bailouts.
    RelocInfo rinfo(call_address + 1,  // 1 after the call opcode.
                    RelocInfo::RUNTIME_ENTRY,
                    reinterpret_cast<intptr_t>(deopt_entry),
                    NULL);
    reloc_info_writer.Write(&rinfo);
    ASSERT(prev_call_address == NULL ||
           call_address >= prev_call_address + patch_size());
#ifdef DEBUG
    prev_call_address = call_address;
#endif
  }

  // Move the relocation info to the beginning of the byte array.
  int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
  OS::MemMove(
      code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);

  // The relocation info is in place; update the size.
  reloc_info->set_length(new_reloc_size);

  // Turn the space left over after the shrunk relocation info into a filler
  // object so the heap stays iterable.
  Address junk_address = reloc_info->address() + reloc_info->Size();
  ASSERT(junk_address <= reloc_end_address);
  isolate->heap()->CreateFillerObjectAt(junk_address,
                                        reloc_end_address - junk_address);
}


void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
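  // (Reconstructed body, based on the FrameDescription accessors referenced
  // by this file: register values other than esp and ebp do not matter
  // because JavaScript frames have no callee-saved registers, so they are
  // filled with placeholder values; the frame contents are copied verbatim
  // from the stack starting at tos.)
  for (int i = 0; i < Register::kNumRegisters; i++) {
    input_->SetRegister(i, i * 4);
  }
  input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
  input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
    input_->SetDoubleRegister(i, 0.0);
  }

  // Fill the frame content from the actual data on the frame.
  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
    input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
  }
}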


void Deoptimizer::SetPlatformCompiledStubRegisters(
    FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
  intptr_t handler =
      reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
  int params = descriptor->GetHandlerParameterCount();
  output_frame->SetRegister(eax.code(), params);
  output_frame->SetRegister(ebx.code(), handler);
}


void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
  // Double registers are only transferred when SSE2 is available (matching
  // the SSE2 guards used elsewhere in this file).
  if (CpuFeatures::IsSupported(SSE2)) {
    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
      double double_value = input_->GetDoubleRegister(i);
      output_frame->SetDoubleRegister(i, double_value);
    }
  }
}


bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
  int parameter_count = function->shared()->formal_parameter_count() + 1;
  unsigned input_frame_size = input_->GetFrameSize();
  // The dynamic alignment marker sits in the first local slot of the input
  // frame (reconstructed here from the frame constants referenced by this
  // file).
  unsigned alignment_state_offset =
      input_frame_size - parameter_count * kPointerSize -
      StandardFrameConstants::kFixedFrameSize -
      kPointerSize;
  ASSERT(JavaScriptFrameConstants::kDynamicAlignmentStateOffset ==
         JavaScriptFrameConstants::kLocal0Offset);
  int32_t alignment_state = input_->GetFrameSlot(alignment_state_offset);
  return (alignment_state == kAlignmentPaddingPushed);
}


Code* Deoptimizer::NotifyStubFailureBuiltin() {
  Builtins::Name name = CpuFeatures::IsSupported(SSE2) ?
      Builtins::kNotifyStubFailureSaveDoubles : Builtins::kNotifyStubFailure;
  return isolate_->builtins()->builtin(name);
}


#define __ masm()->

void Deoptimizer::EntryGenerator::Generate() {
  GeneratePrologue();

  // Save all general purpose registers before messing with them.
  const int kNumberOfRegisters = Register::kNumRegisters;

  const int kDoubleRegsSize =
      kDoubleSize * XMMRegister::kNumAllocatableRegisters;
  __ sub(esp, Immediate(kDoubleRegsSize));
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope scope(masm(), SSE2);
    // Save all XMM registers onto the reserved stack area.
    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
      XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
      int offset = i * kDoubleSize;
      __ movsd(Operand(esp, offset), xmm_reg);
    }
  }

  __ pushad();

  const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize +
                                      kDoubleRegsSize;

  // Get the bailout id from the stack.
  __ mov(ebx, Operand(esp, kSavedRegistersAreaSize));

  // Get the address of the location in the code object and compute the
  // fp-to-sp delta in register edx.
  __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
  __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
  __ sub(edx, ebp);
  __ neg(edx);

  // Allocate a new deoptimizer object.
  __ PrepareCallCFunction(6, eax);
  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
  __ mov(Operand(esp, 0 * kPointerSize), eax);  // Function.
  __ mov(Operand(esp, 1 * kPointerSize), Immediate(type()));  // Bailout type.
  __ mov(Operand(esp, 2 * kPointerSize), ebx);  // Bailout id.
  __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Code address or 0.
  __ mov(Operand(esp, 4 * kPointerSize), edx);  // Fp-to-sp delta.
  __ mov(Operand(esp, 5 * kPointerSize),
         Immediate(ExternalReference::isolate_address(isolate())));
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
  }

  // Preserve the deoptimizer object in register eax and get the input
  // frame descriptor pointer into ebx.
  __ mov(ebx, Operand(eax, Deoptimizer::input_offset()));

  // Fill in the general purpose input registers.
  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    __ pop(Operand(ebx, offset));
  }

  int double_regs_offset = FrameDescription::double_registers_offset();
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope scope(masm(), SSE2);
    // Fill in the double input registers.
    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
      int dst_offset = i * kDoubleSize + double_regs_offset;
      int src_offset = i * kDoubleSize;
      __ movsd(xmm0, Operand(esp, src_offset));
      __ movsd(Operand(ebx, dst_offset), xmm0);
    }
  }

  // Remove the bailout id, return address and the double registers.
  __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
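
  // Compute the unwinding limit in ecx (the first stack slot that is not
  // part of the input frame) and point edx at the input frame contents
  // (reconstructed from the FrameDescription accessors referenced by this
  // file).
  __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
  __ add(ecx, esp);
  __ lea(edx, Operand(ebx, FrameDescription::frame_content_offset()));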

  // Unwind the stack down to - but not including - the unwinding limit,
  // copying the contents of the activation frame into the input frame
  // description.
  Label pop_loop_header;
  __ jmp(&pop_loop_header);
  Label pop_loop;
  __ bind(&pop_loop);
  __ pop(Operand(edx, 0));
  __ add(edx, Immediate(sizeof(uint32_t)));
  __ bind(&pop_loop_header);
  __ cmp(ecx, esp);
  __ j(not_equal, &pop_loop);

  // Compute the output frames in the deoptimizer object, preserving the
  // deoptimizer pointer in eax across the C call.
  __ push(eax);
  __ PrepareCallCFunction(1, ebx);
  __ mov(Operand(esp, 0 * kPointerSize), eax);
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(
        ExternalReference::compute_output_frames_function(isolate()), 1);
  }
  __ pop(eax);
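
  // If the frame was dynamically aligned, pop the alignment padding
  // (reconstructed preamble for the debug check below, using
  // Deoptimizer::has_alignment_padding_offset() as referenced by this file).
  Label no_padding;
  __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
         Immediate(0));
  __ j(equal, &no_padding);
  __ pop(ecx);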
  if (FLAG_debug_code) {
    __ cmp(ecx, Immediate(kAlignmentZapValue));
    __ Assert(equal, kAlignmentMarkerExpected);
  }
  __ bind(&no_padding);

  // Replace the current frame with the output frames.
  Label outer_push_loop, inner_push_loop,
      outer_loop_header, inner_loop_header;
  // Outer loop state: eax = current FrameDescription**, edx = one past the
  // last FrameDescription**.
  __ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
  __ mov(eax, Operand(eax, Deoptimizer::output_offset()));
  __ lea(edx, Operand(eax, edx, times_4, 0));
  __ jmp(&outer_loop_header);
  __ bind(&outer_push_loop);
  // Inner loop state: ebx = current FrameDescription*, ecx = loop index.
  __ mov(ebx, Operand(eax, 0));
  __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
  __ jmp(&inner_loop_header);
  __ bind(&inner_push_loop);
  __ sub(ecx, Immediate(sizeof(uint32_t)));
  __ push(Operand(ebx, ecx, times_1,
                  FrameDescription::frame_content_offset()));
  __ bind(&inner_loop_header);
  __ test(ecx, ecx);
  __ j(not_zero, &inner_push_loop);
  __ add(eax, Immediate(kPointerSize));
  __ bind(&outer_loop_header);
  __ cmp(eax, edx);
  __ j(below, &outer_push_loop);

  // In case of a failed STUB, the XMM registers have to be restored.
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope scope(masm(), SSE2);
    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
      XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
      int src_offset = i * kDoubleSize + double_regs_offset;
      __ movsd(xmm_reg, Operand(ebx, src_offset));
    }
  }
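
  // Push state, pc, and continuation of the topmost output frame
  // (reconstructed from the FrameDescription offsets referenced by this
  // file); they are consumed by the return sequence below.
  __ push(Operand(ebx, FrameDescription::state_offset()));
  __ push(Operand(ebx, FrameDescription::pc_offset()));
  __ push(Operand(ebx, FrameDescription::continuation_offset()));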

  // Push the registers from the last output frame.
  for (int i = 0; i < kNumberOfRegisters; i++) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    __ push(Operand(ebx, offset));
  }
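
  // Restore the general purpose registers pushed above and jump to the
  // continuation (a reconstructed epilogue: ret pops the continuation
  // address pushed last).
  __ popad();
  __ ret(0);
}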


void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
  // Create a sequence of fixed-size deoptimization entries: each entry is a
  // push of its index followed by a jump, table_entry_size_ (10) bytes total.
  Label done;
  for (int i = 0; i < count(); i++) {
    int start = masm()->pc_offset();
    USE(start);
    __ push_imm32(i);
    __ jmp(&done);
    ASSERT(masm()->pc_offset() - start == table_entry_size_);
  }
  __ bind(&done);
}


void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}


void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}
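

// SetCallerConstantPool is also declared for FrameDescription (see the
// signatures referenced by this file); a minimal sketch, assuming ia32 has
// no out-of-line constant pool and therefore rejects the call.
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
  // No out-of-line constant pool support on ia32.
  UNREACHABLE();
}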


#undef __


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32