  if (FLAG_concurrent_osr) {
    for (int i = 0; i < osr_buffer_capacity_; i++) {
      CHECK_EQ(NULL, osr_buffer_[i]);
    }
    DeleteArray(osr_buffer_);
  }
}
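
// Run() is the main loop of the concurrent recompilation thread: it waits on
// the input queue semaphore and compiles one queued job per iteration until
// it is told to stop or to flush its queues.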
void OptimizingCompilerThread::Run() {
  { LockGuard<Mutex> lock_guard(&thread_id_mutex_);
    thread_id_ = ThreadId::Current().ToInteger();
  }
  Isolate::SetIsolateThreadLocals(isolate_, NULL);

  ElapsedTimer total_timer;
  if (FLAG_trace_concurrent_recompilation) total_timer.Start();

  while (true) {
    input_queue_semaphore_.Wait();

    if (FLAG_concurrent_recompilation_delay != 0) {
      OS::Sleep(FLAG_concurrent_recompilation_delay);
    }

    switch (static_cast<StopFlag>(Acquire_Load(&stop_thread_))) {
      case CONTINUE:
        break;
      case STOP:
        if (FLAG_trace_concurrent_recompilation) {
          time_spent_total_ = total_timer.Elapsed();
        }
        stop_semaphore_.Signal();
        return;
      case FLUSH:
        FlushInputQueue(true);
        Release_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
        stop_semaphore_.Signal();
        continue;
    }

    ElapsedTimer compiling_timer;
    if (FLAG_trace_concurrent_recompilation) compiling_timer.Start();
    CompileNext();
    if (FLAG_trace_concurrent_recompilation) {
      time_spent_compiling_ += compiling_timer.Elapsed();
    }
  }
}
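
// NextInput() pops the oldest job off the circular input queue under the
// input queue mutex, or returns NULL if the queue is empty.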
OptimizedCompileJob* OptimizingCompilerThread::NextInput() {
  LockGuard<Mutex> access_input_queue_(&input_queue_mutex_);
  if (input_queue_length_ == 0) return NULL;
  OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)];
  ASSERT_NE(NULL, job);
  input_queue_shift_ = InputQueueIndex(1);
  input_queue_length_--;
  return job;
}
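
// CompileNext() optimizes one job from the input queue, puts the result on
// the output queue, and asks the main thread to install the finished code.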
void OptimizingCompilerThread::CompileNext() {
  OptimizedCompileJob* job = NextInput();
  job->OptimizeGraph();
  output_queue_.Enqueue(job);
  isolate_->stack_guard()->RequestInstallCode();
}
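
// DisposeOptimizedCompileJob() throws away a job that will never be
// installed.  If requested, the function is reverted to its unoptimized code;
// for OSR jobs the stack check guarding the OSR entry is removed instead.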
static void DisposeOptimizedCompileJob(OptimizedCompileJob* job,
                                       bool restore_function_code) {
  CompilationInfo* info = job->info();
  if (restore_function_code) {
    if (info->is_osr()) {
      if (!job->IsWaitingForInstall()) {
        // Remove the stack check that guards OSR entry on the original code.
        Handle<Code> code = info->unoptimized_code();
        uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
        BackEdgeTable::RemoveStackCheck(code, offset);
      }
    } else {
      Handle<JSFunction> function = info->closure();
      function->ReplaceCode(function->shared()->code());
    }
  }
  delete info;
}
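
// The three helpers below drain the input queue, the output queue and the
// OSR buffer respectively, disposing of every job they remove.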
void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
  OptimizedCompileJob* job;
  while ((job = NextInput())) {
    // This should not block, since there is one signal on the input queue
    // semaphore for each element in the input queue.
    input_queue_semaphore_.Wait();
    // OSR jobs are dealt with separately when the OSR buffer is flushed.
    if (!job->info()->is_osr()) {
      DisposeOptimizedCompileJob(job, restore_function_code);
    }
  }
}
void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
  OptimizedCompileJob* job;
  while (output_queue_.Dequeue(&job)) {
    // OSR jobs are dealt with separately when the OSR buffer is flushed.
    if (!job->info()->is_osr()) {
      DisposeOptimizedCompileJob(job, restore_function_code);
    }
  }
}
void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) {
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    if (osr_buffer_[i] != NULL) {
      DisposeOptimizedCompileJob(osr_buffer_[i], restore_function_code);
      osr_buffer_[i] = NULL;
    }
  }
}
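
// Flush() is called from the main thread: it tells the compiler thread to
// abandon all queued work and blocks until the queues have been emptied.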
void OptimizingCompilerThread::Flush() {
  ASSERT(!IsOptimizerThread());
  Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH));
  if (FLAG_block_concurrent_recompilation) Unblock();
  input_queue_semaphore_.Signal();
  stop_semaphore_.Wait();
  FlushOutputQueue(true);
  if (FLAG_concurrent_osr) FlushOsrBuffer(true);
  if (FLAG_trace_concurrent_recompilation) {
    PrintF("  ** Flushed concurrent recompilation queues.\n");
  }
}
void OptimizingCompilerThread::Stop() {
  ASSERT(!IsOptimizerThread());
  Release_Store(&stop_thread_, static_cast<AtomicWord>(STOP));
  if (FLAG_block_concurrent_recompilation) Unblock();
  input_queue_semaphore_.Signal();
  stop_semaphore_.Wait();

  if (FLAG_concurrent_recompilation_delay != 0) {
    // The compiler thread has already stopped processing its queue, so finish
    // the remaining jobs here and install them.
    while (input_queue_length_ > 0) CompileNext();
    InstallOptimizedFunctions();
  } else {
    FlushInputQueue(false);
    FlushOutputQueue(false);
  }

  if (FLAG_concurrent_osr) FlushOsrBuffer(false);

  if (FLAG_trace_concurrent_recompilation) {
    double percentage = time_spent_compiling_.PercentOf(time_spent_total_);
    PrintF("  ** Compiler thread did %.2f%% useful work\n", percentage);
  }

  if ((FLAG_trace_osr || FLAG_trace_concurrent_recompilation) &&
      FLAG_concurrent_osr) {
    PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_);
  }

  Join();
}
void OptimizingCompilerThread::InstallOptimizedFunctions() {
  ASSERT(!IsOptimizerThread());
  HandleScope handle_scope(isolate_);
  OptimizedCompileJob* job;
  while (output_queue_.Dequeue(&job)) {
    CompilationInfo* info = job->info();
    Handle<JSFunction> function(*info->closure());
    if (info->is_osr()) {
      if (FLAG_trace_osr) {
        PrintF("[COSR - ");
        function->PrintName();
        PrintF(" is ready for install and entry at AST id %d]\n",
               info->osr_ast_id().ToInt());
      }
      job->WaitForInstall();
      // Remove the stack check that guards OSR entry on the original code.
      Handle<Code> code = info->unoptimized_code();
      uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
      BackEdgeTable::RemoveStackCheck(code, offset);
    } else {
      if (function->IsOptimized()) {
        DisposeOptimizedCompileJob(job, false);
      } else {
        Handle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job);
        function->ReplaceCode(
            code.is_null() ? function->shared()->code() : *code);
      }
    }
  }
}
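
// QueueForOptimization() hands a new job to the compiler thread.  OSR jobs
// are tracked in the OSR buffer and jump to the front of the input queue.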
void OptimizingCompilerThread::QueueForOptimization(OptimizedCompileJob* job) {
  ASSERT(!IsOptimizerThread());
  CompilationInfo* info = job->info();
  if (info->is_osr()) {
    osr_attempts_++;
    AddToOsrBuffer(job);
    // OSR jobs go to the front of the input queue.
    LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
    ASSERT_LT(input_queue_length_, input_queue_capacity_);
    input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1);
    input_queue_[InputQueueIndex(0)] = job;
    input_queue_length_++;
  } else {
    LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
    ASSERT_LT(input_queue_length_, input_queue_capacity_);
    input_queue_[InputQueueIndex(input_queue_length_)] = job;
    input_queue_length_++;
  }
  if (FLAG_block_concurrent_recompilation) {
    blocked_jobs_++;
  } else {
    input_queue_semaphore_.Signal();
  }
}
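
// Unblock() releases jobs held back by --block-concurrent-recompilation.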
void OptimizingCompilerThread::Unblock() {
  ASSERT(!IsOptimizerThread());
  while (blocked_jobs_ > 0) {
    input_queue_semaphore_.Signal();
    blocked_jobs_--;
  }
}
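
// FindReadyOSRCandidate() scans the OSR buffer for a finished job that
// matches the given function and OSR entry, removing and returning it.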
OptimizedCompileJob* OptimizingCompilerThread::FindReadyOSRCandidate(
    Handle<JSFunction> function, BailoutId osr_ast_id) {
  ASSERT(!IsOptimizerThread());
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    OptimizedCompileJob* current = osr_buffer_[i];
    if (current != NULL && current->IsWaitingForInstall() &&
        current->info()->HasSameOsrEntry(function, osr_ast_id)) {
      osr_hits_++;
      osr_buffer_[i] = NULL;
      return current;
    }
  }
  return NULL;
}
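
// The IsQueuedForOSR() overloads report whether a matching OSR job is still
// in flight, i.e. queued or compiling but not yet waiting for install.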
bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
                                              BailoutId osr_ast_id) {
  ASSERT(!IsOptimizerThread());
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    OptimizedCompileJob* current = osr_buffer_[i];
    if (current != NULL &&
        current->info()->HasSameOsrEntry(function, osr_ast_id)) {
      return !current->IsWaitingForInstall();
    }
  }
  return false;
}
bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) {
  ASSERT(!IsOptimizerThread());
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    OptimizedCompileJob* current = osr_buffer_[i];
    if (current != NULL && *current->info()->closure() == function) {
      return !current->IsWaitingForInstall();
    }
  }
  return false;
}
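
// AddToOsrBuffer() stores a new OSR job in the circular buffer, evicting and
// disposing a stale job that is still waiting for install if necessary.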
void OptimizingCompilerThread::AddToOsrBuffer(OptimizedCompileJob* job) {
  ASSERT(!IsOptimizerThread());
  // Find the next slot that is empty or holds a stale job.
  OptimizedCompileJob* stale = NULL;
  while (true) {
    stale = osr_buffer_[osr_buffer_cursor_];
    if (stale == NULL || stale->IsWaitingForInstall()) break;
    osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
  }

  // Add the job to the found slot and dispose of the evicted one.
  if (stale != NULL) {
    CompilationInfo* info = stale->info();
    if (FLAG_trace_osr) {
      PrintF("[COSR - Discarded ");
      info->closure()->PrintName();
      PrintF(", AST id %d]\n", info->osr_ast_id().ToInt());
    }
    DisposeOptimizedCompileJob(stale, false);
  }
  osr_buffer_[osr_buffer_cursor_] = job;
  osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
}
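
// The IsOptimizerThread() helpers are used in assertions to check that a
// method runs on the thread it expects.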
bool OptimizingCompilerThread::IsOptimizerThread(Isolate* isolate) {
  return isolate->concurrent_recompilation_enabled() &&
         isolate->optimizing_compiler_thread()->IsOptimizerThread();
}


bool OptimizingCompilerThread::IsOptimizerThread() {
  LockGuard<Mutex> lock_guard(&thread_id_mutex_);
  return ThreadId::Current().ToInteger() == thread_id_;
}