v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
optimizing-compiler-thread.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "optimizing-compiler-thread.h"

#include "v8.h"

#include "full-codegen.h"
#include "hydrogen.h"
#include "isolate.h"
#include "v8threads.h"

namespace v8 {
namespace internal {

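// Note: the destructor expects Stop() to have run first. It asserts that the
// input queue has been drained, and in debug builds it checks that every OSR
// buffer slot has been disposed before freeing the backing arrays.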
OptimizingCompilerThread::~OptimizingCompilerThread() {
  ASSERT_EQ(0, input_queue_length_);
  DeleteArray(input_queue_);
  if (FLAG_concurrent_osr) {
#ifdef DEBUG
    for (int i = 0; i < osr_buffer_capacity_; i++) {
      CHECK_EQ(NULL, osr_buffer_[i]);
    }
#endif
    DeleteArray(osr_buffer_);
  }
}


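// Consumer loop of the compiler thread. Every semaphore signal corresponds
// either to one queued job or to a pending FLUSH/STOP request. stop_thread_
// is read with acquire semantics so that the value stored by the main thread
// (via Release_Store in Flush() and Stop()) is observed reliably.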
void OptimizingCompilerThread::Run() {
#ifdef DEBUG
  { LockGuard<Mutex> lock_guard(&thread_id_mutex_);
    thread_id_ = ThreadId::Current().ToInteger();
  }
#endif
  Isolate::SetIsolateThreadLocals(isolate_, NULL);
  DisallowHeapAllocation no_allocation;
  DisallowHandleAllocation no_handles;
  DisallowHandleDereference no_deref;

  ElapsedTimer total_timer;
  if (FLAG_trace_concurrent_recompilation) total_timer.Start();

  while (true) {
    input_queue_semaphore_.Wait();
    Logger::TimerEventScope timer(
        isolate_, Logger::TimerEventScope::v8_recompile_concurrent);

    if (FLAG_concurrent_recompilation_delay != 0) {
      OS::Sleep(FLAG_concurrent_recompilation_delay);
    }

    switch (static_cast<StopFlag>(Acquire_Load(&stop_thread_))) {
      case CONTINUE:
        break;
      case STOP:
        if (FLAG_trace_concurrent_recompilation) {
          time_spent_total_ = total_timer.Elapsed();
        }
        stop_semaphore_.Signal();
        return;
      case FLUSH:
        // The main thread is blocked, waiting for the stop semaphore.
        { AllowHandleDereference allow_handle_dereference;
          FlushInputQueue(true);
        }
        Release_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
        stop_semaphore_.Signal();
        // Return to start of consumer loop.
        continue;
    }

    ElapsedTimer compiling_timer;
    if (FLAG_trace_concurrent_recompilation) compiling_timer.Start();

    CompileNext();

    if (FLAG_trace_concurrent_recompilation) {
      time_spent_compiling_ += compiling_timer.Elapsed();
    }
  }
}


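// The input queue is a circular buffer: input_queue_shift_ marks the oldest
// element, and InputQueueIndex(i) (defined in the header) maps logical
// position i to a physical slot modulo the queue capacity. Dequeuing
// advances the shift and shrinks the length under the input queue mutex.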
OptimizedCompileJob* OptimizingCompilerThread::NextInput() {
  LockGuard<Mutex> access_input_queue_(&input_queue_mutex_);
  if (input_queue_length_ == 0) return NULL;
  OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)];
  ASSERT_NE(NULL, job);
  input_queue_shift_ = InputQueueIndex(1);
  input_queue_length_--;
  return job;
}


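// Runs on the compiler thread: pops the oldest job, performs the concurrent
// OptimizeGraph() phase, and hands the result to the main thread through the
// output queue. RequestInstallCode() sets a stack guard interrupt, causing
// the main thread to call InstallOptimizedFunctions() at its next check.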
void OptimizingCompilerThread::CompileNext() {
  OptimizedCompileJob* job = NextInput();
  ASSERT_NE(NULL, job);

  // The function may have already been optimized by OSR.  Simply continue.
  OptimizedCompileJob::Status status = job->OptimizeGraph();
  USE(status);   // Prevent an unused-variable error in release mode.
  ASSERT(status != OptimizedCompileJob::FAILED);

  // Use a mutex to make sure that functions marked for install
  // are always also queued.
  output_queue_.Enqueue(job);
  isolate_->stack_guard()->RequestInstallCode();
}


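// Disposes a job that will never be installed. When restore_function_code is
// set, the side effects of queueing are rolled back: an OSR job that is not
// yet waiting for install has its back-edge stack check removed again, and a
// regular job has its function switched back to the unoptimized code.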
static void DisposeOptimizedCompileJob(OptimizedCompileJob* job,
                                       bool restore_function_code) {
  // The recompile job is allocated in the CompilationInfo's zone.
  CompilationInfo* info = job->info();
  if (restore_function_code) {
    if (info->is_osr()) {
      if (!job->IsWaitingForInstall()) {
        // Remove stack check that guards OSR entry on original code.
        Handle<Code> code = info->unoptimized_code();
        uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
        BackEdgeTable::RemoveStackCheck(code, offset);
      }
    } else {
      Handle<JSFunction> function = info->closure();
      function->ReplaceCode(function->shared()->code());
    }
  }
  delete info;
}


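// Drains the input queue without compiling anything. The Wait() call
// consumes the semaphore signal that was posted for each queued job, keeping
// the semaphore count consistent with the emptied queue. The output-queue
// counterpart below disposes finished jobs the same way.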
void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
  OptimizedCompileJob* job;
  while ((job = NextInput())) {
    // This should not block, since we have one signal on the input queue
    // semaphore corresponding to each element in the input queue.
    input_queue_semaphore_.Wait();
    // OSR jobs are dealt with separately.
    if (!job->info()->is_osr()) {
      DisposeOptimizedCompileJob(job, restore_function_code);
    }
  }
}


void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
  OptimizedCompileJob* job;
  while (output_queue_.Dequeue(&job)) {
    // OSR jobs are dealt with separately.
    if (!job->info()->is_osr()) {
      DisposeOptimizedCompileJob(job, restore_function_code);
    }
  }
}


void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) {
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    if (osr_buffer_[i] != NULL) {
      DisposeOptimizedCompileJob(osr_buffer_[i], restore_function_code);
      osr_buffer_[i] = NULL;
    }
  }
}


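// Called on the main thread. Posts a FLUSH request, wakes the compiler
// thread, and blocks on stop_semaphore_ until the input queue has been
// discarded (see the FLUSH case in Run()). Only then are the output queue
// and the OSR buffer flushed here, when no new jobs can appear in them.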
void OptimizingCompilerThread::Flush() {
  ASSERT(!IsOptimizerThread());
  Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH));
  if (FLAG_block_concurrent_recompilation) Unblock();
  input_queue_semaphore_.Signal();
  stop_semaphore_.Wait();
  FlushOutputQueue(true);
  if (FLAG_concurrent_osr) FlushOsrBuffer(true);
  if (FLAG_trace_concurrent_recompilation) {
    PrintF("  ** Flushed concurrent recompilation queues.\n");
  }
}


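// Shuts down the compiler thread. If an artificial compilation delay is
// configured, the remaining jobs are compiled and installed synchronously;
// the event loop has already exited, so input_queue_length_ may be read
// without holding the mutex. Otherwise pending jobs are simply discarded.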
void OptimizingCompilerThread::Stop() {
  ASSERT(!IsOptimizerThread());
  Release_Store(&stop_thread_, static_cast<AtomicWord>(STOP));
  if (FLAG_block_concurrent_recompilation) Unblock();
  input_queue_semaphore_.Signal();
  stop_semaphore_.Wait();

  if (FLAG_concurrent_recompilation_delay != 0) {
    // At this point the optimizing compiler thread's event loop has stopped.
    // There is no need for a mutex when reading input_queue_length_.
    while (input_queue_length_ > 0) CompileNext();
    InstallOptimizedFunctions();
  } else {
    FlushInputQueue(false);
    FlushOutputQueue(false);
  }

  if (FLAG_concurrent_osr) FlushOsrBuffer(false);

  if (FLAG_trace_concurrent_recompilation) {
    double percentage = time_spent_compiling_.PercentOf(time_spent_total_);
    PrintF("  ** Compiler thread did %.2f%% useful work\n", percentage);
  }

  if ((FLAG_trace_osr || FLAG_trace_concurrent_recompilation) &&
      FLAG_concurrent_osr) {
    PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_);
  }

  Join();
}


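// Runs on the main thread in response to RequestInstallCode(). A finished
// OSR job is only marked as waiting for install; actual entry happens later,
// when FindReadyOSRCandidate() hands it out at a back edge. Regular jobs get
// their code installed unless the function was optimized in the meantime.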
void OptimizingCompilerThread::InstallOptimizedFunctions() {
  ASSERT(!IsOptimizerThread());
  HandleScope handle_scope(isolate_);

  OptimizedCompileJob* job;
  while (output_queue_.Dequeue(&job)) {
    CompilationInfo* info = job->info();
    Handle<JSFunction> function(*info->closure());
    if (info->is_osr()) {
      if (FLAG_trace_osr) {
        PrintF("[COSR - ");
        info->closure()->PrintName();
        PrintF(" is ready for install and entry at AST id %d]\n",
               info->osr_ast_id().ToInt());
      }
      job->WaitForInstall();
      // Remove stack check that guards OSR entry on original code.
      Handle<Code> code = info->unoptimized_code();
      uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
      BackEdgeTable::RemoveStackCheck(code, offset);
    } else {
      if (function->IsOptimized()) {
        DisposeOptimizedCompileJob(job, false);
      } else {
        Handle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job);
        function->ReplaceCode(
            code.is_null() ? function->shared()->code() : *code);
      }
    }
  }
}


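// Called on the main thread. OSR jobs jump the queue: moving
// input_queue_shift_ back by one slot prepends the job so the compiler
// thread picks it up next; regular jobs are appended at the back. Under
// --block-concurrent-recompilation the semaphore signal is withheld until
// Unblock() releases the whole batch.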
void OptimizingCompilerThread::QueueForOptimization(OptimizedCompileJob* job) {
  ASSERT(IsQueueAvailable());
  ASSERT(!IsOptimizerThread());
  CompilationInfo* info = job->info();
  if (info->is_osr()) {
    osr_attempts_++;
    AddToOsrBuffer(job);
    // Add job to the front of the input queue.
    LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
    ASSERT_LT(input_queue_length_, input_queue_capacity_);
    // Move shift_ back by one.
    input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1);
    input_queue_[InputQueueIndex(0)] = job;
    input_queue_length_++;
  } else {
    // Add job to the back of the input queue.
    LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
    ASSERT_LT(input_queue_length_, input_queue_capacity_);
    input_queue_[InputQueueIndex(input_queue_length_)] = job;
    input_queue_length_++;
  }
  if (FLAG_block_concurrent_recompilation) {
    blocked_jobs_++;
  } else {
    input_queue_semaphore_.Signal();
  }
}


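// Releases jobs held back by --block-concurrent-recompilation: one semaphore
// signal per blocked job, matching the signals skipped above.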
void OptimizingCompilerThread::Unblock() {
  ASSERT(!IsOptimizerThread());
  while (blocked_jobs_ > 0) {
    input_queue_semaphore_.Signal();
    blocked_jobs_--;
  }
}


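// Scans the OSR buffer for a finished job that matches both the function and
// the OSR entry point. On a hit the job is removed from the buffer and
// ownership passes to the caller.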
OptimizedCompileJob* OptimizingCompilerThread::FindReadyOSRCandidate(
    Handle<JSFunction> function, BailoutId osr_ast_id) {
  ASSERT(!IsOptimizerThread());
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    OptimizedCompileJob* current = osr_buffer_[i];
    if (current != NULL &&
        current->IsWaitingForInstall() &&
        current->info()->HasSameOsrEntry(function, osr_ast_id)) {
      osr_hits_++;
      osr_buffer_[i] = NULL;
      return current;
    }
  }
  return NULL;
}


bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
                                              BailoutId osr_ast_id) {
  ASSERT(!IsOptimizerThread());
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    OptimizedCompileJob* current = osr_buffer_[i];
    if (current != NULL &&
        current->info()->HasSameOsrEntry(function, osr_ast_id)) {
      return !current->IsWaitingForInstall();
    }
  }
  return false;
}


bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) {
  ASSERT(!IsOptimizerThread());
  for (int i = 0; i < osr_buffer_capacity_; i++) {
    OptimizedCompileJob* current = osr_buffer_[i];
    if (current != NULL && *current->info()->closure() == function) {
      return !current->IsWaitingForInstall();
    }
  }
  return false;
}


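// The OSR buffer is a fixed-size ring. The cursor skips over slots whose
// jobs are still compiling and stops at the first empty or stale slot; a
// stale job (finished but never entered) is evicted and disposed. This
// relies on the buffer always containing at least one empty or stale slot.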
void OptimizingCompilerThread::AddToOsrBuffer(OptimizedCompileJob* job) {
  ASSERT(!IsOptimizerThread());
  // Find the next slot that is empty or has a stale job.
  OptimizedCompileJob* stale = NULL;
  while (true) {
    stale = osr_buffer_[osr_buffer_cursor_];
    if (stale == NULL || stale->IsWaitingForInstall()) break;
    osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
  }

  // Add to found slot and dispose the evicted job.
  if (stale != NULL) {
    ASSERT(stale->IsWaitingForInstall());
    CompilationInfo* info = stale->info();
    if (FLAG_trace_osr) {
      PrintF("[COSR - Discarded ");
      info->closure()->PrintName();
      PrintF(", AST id %d]\n", info->osr_ast_id().ToInt());
    }
    DisposeOptimizedCompileJob(stale, false);
  }
  osr_buffer_[osr_buffer_cursor_] = job;
  osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
}


#ifdef DEBUG
bool OptimizingCompilerThread::IsOptimizerThread(Isolate* isolate) {
  return isolate->concurrent_recompilation_enabled() &&
         isolate->optimizing_compiler_thread()->IsOptimizerThread();
}


bool OptimizingCompilerThread::IsOptimizerThread() {
  LockGuard<Mutex> lock_guard(&thread_id_mutex_);
  return ThreadId::Current().ToInteger() == thread_id_;
}
#endif


} }  // namespace v8::internal