v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
optimizing-compiler-thread.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "optimizing-compiler-thread.h"

#include "v8.h"

#include "hydrogen.h"
#include "isolate.h"
#include "v8threads.h"

namespace v8 {
namespace internal {

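// Main loop of the optimizing compiler thread: block on the input queue
// semaphore, honor a pending stop request, otherwise dequeue one
// OptimizingCompiler job, run the graph optimization off the main thread,
// and enqueue the result for the main thread to install.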
void OptimizingCompilerThread::Run() {
#ifdef DEBUG
  thread_id_ = ThreadId::Current().ToInteger();
#endif
  Isolate::SetIsolateThreadLocals(isolate_, NULL);

  int64_t epoch = 0;
  if (FLAG_trace_parallel_recompilation) epoch = OS::Ticks();

  while (true) {
    input_queue_semaphore_->Wait();
    if (Acquire_Load(&stop_thread_)) {
      stop_semaphore_->Signal();
      if (FLAG_trace_parallel_recompilation) {
        time_spent_total_ = OS::Ticks() - epoch;
      }
      return;
    }

    int64_t compiling_start = 0;
    if (FLAG_trace_parallel_recompilation) compiling_start = OS::Ticks();

    Heap::RelocationLock relocation_lock(isolate_->heap());
    OptimizingCompiler* optimizing_compiler = NULL;
    input_queue_.Dequeue(&optimizing_compiler);
    Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1));

    ASSERT(!optimizing_compiler->info()->closure()->IsOptimized());

    OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph();
    ASSERT(status != OptimizingCompiler::FAILED);
    // Prevent an unused-variable error in release mode.
    USE(status);
    output_queue_.Enqueue(optimizing_compiler);
    isolate_->stack_guard()->RequestCodeReadyEvent();

    if (FLAG_trace_parallel_recompilation) {
      time_spent_compiling_ += OS::Ticks() - compiling_start;
    }
  }
}

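// Called from the main thread: raise the stop flag, wake the compiler
// thread so it can observe the flag, and block until it acknowledges by
// signalling stop_semaphore_.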
void OptimizingCompilerThread::Stop() {
  Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
  input_queue_semaphore_->Signal();
  stop_semaphore_->Wait();

  if (FLAG_trace_parallel_recompilation) {
    double compile_time = static_cast<double>(time_spent_compiling_);
    double total_time = static_cast<double>(time_spent_total_);
    double percentage = (compile_time * 100) / total_time;
    PrintF("  ** Compiler thread did %.2f%% useful work\n", percentage);
  }
}

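// Called on the main thread once it sees the code-ready event requested
// in Run(): drain the output queue and install each freshly optimized
// code object on its function.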
void OptimizingCompilerThread::InstallOptimizedFunctions() {
  HandleScope handle_scope(isolate_);
  int functions_installed = 0;
  while (!output_queue_.IsEmpty()) {
    OptimizingCompiler* compiler = NULL;
    output_queue_.Dequeue(&compiler);
    Compiler::InstallOptimizedCode(compiler);
    functions_installed++;
  }
  if (FLAG_trace_parallel_recompilation && functions_installed != 0) {
    PrintF("  ** Installed %d function(s).\n", functions_installed);
  }
}

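// Called from the main thread: hand a compilation job to the background
// thread and wake it up.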
void OptimizingCompilerThread::QueueForOptimization(
    OptimizingCompiler* optimizing_compiler) {
  input_queue_.Enqueue(optimizing_compiler);
  input_queue_semaphore_->Signal();
}

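// Debug-only helper used by assertions: reports whether the current
// thread is the optimizing compiler thread.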
#ifdef DEBUG
bool OptimizingCompilerThread::IsOptimizerThread() {
  if (!FLAG_parallel_recompilation) return false;
  return ThreadId::Current().ToInteger() == thread_id_;
}
#endif


} }  // namespace v8::internal
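The listing above is driven from both sides: the main thread enqueues jobs via QueueForOptimization() and later drains results via InstallOptimizedFunctions(), while Run() executes on the background thread until Stop() performs the flag-plus-wakeup shutdown handshake. As a rough, self-contained illustration of that handshake outside of V8 (not V8 code: it swaps V8's Semaphore and atomic operations for a C++11 mutex and condition variable, and every name in it is made up for this sketch):

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <queue>
#include <thread>

// Toy stand-in for OptimizingCompilerThread's input queue and stop
// handshake: jobs go in on one thread, are processed on another, and
// Stop() wakes the worker so it can observe the stop flag and exit.
class WorkerThread {
 public:
  WorkerThread() : stop_(false), thread_(&WorkerThread::Run, this) {}
  ~WorkerThread() { if (thread_.joinable()) Stop(); }

  void Queue(int job) {
    std::lock_guard<std::mutex> lock(mutex_);
    input_.push(job);
    wakeup_.notify_one();  // Plays the role of input_queue_semaphore_->Signal().
  }

  void Stop() {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      stop_ = true;          // Plays the role of Release_Store(&stop_thread_, true).
      wakeup_.notify_one();  // Wake the worker even if the queue is empty.
    }
    thread_.join();          // Plays the role of stop_semaphore_->Wait().
  }

 private:
  void Run() {
    for (;;) {
      int job;
      {
        std::unique_lock<std::mutex> lock(mutex_);
        // Plays the role of input_queue_semaphore_->Wait() followed by
        // Acquire_Load(&stop_thread_).
        wakeup_.wait(lock, [this] { return stop_ || !input_.empty(); });
        if (stop_) return;   // Like V8's loop, pending jobs may be dropped.
        job = input_.front();
        input_.pop();
      }
      std::printf("processed job %d\n", job);  // Stand-in for OptimizeGraph().
    }
  }

  bool stop_;
  std::queue<int> input_;
  std::mutex mutex_;
  std::condition_variable wakeup_;
  std::thread thread_;
};

int main() {
  WorkerThread worker;
  worker.Queue(1);
  worker.Queue(2);
  worker.Stop();
  return 0;
}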