v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
v8threads.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "api.h"
#include "bootstrapper.h"
#include "debug.h"
#include "execution.h"
#include "v8threads.h"
#include "regexp-stack.h"

namespace v8 {


// Track whether this V8 instance has ever called v8::Locker. This allows the
// API code to verify that the lock is always held when V8 is being entered.
bool Locker::active_ = false;


// Constructor for the Locker object. Once the Locker is constructed the
// current thread will be guaranteed to have the lock for a given isolate.
Locker::Locker(v8::Isolate* isolate)
  : has_lock_(false),
    top_level_(true),
    isolate_(reinterpret_cast<i::Isolate*>(isolate)) {
  if (isolate_ == NULL) {
    isolate_ = i::Isolate::GetDefaultIsolateForLocking();
  }
  // Record that the Locker has been used at least once.
  active_ = true;
  // Get the big lock if necessary.
  if (!isolate_->thread_manager()->IsLockedByCurrentThread()) {
    isolate_->thread_manager()->Lock();
    has_lock_ = true;

    // Make sure that V8 is initialized. Archiving of threads interferes
    // with deserialization by adding additional root pointers, so we must
    // initialize here, before anyone can call ~Locker() or Unlocker().
    if (!isolate_->IsInitialized()) {
      isolate_->Enter();
      V8::Initialize();
      isolate_->Exit();
    }

    // This may be a locker within an unlocker in which case we have to
    // get the saved state for this thread and restore it.
    if (isolate_->thread_manager()->RestoreThread()) {
      top_level_ = false;
    } else {
      internal::ExecutionAccess access(isolate_);
      isolate_->stack_guard()->ClearThread(access);
      isolate_->stack_guard()->InitThread(access);
    }
    if (isolate_->IsDefaultIsolate()) {
      // This only enters if not yet entered.
      internal::Isolate::EnterDefaultIsolate();
    }
  }
  ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
}
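
// A minimal embedder-side sketch of how Locker and Unlocker are meant to be
// used together (names such as `isolate`, RunSomeJavaScript() and
// DoWorkThatDoesNotTouchV8() are placeholders, not part of this file):
//
//   v8::Isolate* isolate = ...;
//   {
//     v8::Locker locker(isolate);        // blocks until this thread owns V8
//     RunSomeJavaScript();               // safe: the big lock is held
//     {
//       v8::Unlocker unlocker(isolate);  // yield the lock around blocking work
//       DoWorkThatDoesNotTouchV8();
//     }                                  // ~Unlocker re-locks, restores state
//   }                                    // ~Locker archives or frees the state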


bool Locker::IsLocked(v8::Isolate* isolate) {
  i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
  if (internal_isolate == NULL) {
    internal_isolate = i::Isolate::GetDefaultIsolateForLocking();
  }
  return internal_isolate->thread_manager()->IsLockedByCurrentThread();
}


bool Locker::IsActive() {
  return active_;
}


Locker::~Locker() {
  ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
  if (has_lock_) {
    if (isolate_->IsDefaultIsolate()) {
      isolate_->Exit();
    }
    if (top_level_) {
      isolate_->thread_manager()->FreeThreadResources();
    } else {
      isolate_->thread_manager()->ArchiveThread();
    }
    isolate_->thread_manager()->Unlock();
  }
}


Unlocker::Unlocker(v8::Isolate* isolate)
  : isolate_(reinterpret_cast<i::Isolate*>(isolate)) {
  if (isolate_ == NULL) {
    isolate_ = i::Isolate::GetDefaultIsolateForLocking();
  }
  ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
  if (isolate_->IsDefaultIsolate()) {
    isolate_->Exit();
  }
  isolate_->thread_manager()->ArchiveThread();
  isolate_->thread_manager()->Unlock();
}


Unlocker::~Unlocker() {
  ASSERT(!isolate_->thread_manager()->IsLockedByCurrentThread());
  isolate_->thread_manager()->Lock();
  isolate_->thread_manager()->RestoreThread();
  if (isolate_->IsDefaultIsolate()) {
    isolate_->Enter();
  }
}


void Locker::StartPreemption(int every_n_ms) {
  v8::internal::ContextSwitcher::StartPreemption(every_n_ms);
}


void Locker::StopPreemption() {
  v8::internal::ContextSwitcher::StopPreemption();
}
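
// A sketch of how an embedder could combine the Locker API with preemption so
// that several cooperating threads share one isolate; the 100 ms interval is
// an arbitrary example value:
//
//   v8::Locker locker(isolate);         // preemption requires holding the lock
//   v8::Locker::StartPreemption(100);   // switch V8 threads every ~100 ms
//   // ... long-running JavaScript on several Locker-holding threads ...
//   v8::Locker::StopPreemption();       // back to purely cooperative switching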


namespace internal {


bool ThreadManager::RestoreThread() {
  ASSERT(IsLockedByCurrentThread());
  // First check whether the current thread has been 'lazily archived', i.e.
  // not archived at all. If that is the case we put the state storage we
  // had prepared back in the free list, since we didn't need it after all.
  if (lazily_archived_thread_.Equals(ThreadId::Current())) {
    lazily_archived_thread_ = ThreadId::Invalid();
    Isolate::PerIsolateThreadData* per_thread =
        isolate_->FindPerThreadDataForThisThread();
    ASSERT(per_thread != NULL);
    ASSERT(per_thread->thread_state() == lazily_archived_thread_state_);
    lazily_archived_thread_state_->set_id(ThreadId::Invalid());
    lazily_archived_thread_state_->LinkInto(ThreadState::FREE_LIST);
    lazily_archived_thread_state_ = NULL;
    per_thread->set_thread_state(NULL);
    return true;
  }

  // Make sure that the preemption thread cannot modify the thread state while
  // it is being archived or restored.
  ExecutionAccess access(isolate_);

  // If there is another thread that was lazily archived then we have to really
  // archive it now.
  if (lazily_archived_thread_.IsValid()) {
    EagerlyArchiveThread();
  }
  Isolate::PerIsolateThreadData* per_thread =
      isolate_->FindPerThreadDataForThisThread();
  if (per_thread == NULL || per_thread->thread_state() == NULL) {
    // This is a new thread.
    isolate_->stack_guard()->InitThread(access);
    return false;
  }
  ThreadState* state = per_thread->thread_state();
  char* from = state->data();
  from = isolate_->handle_scope_implementer()->RestoreThread(from);
  from = isolate_->RestoreThread(from);
  from = Relocatable::RestoreState(isolate_, from);
#ifdef ENABLE_DEBUGGER_SUPPORT
  from = isolate_->debug()->RestoreDebug(from);
#endif
  from = isolate_->stack_guard()->RestoreStackGuard(from);
  from = isolate_->regexp_stack()->RestoreStack(from);
  from = isolate_->bootstrapper()->RestoreState(from);
  per_thread->set_thread_state(NULL);
  if (state->terminate_on_restore()) {
    isolate_->stack_guard()->TerminateExecution();
    state->set_terminate_on_restore(false);
  }
  state->set_id(ThreadId::Invalid());
  state->Unlink();
  state->LinkInto(ThreadState::FREE_LIST);
  return true;
}


void ThreadManager::Lock() {
  mutex_->Lock();
  mutex_owner_ = ThreadId::Current();
  ASSERT(IsLockedByCurrentThread());
}


void ThreadManager::Unlock() {
  mutex_owner_ = ThreadId::Invalid();
  mutex_->Unlock();
}


static int ArchiveSpacePerThread() {
  return HandleScopeImplementer::ArchiveSpacePerThread() +
                        Isolate::ArchiveSpacePerThread() +
#ifdef ENABLE_DEBUGGER_SUPPORT
                          Debug::ArchiveSpacePerThread() +
#endif
                     StackGuard::ArchiveSpacePerThread() +
                    RegExpStack::ArchiveSpacePerThread() +
                   Bootstrapper::ArchiveSpacePerThread() +
                    Relocatable::ArchiveSpacePerThread();
}


ThreadState::ThreadState(ThreadManager* thread_manager)
    : id_(ThreadId::Invalid()),
      terminate_on_restore_(false),
      data_(NULL),
      next_(this),
      previous_(this),
      thread_manager_(thread_manager) {
}


ThreadState::~ThreadState() {
  DeleteArray<char>(data_);
}


void ThreadState::AllocateSpace() {
  data_ = NewArray<char>(ArchiveSpacePerThread());
}


void ThreadState::Unlink() {
  next_->previous_ = previous_;
  previous_->next_ = next_;
}


void ThreadState::LinkInto(List list) {
  ThreadState* flying_anchor =
      list == FREE_LIST ? thread_manager_->free_anchor_
                        : thread_manager_->in_use_anchor_;
  next_ = flying_anchor->next_;
  previous_ = flying_anchor;
  flying_anchor->next_ = this;
  next_->previous_ = this;
}
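
// Unlink() and LinkInto() maintain circular doubly-linked lists threaded
// through the ThreadState objects themselves, with free_anchor_ and
// in_use_anchor_ acting as sentinel nodes. Schematically:
//
//   empty list:      anchor <-> anchor            (anchor points at itself)
//   after LinkInto:  anchor <-> state <-> anchor  (inserted right after anchor)
//
// Unlink() splices a state out by joining its neighbours; the sentinels mean
// neither operation needs NULL checks or special cases at the list ends.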


ThreadState* ThreadManager::GetFreeThreadState() {
  ThreadState* gotten = free_anchor_->next_;
  if (gotten == free_anchor_) {
    ThreadState* new_thread_state = new ThreadState(this);
    new_thread_state->AllocateSpace();
    return new_thread_state;
  }
  return gotten;
}


// Gets the first in the list of archived threads.
ThreadState* ThreadManager::FirstThreadStateInUse() {
  return in_use_anchor_->Next();
}


ThreadState* ThreadState::Next() {
  if (next_ == thread_manager_->in_use_anchor_) return NULL;
  return next_;
}


// Thread ids must start with 1, because in TLS having thread id 0 can't
// be distinguished from not having a thread id at all (since NULL is
// defined as 0.)
ThreadManager::ThreadManager()
    : mutex_(OS::CreateMutex()),
      mutex_owner_(ThreadId::Invalid()),
      lazily_archived_thread_(ThreadId::Invalid()),
      lazily_archived_thread_state_(NULL),
      free_anchor_(NULL),
      in_use_anchor_(NULL) {
  free_anchor_ = new ThreadState(this);
  in_use_anchor_ = new ThreadState(this);
}


ThreadManager::~ThreadManager() {
  delete mutex_;
  DeleteThreadStateList(free_anchor_);
  DeleteThreadStateList(in_use_anchor_);
}


void ThreadManager::DeleteThreadStateList(ThreadState* anchor) {
  // The list starts and ends with the anchor.
  for (ThreadState* current = anchor->next_; current != anchor;) {
    ThreadState* next = current->next_;
    delete current;
    current = next;
  }
  delete anchor;
}


void ThreadManager::ArchiveThread() {
  ASSERT(lazily_archived_thread_.Equals(ThreadId::Invalid()));
  ASSERT(!IsArchived());
  ASSERT(IsLockedByCurrentThread());
  ThreadState* state = GetFreeThreadState();
  state->Unlink();
  Isolate::PerIsolateThreadData* per_thread =
      isolate_->FindOrAllocatePerThreadDataForThisThread();
  per_thread->set_thread_state(state);
  lazily_archived_thread_ = ThreadId::Current();
  lazily_archived_thread_state_ = state;
  ASSERT(state->id().Equals(ThreadId::Invalid()));
  state->set_id(CurrentId());
  ASSERT(!state->id().Equals(ThreadId::Invalid()));
}
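
// Note the laziness: ArchiveThread() above only reserves a ThreadState and
// records this thread as 'lazily archived'; no state is copied yet. If the
// same thread re-enters V8 before any other thread runs, RestoreThread()
// takes its fast path and the copy is skipped entirely. Only when a different
// thread needs to run does EagerlyArchiveThread() below do the actual copy.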


void ThreadManager::EagerlyArchiveThread() {
  ASSERT(IsLockedByCurrentThread());
  ThreadState* state = lazily_archived_thread_state_;
  state->LinkInto(ThreadState::IN_USE_LIST);
  char* to = state->data();
  // Ensure that data containing GC roots are archived first, and handle them
  // in ThreadManager::Iterate(ObjectVisitor*).
  to = isolate_->handle_scope_implementer()->ArchiveThread(to);
  to = isolate_->ArchiveThread(to);
  to = Relocatable::ArchiveState(isolate_, to);
#ifdef ENABLE_DEBUGGER_SUPPORT
  to = isolate_->debug()->ArchiveDebug(to);
#endif
  to = isolate_->stack_guard()->ArchiveStackGuard(to);
  to = isolate_->regexp_stack()->ArchiveStack(to);
  to = isolate_->bootstrapper()->ArchiveState(to);
  lazily_archived_thread_ = ThreadId::Invalid();
  lazily_archived_thread_state_ = NULL;
}
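
// Resulting layout of a ThreadState's data buffer, in the order written above
// (a sketch; each segment's size comes from the matching
// ArchiveSpacePerThread() function, and the debug segment exists only with
// ENABLE_DEBUGGER_SUPPORT):
//
//   [ handle scopes | isolate | relocatable | debug? | stack guard |
//     regexp stack | bootstrapper ]
//
// The segments holding GC roots come first so ThreadManager::Iterate() can
// visit them by walking a prefix of the buffer.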


void ThreadManager::FreeThreadResources() {
  isolate_->handle_scope_implementer()->FreeThreadResources();
  isolate_->FreeThreadResources();
#ifdef ENABLE_DEBUGGER_SUPPORT
  isolate_->debug()->FreeThreadResources();
#endif
  isolate_->stack_guard()->FreeThreadResources();
  isolate_->regexp_stack()->FreeThreadResources();
  isolate_->bootstrapper()->FreeThreadResources();
}


bool ThreadManager::IsArchived() {
  Isolate::PerIsolateThreadData* data =
      isolate_->FindPerThreadDataForThisThread();
  return data != NULL && data->thread_state() != NULL;
}

void ThreadManager::Iterate(ObjectVisitor* v) {
  // Expecting no threads during serialization/deserialization
  for (ThreadState* state = FirstThreadStateInUse();
       state != NULL;
       state = state->Next()) {
    char* data = state->data();
    data = HandleScopeImplementer::Iterate(v, data);
    data = isolate_->Iterate(v, data);
    data = Relocatable::Iterate(v, data);
  }
}


void ThreadManager::IterateArchivedThreads(ThreadVisitor* v) {
  for (ThreadState* state = FirstThreadStateInUse();
       state != NULL;
       state = state->Next()) {
    char* data = state->data();
    data += HandleScopeImplementer::ArchiveSpacePerThread();
    isolate_->IterateThread(v, data);
  }
}


ThreadId ThreadManager::CurrentId() {
  return ThreadId::Current();
}


void ThreadManager::TerminateExecution(ThreadId thread_id) {
  for (ThreadState* state = FirstThreadStateInUse();
       state != NULL;
       state = state->Next()) {
    if (thread_id.Equals(state->id())) {
      state->set_terminate_on_restore(true);
    }
  }
}


ContextSwitcher::ContextSwitcher(Isolate* isolate, int every_n_ms)
  : Thread("v8:CtxtSwitcher"),
    keep_going_(true),
    sleep_ms_(every_n_ms),
    isolate_(isolate) {
}


// Set the scheduling interval of V8 threads. This function starts the
// ContextSwitcher thread if needed.
void ContextSwitcher::StartPreemption(int every_n_ms) {
  Isolate* isolate = Isolate::Current();
  ASSERT(Locker::IsLocked(reinterpret_cast<v8::Isolate*>(isolate)));
  if (isolate->context_switcher() == NULL) {
    // If the ContextSwitcher thread is not running at the moment start it now.
    isolate->set_context_switcher(new ContextSwitcher(isolate, every_n_ms));
    isolate->context_switcher()->Start();
  } else {
    // ContextSwitcher thread is already running, so we just change the
    // scheduling interval.
    isolate->context_switcher()->sleep_ms_ = every_n_ms;
  }
}


// Disable preemption of V8 threads. If multiple threads want to use V8 they
// must cooperatively schedule amongst them from this point on.
void ContextSwitcher::StopPreemption() {
  Isolate* isolate = Isolate::Current();
  ASSERT(Locker::IsLocked(reinterpret_cast<v8::Isolate*>(isolate)));
  if (isolate->context_switcher() != NULL) {
    // The ContextSwitcher thread is running. We need to stop it and release
    // its resources.
    isolate->context_switcher()->keep_going_ = false;
    // Wait for the ContextSwitcher thread to exit.
    isolate->context_switcher()->Join();
    // Thread has exited, now we can delete it.
    delete(isolate->context_switcher());
    isolate->set_context_switcher(NULL);
  }
}


// Main loop of the ContextSwitcher thread: Preempt the currently running V8
// thread at regular intervals.
void ContextSwitcher::Run() {
  while (keep_going_) {
    OS::Sleep(sleep_ms_);
    isolate()->stack_guard()->Preempt();
  }
}


// Acknowledge the preemption by the receiving thread.
void ContextSwitcher::PreemptionReceived() {
  ASSERT(Locker::IsLocked());
  // There is currently no accounting being done for this. But could be in the
  // future, which is why we leave this in.
}


}  // namespace internal
}  // namespace v8
Definition: v8threads.cc:292