v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine.
platform-solaris.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Platform specific code for Solaris 10 goes here. For the POSIX compatible
// parts the implementation is in platform-posix.cc.

#ifdef __sparc
# error "V8 does not support the SPARC CPU architecture."
#endif

#include <sys/stack.h>  // for stack alignment
#include <unistd.h>  // getpagesize(), usleep()
#include <sys/mman.h>  // mmap()
#include <ucontext.h>  // walkstack(), getcontext()
#include <dlfcn.h>  // dladdr
#include <pthread.h>
#include <sched.h>  // for sched_yield
#include <semaphore.h>
#include <time.h>
#include <sys/time.h>  // gettimeofday(), timeradd()
#include <errno.h>
#include <ieeefp.h>  // finite()
#include <signal.h>  // sigemptyset(), etc
#include <sys/regset.h>


#undef MAP_TYPE

#include "v8.h"

#include "platform-posix.h"
#include "platform.h"
#include "v8threads.h"
#include "vm-state-inl.h"


// It seems there is a bug in some Solaris distributions (experienced in
// SunOS 5.10 Generic_141445-09) which makes it difficult or impossible to
// access signbit() despite the availability of other C99 math functions.
#ifndef signbit
// Test sign - usually defined in math.h
int signbit(double x) {
  // We need to take care of the special case of both positive and negative
  // versions of zero.
  if (x == 0) {
    return fpclass(x) & FP_NZERO;
  } else {
    // This won't detect negative NaN but that should be okay since we don't
    // assume that behavior.
    return x < 0;
  }
}
#endif  // signbit
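
// Why the zero special case above matters: IEEE 754 negative zero compares
// equal to positive zero, so "x < 0" alone cannot tell the two apart.  A
// minimal standalone sketch (hypothetical test, not part of this file):
//
//   #include <assert.h>
//   #include <math.h>
//   int main() {
//     assert(-0.0 == 0.0);     // The sign is invisible to comparisons...
//     assert(signbit(-0.0));   // ...but visible to signbit().
//     assert(!signbit(0.0));
//     return 0;
//   }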

namespace v8 {
namespace internal {


// 0 is never a valid thread id on Solaris since the main thread is 1 and
// subsequent threads have their ids incremented from there.
static const pthread_t kNoThread = (pthread_t) 0;


double ceiling(double x) {
  return ceil(x);
}


static Mutex* limit_mutex = NULL;


void OS::PostSetUp() {
  POSIXPostSetUp();
}


uint64_t OS::CpuFeaturesImpliedByPlatform() {
  return 0;  // Solaris runs on a lot of things.
}


int OS::ActivationFrameAlignment() {
  // GCC generates code that requires 16 byte alignment such as movdqa.
  return Max(STACK_ALIGN, 16);
}


void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
  __asm__ __volatile__("" : : : "memory");
  *ptr = value;
}


const char* OS::LocalTimezone(double time) {
  if (isnan(time)) return "";
  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
  struct tm* t = localtime(&tv);
  if (NULL == t) return "";
  return tzname[0];  // The location of the timezone string on Solaris.
}


double OS::LocalTimeOffset() {
  // On Solaris, struct tm does not contain a tm_gmtoff field.
  time_t utc = time(NULL);
  ASSERT(utc != -1);
  struct tm* loc = localtime(&utc);
  ASSERT(loc != NULL);
  return static_cast<double>((mktime(loc) - utc) * msPerSecond);
}


// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification).  The estimate is conservative, i.e., not all addresses in
// 'allocated' space are actually allocated to our heap.  The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
static void* highest_ever_allocated = reinterpret_cast<void*>(0);


static void UpdateAllocatedSpaceLimits(void* address, int size) {
  ASSERT(limit_mutex != NULL);
  ScopedLock lock(limit_mutex);

  lowest_ever_allocated = Min(lowest_ever_allocated, address);
  highest_ever_allocated =
      Max(highest_ever_allocated,
          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
}


bool OS::IsOutsideAllocatedSpace(void* address) {
  return address < lowest_ever_allocated || address >= highest_ever_allocated;
}


size_t OS::AllocateAlignment() {
  return static_cast<size_t>(getpagesize());
}


void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool is_executable) {
  const size_t msize = RoundUp(requested, getpagesize());
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);

  if (mbase == MAP_FAILED) {
    LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
    return NULL;
  }
  *allocated = msize;
  UpdateAllocatedSpaceLimits(mbase, msize);
  return mbase;
}
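
// Caller sketch (illustrative only): the rounded-up size is reported back
// through |allocated| and must be passed unchanged to OS::Free().
//
//   size_t allocated = 0;
//   void* block = OS::Allocate(4000, &allocated, false /* not executable */);
//   if (block != NULL) {
//     // allocated >= 4000, rounded up to getpagesize().
//     OS::Free(block, allocated);
//   }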


void OS::Free(void* address, const size_t size) {
  // TODO(1240712): munmap has a return value which is ignored here.
  int result = munmap(address, size);
  USE(result);
  ASSERT(result == 0);
}


void OS::Sleep(int milliseconds) {
  useconds_t ms = static_cast<useconds_t>(milliseconds);
  usleep(1000 * ms);
}


void OS::Abort() {
  // Redirect to std abort to signal abnormal program termination.
  abort();
}


void OS::DebugBreak() {
  asm("int $3");
}


class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
      : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }
 private:
  FILE* file_;
  void* memory_;
  int size_;
};


OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
  FILE* file = fopen(name, "r+");
  if (file == NULL) return NULL;

  fseek(file, 0, SEEK_END);
  int size = ftell(file);

  void* memory =
      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
  return new PosixMemoryMappedFile(file, memory, size);
}


OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
                                                   void* initial) {
  FILE* file = fopen(name, "w+");
  if (file == NULL) return NULL;
  int result = fwrite(initial, size, 1, file);
  if (result < 1) {
    fclose(file);
    return NULL;
  }
  void* memory =
      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
  return new PosixMemoryMappedFile(file, memory, size);
}


PosixMemoryMappedFile::~PosixMemoryMappedFile() {
  if (memory_) munmap(memory_, size_);
  fclose(file_);
}


void OS::LogSharedLibraryAddresses() {
}


void OS::SignalCodeMovingGC() {
}


struct StackWalker {
  Vector<OS::StackFrame>& frames;
  int index;
};


static int StackWalkCallback(uintptr_t pc, int signo, void* data) {
  struct StackWalker* walker = static_cast<struct StackWalker*>(data);
  Dl_info info;

  int i = walker->index;

  walker->frames[i].address = reinterpret_cast<void*>(pc);

  // Make sure line termination is in place.
  walker->frames[i].text[OS::kStackWalkMaxTextLen - 1] = '\0';

  Vector<char> text = MutableCStrVector(walker->frames[i].text,
                                        OS::kStackWalkMaxTextLen);

  if (dladdr(reinterpret_cast<void*>(pc), &info) == 0) {
    OS::SNPrintF(text, "[0x%p]", pc);
  } else if ((info.dli_fname != NULL && info.dli_sname != NULL)) {
    // We have symbol info.
    OS::SNPrintF(text, "%s'%s+0x%x", info.dli_fname, info.dli_sname, pc);
  } else {
    // No local symbol info.
    OS::SNPrintF(text,
                 "%s'0x%p [0x%p]",
                 info.dli_fname,
                 pc - reinterpret_cast<uintptr_t>(info.dli_fbase),
                 pc);
  }
  walker->index++;
  return 0;
}


int OS::StackWalk(Vector<OS::StackFrame> frames) {
  ucontext_t ctx;
  struct StackWalker walker = { frames, 0 };

  if (getcontext(&ctx) < 0) return kStackWalkError;

  if (!walkcontext(&ctx, StackWalkCallback, &walker)) {
    return kStackWalkError;
  }

  return walker.index;
}
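
// Caller sketch (hypothetical buffer size; assumes V8's Vector(T*, int)
// constructor): frames are captured from the current context via
// walkcontext(); the return value is the number of frames filled in, or
// kStackWalkError on failure.
//
//   OS::StackFrame raw_frames[16];
//   Vector<OS::StackFrame> frames(raw_frames, 16);
//   int count = OS::StackWalk(frames);
//   for (int i = 0; i < count; i++) {
//     // raw_frames[i].address and raw_frames[i].text describe one frame.
//   }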


// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;


VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }


VirtualMemory::VirtualMemory(size_t size) {
  address_ = ReserveRegion(size);
  size_ = size;
}


VirtualMemory::VirtualMemory(size_t size, size_t alignment)
    : address_(NULL), size_(0) {
  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  void* reservation = mmap(OS::GetRandomMmapAddr(),
                           request_size,
                           PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                           kMmapFd,
                           kMmapFdOffset);
  if (reservation == MAP_FAILED) return;

  Address base = static_cast<Address>(reservation);
  Address aligned_base = RoundUp(base, alignment);
  ASSERT_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  ASSERT_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  ASSERT(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
}
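
// Worked example of the trimming above (hypothetical numbers, 4 KB pages):
// reserving size = 64 KB with alignment = 64 KB maps request_size = 128 KB.
// If mmap returns base = 0x12345000, then aligned_base = 0x12350000; the
// 44 KB prefix [0x12345000, 0x12350000) is unmapped, and the 20 KB suffix
// beyond aligned_base + 64 KB is unmapped too, leaving exactly one aligned
// 64 KB reservation.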


VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    bool result = ReleaseRegion(address(), size());
    ASSERT(result);
    USE(result);
  }
}


bool VirtualMemory::IsReserved() {
  return address_ != NULL;
}


void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}


bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}


bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}


bool VirtualMemory::Guard(void* address) {
  OS::Guard(address, OS::CommitPageSize());
  return true;
}


void* VirtualMemory::ReserveRegion(size_t size) {
  void* result = mmap(OS::GetRandomMmapAddr(),
                      size,
                      PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                      kMmapFd,
                      kMmapFdOffset);

  if (result == MAP_FAILED) return NULL;

  return result;
}


bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(base,
                         size,
                         prot,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
                         kMmapFd,
                         kMmapFdOffset)) {
    return false;
  }

  UpdateAllocatedSpaceLimits(base, size);
  return true;
}


bool VirtualMemory::UncommitRegion(void* base, size_t size) {
  return mmap(base,
              size,
              PROT_NONE,
              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
              kMmapFd,
              kMmapFdOffset) != MAP_FAILED;
}


bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
  return munmap(base, size) == 0;
}
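
// Lifecycle sketch (illustrative; assumes the KB/MB constants from globals.h):
// a region is reserved PROT_NONE with MAP_NORESERVE so no backing store is
// charged, pages are committed by re-mmapping them MAP_FIXED with real
// protections, and decommitted by mapping them back to PROT_NONE.
//
//   VirtualMemory vm(1 * MB);                    // ReserveRegion(): PROT_NONE.
//   if (vm.IsReserved()) {
//     vm.Commit(vm.address(), 64 * KB, false);   // CommitRegion(): read/write.
//     // ... use the committed pages ...
//     vm.Uncommit(vm.address(), 64 * KB);        // UncommitRegion(): PROT_NONE.
//   }                                            // ~VirtualMemory(): munmap.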


class Thread::PlatformData : public Malloced {
 public:
  PlatformData() : thread_(kNoThread) { }

  pthread_t thread_;  // Thread handle for pthread.
};


Thread::Thread(const Options& options)
    : data_(new PlatformData()),
      stack_size_(options.stack_size()) {
  set_name(options.name());
}


Thread::~Thread() {
  delete data_;
}


static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  // This is also initialized by the first argument to pthread_create() but we
  // don't know which thread will run first (the original thread or the new
  // one) so we initialize it here too.
  thread->data()->thread_ = pthread_self();
  ASSERT(thread->data()->thread_ != kNoThread);
  thread->Run();
  return NULL;
}


void Thread::set_name(const char* name) {
  strncpy(name_, name, sizeof(name_));
  name_[sizeof(name_) - 1] = '\0';
}


void Thread::Start() {
  pthread_attr_t attr;
  if (stack_size_ > 0) {
    pthread_attr_init(&attr);
    pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
  }
  pthread_create(&data_->thread_, NULL, ThreadEntry, this);
  ASSERT(data_->thread_ != kNoThread);
}


void Thread::Join() {
  pthread_join(data_->thread_, NULL);
}


Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
  pthread_key_t key;
  int result = pthread_key_create(&key, NULL);
  USE(result);
  ASSERT(result == 0);
  return static_cast<LocalStorageKey>(key);
}


void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  int result = pthread_key_delete(pthread_key);
  USE(result);
  ASSERT(result == 0);
}


void* Thread::GetThreadLocal(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  return pthread_getspecific(pthread_key);
}


void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  pthread_setspecific(pthread_key, value);
}


void Thread::YieldCPU() {
  sched_yield();
}


class SolarisMutex : public Mutex {
 public:
  SolarisMutex() {
    pthread_mutexattr_t attr;
    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&mutex_, &attr);
  }

  ~SolarisMutex() { pthread_mutex_destroy(&mutex_); }

  int Lock() { return pthread_mutex_lock(&mutex_); }

  int Unlock() { return pthread_mutex_unlock(&mutex_); }

  virtual bool TryLock() {
    int result = pthread_mutex_trylock(&mutex_);
    // Return false if the lock is busy and locking failed.
    if (result == EBUSY) {
      return false;
    }
    ASSERT(result == 0);  // Verify no other errors.
    return true;
  }

 private:
  pthread_mutex_t mutex_;
};


Mutex* OS::CreateMutex() {
  return new SolarisMutex();
}
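
// Behaviour sketch (illustrative): PTHREAD_MUTEX_RECURSIVE above lets the
// owning thread re-enter the lock, provided every Lock() is balanced by an
// Unlock() before other threads can acquire it.
//
//   Mutex* mutex = OS::CreateMutex();
//   mutex->Lock();
//   mutex->Lock();     // Same thread: succeeds, recursion depth is now 2.
//   mutex->Unlock();
//   mutex->Unlock();   // Lock is free again.
//   delete mutex;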


class SolarisSemaphore : public Semaphore {
 public:
  explicit SolarisSemaphore(int count) { sem_init(&sem_, 0, count); }
  virtual ~SolarisSemaphore() { sem_destroy(&sem_); }

  virtual void Wait();
  virtual bool Wait(int timeout);
  virtual void Signal() { sem_post(&sem_); }
 private:
  sem_t sem_;
};


void SolarisSemaphore::Wait() {
  while (true) {
    int result = sem_wait(&sem_);
    if (result == 0) return;  // Successfully got semaphore.
    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
  }
}


#ifndef TIMEVAL_TO_TIMESPEC
#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
    (ts)->tv_sec = (tv)->tv_sec; \
    (ts)->tv_nsec = (tv)->tv_usec * 1000; \
} while (false)
#endif


#ifndef timeradd
#define timeradd(a, b, result) \
  do { \
    (result)->tv_sec = (a)->tv_sec + (b)->tv_sec; \
    (result)->tv_usec = (a)->tv_usec + (b)->tv_usec; \
    if ((result)->tv_usec >= 1000000) { \
      ++(result)->tv_sec; \
      (result)->tv_usec -= 1000000; \
    } \
  } while (0)
#endif
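
// Carry example for the fallback timeradd above: adding {tv_sec: 1,
// tv_usec: 700000} and {tv_sec: 0, tv_usec: 600000} first yields
// tv_usec == 1300000, which the if-branch normalizes to
// {tv_sec: 2, tv_usec: 300000}.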


bool SolarisSemaphore::Wait(int timeout) {
  const long kOneSecondMicros = 1000000;  // NOLINT

  // Split timeout into second and microsecond parts.
  struct timeval delta;
  delta.tv_usec = timeout % kOneSecondMicros;
  delta.tv_sec = timeout / kOneSecondMicros;

  struct timeval current_time;
  // Get the current time.
  if (gettimeofday(&current_time, NULL) == -1) {
    return false;
  }

  // Calculate time for end of timeout.
  struct timeval end_time;
  timeradd(&current_time, &delta, &end_time);

  struct timespec ts;
  TIMEVAL_TO_TIMESPEC(&end_time, &ts);
  // Wait for semaphore signalled or timeout.
  while (true) {
    int result = sem_timedwait(&sem_, &ts);
    if (result == 0) return true;  // Successfully got semaphore.
    if (result == -1 && errno == ETIMEDOUT) return false;  // Timeout.
    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
  }
}
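
// Note on the code above: sem_timedwait() takes an *absolute* deadline, which
// is why the relative |timeout| (in microseconds) is added to the current
// wall-clock time first.  Caller sketch (illustrative):
//
//   Semaphore* sem = OS::CreateSemaphore(0);
//   bool signalled = sem->Wait(500 * 1000);  // Wait up to 500 ms.
//   // signalled == false if no Signal() arrived before the deadline.
//   delete sem;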


Semaphore* OS::CreateSemaphore(int count) {
  return new SolarisSemaphore(count);
}


static pthread_t GetThreadID() {
  return pthread_self();
}

static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
  USE(info);
  if (signal != SIGPROF) return;
  Isolate* isolate = Isolate::UncheckedCurrent();
  if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
    // We require a fully initialized and entered isolate.
    return;
  }
  if (v8::Locker::IsActive() &&
      !isolate->thread_manager()->IsLockedByCurrentThread()) {
    return;
  }

  Sampler* sampler = isolate->logger()->sampler();
  if (sampler == NULL || !sampler->IsActive()) return;

  TickSample sample_obj;
  TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
  if (sample == NULL) sample = &sample_obj;

  // Extracting the sample from the context is extremely machine dependent.
  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
  mcontext_t& mcontext = ucontext->uc_mcontext;
  sample->state = isolate->current_vm_state();

  sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]);
  sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]);
  sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]);

  sampler->SampleStack(sample);
  sampler->Tick(sample);
}
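
// Minimal standalone sketch of the same SIGPROF sampling pattern used here
// (hypothetical example, not part of this file): install a SA_SIGINFO handler,
// signal the target thread, and read the interrupted register state from the
// ucontext_t passed as the handler's third argument.
//
//   static void OnProf(int sig, siginfo_t* info, void* context) {
//     ucontext_t* uc = static_cast<ucontext_t*>(context);
//     uintptr_t pc = uc->uc_mcontext.gregs[REG_PC];  // Solaris register set.
//     (void) pc;  // Record the sample somewhere async-signal-safe.
//   }
//
//   struct sigaction sa;
//   sa.sa_sigaction = OnProf;
//   sigemptyset(&sa.sa_mask);
//   sa.sa_flags = SA_RESTART | SA_SIGINFO;
//   sigaction(SIGPROF, &sa, NULL);
//   pthread_kill(target_thread, SIGPROF);  // target_thread: a pthread_t.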

class Sampler::PlatformData : public Malloced {
 public:
  PlatformData() : vm_tid_(GetThreadID()) {}

  pthread_t vm_tid() const { return vm_tid_; }

 private:
  pthread_t vm_tid_;
};


class SignalSender : public Thread {
 public:
  enum SleepInterval {
    HALF_INTERVAL,
    FULL_INTERVAL
  };

  static const int kSignalSenderStackSize = 64 * KB;

  explicit SignalSender(int interval)
      : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
        interval_(interval) {}

  static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
  static void TearDown() { delete mutex_; }

  static void InstallSignalHandler() {
    struct sigaction sa;
    sa.sa_sigaction = ProfilerSignalHandler;
    sigemptyset(&sa.sa_mask);
    sa.sa_flags = SA_RESTART | SA_SIGINFO;
    signal_handler_installed_ =
        (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
  }

  static void RestoreSignalHandler() {
    if (signal_handler_installed_) {
      sigaction(SIGPROF, &old_signal_handler_, 0);
      signal_handler_installed_ = false;
    }
  }

  static void AddActiveSampler(Sampler* sampler) {
    ScopedLock lock(mutex_);
    SamplerRegistry::AddActiveSampler(sampler);
    if (instance_ == NULL) {
      // Start a thread that will send SIGPROF signals to VM threads
      // when CPU profiling is enabled.
      instance_ = new SignalSender(sampler->interval());
      instance_->Start();
    } else {
      ASSERT(instance_->interval_ == sampler->interval());
    }
  }

  static void RemoveActiveSampler(Sampler* sampler) {
    ScopedLock lock(mutex_);
    SamplerRegistry::RemoveActiveSampler(sampler);
    if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
      RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
      delete instance_;
      instance_ = NULL;
      RestoreSignalHandler();
    }
  }

  // Implement Thread::Run().
  virtual void Run() {
    SamplerRegistry::State state;
    while ((state = SamplerRegistry::GetState()) !=
           SamplerRegistry::HAS_NO_SAMPLERS) {
      bool cpu_profiling_enabled =
          (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
      bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
      if (cpu_profiling_enabled && !signal_handler_installed_) {
        InstallSignalHandler();
      } else if (!cpu_profiling_enabled && signal_handler_installed_) {
        RestoreSignalHandler();
      }

      // When CPU profiling is enabled both JavaScript and C++ code is
      // profiled. We must not suspend.
      if (!cpu_profiling_enabled) {
        if (rate_limiter_.SuspendIfNecessary()) continue;
      }
      if (cpu_profiling_enabled && runtime_profiler_enabled) {
        if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
          return;
        }
        Sleep(HALF_INTERVAL);
        if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
          return;
        }
        Sleep(HALF_INTERVAL);
      } else {
        if (cpu_profiling_enabled) {
          if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
                                                      this)) {
            return;
          }
        }
        if (runtime_profiler_enabled) {
          if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
                                                      NULL)) {
            return;
          }
        }
        Sleep(FULL_INTERVAL);
      }
    }
  }

  static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
    if (!sampler->IsProfiling()) return;
    SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
    sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
  }

  static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
    if (!sampler->isolate()->IsInitialized()) return;
    sampler->isolate()->runtime_profiler()->NotifyTick();
  }

  void SendProfilingSignal(pthread_t tid) {
    if (!signal_handler_installed_) return;
    pthread_kill(tid, SIGPROF);
  }

  void Sleep(SleepInterval full_or_half) {
    // Convert ms to us and subtract 100 us to compensate delays
    // occurring during signal delivery.
    useconds_t interval = interval_ * 1000 - 100;
    if (full_or_half == HALF_INTERVAL) interval /= 2;
    int result = usleep(interval);
#ifdef DEBUG
    if (result != 0 && errno != EINTR) {
      fprintf(stderr,
              "SignalSender usleep error; interval = %u, errno = %d\n",
              interval,
              errno);
      ASSERT(result == 0 || errno == EINTR);
    }
#endif
    USE(result);
  }

  const int interval_;
  RuntimeProfilerRateLimiter rate_limiter_;

  // Protects the process wide state below.
  static Mutex* mutex_;
  static SignalSender* instance_;
  static bool signal_handler_installed_;
  static struct sigaction old_signal_handler_;

 private:
  DISALLOW_COPY_AND_ASSIGN(SignalSender);
};

Mutex* SignalSender::mutex_ = NULL;
SignalSender* SignalSender::instance_ = NULL;
struct sigaction SignalSender::old_signal_handler_;
bool SignalSender::signal_handler_installed_ = false;


void OS::SetUp() {
  // Seed the random number generator.
  // Convert the current time to a 64-bit integer first, before converting it
  // to an unsigned. Going directly will cause an overflow and the seed to be
  // set to all ones. The seed will be identical for different instances that
  // call this setup code within the same millisecond.
  uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
  srandom(static_cast<unsigned int>(seed));
  limit_mutex = CreateMutex();
  SignalSender::SetUp();
}


void OS::TearDown() {
  SignalSender::TearDown();
  delete limit_mutex;
}


Sampler::Sampler(Isolate* isolate, int interval)
    : isolate_(isolate),
      interval_(interval),
      profiling_(false),
      active_(false),
      samples_taken_(0) {
  data_ = new PlatformData;
}


Sampler::~Sampler() {
  ASSERT(!IsActive());
  delete data_;
}


void Sampler::Start() {
  ASSERT(!IsActive());
  SetActive(true);
  SignalSender::AddActiveSampler(this);
}


void Sampler::Stop() {
  ASSERT(IsActive());
  SignalSender::RemoveActiveSampler(this);
  SetActive(false);
}

} }  // namespace v8::internal