v8  3.14.5(node0.10.28)
V8 is Google's open source JavaScript engine
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
platform-openbsd.cc
Go to the documentation of this file.
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
// Platform specific code for OpenBSD and NetBSD goes here. For the POSIX
// compatible parts the implementation is in platform-posix.cc.
30 
31 #include <pthread.h>
32 #include <semaphore.h>
33 #include <signal.h>
34 #include <sys/time.h>
35 #include <sys/resource.h>
36 #include <sys/syscall.h>
37 #include <sys/types.h>
38 #include <stdlib.h>
39 
40 #include <sys/types.h> // mmap & munmap
41 #include <sys/mman.h> // mmap & munmap
42 #include <sys/stat.h> // open
43 #include <fcntl.h> // open
44 #include <unistd.h> // sysconf
45 #include <execinfo.h> // backtrace, backtrace_symbols
46 #include <strings.h> // index
47 #include <errno.h>
48 #include <stdarg.h>
49 
50 #undef MAP_TYPE
51 
52 #include "v8.h"
53 
54 #include "platform-posix.h"
55 #include "platform.h"
56 #include "v8threads.h"
57 #include "vm-state-inl.h"
58 
59 
60 namespace v8 {
61 namespace internal {
62 
63 // 0 is never a valid thread id on Linux and OpenBSD since tids and pids share a
64 // name space and pid 0 is reserved (see man 2 kill).
65 static const pthread_t kNoThread = (pthread_t) 0;
66 
67 
// Rounds x up to the nearest integral value (thin wrapper over ceil()).
double ceiling(double x) {
  double rounded_up = ceil(x);
  return rounded_up;
}
71 
72 
73 static Mutex* limit_mutex = NULL;
74 
75 
// Computes a randomized hint address for mmap() so that V8's mappings are
// spread through the address space. Returns NULL (no hint) when no isolate
// is available.
static void* GetRandomMmapAddr() {
  Isolate* isolate = Isolate::UncheckedCurrent();
  // Note that the current isolate isn't set up in a call path via
  // CpuFeatures::Probe. We don't care about randomization in this case because
  // the code page is immediately freed.
  if (isolate != NULL) {
#ifdef V8_TARGET_ARCH_X64
    // Combine two 32-bit random values into one 64-bit candidate address.
    uint64_t rnd1 = V8::RandomPrivate(isolate);
    uint64_t rnd2 = V8::RandomPrivate(isolate);
    uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
    // Currently available CPUs have 48 bits of virtual addressing. Truncate
    // the hint address to 46 bits to give the kernel a fighting chance of
    // fulfilling our placement request.
    raw_addr &= V8_UINT64_C(0x3ffffffff000);
#else
    uint32_t raw_addr = V8::RandomPrivate(isolate);
    // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
    // variety of ASLR modes (PAE kernel, NX compat mode, etc).
    raw_addr &= 0x3ffff000;  // Page-align and keep the value below 1GB.
    raw_addr += 0x20000000;  // Shift the range to start at 512MB.
#endif
    return reinterpret_cast<void*>(raw_addr);
  }
  return NULL;
}
101 
102 
// Platform hook run after the main setup phase; defers to the shared POSIX
// implementation.
void OS::PostSetUp() {
  POSIXPostSetUp();
}
106 
107 
109  return 0;
110 }
111 
112 
114  // With gcc 4.4 the tree vectorization optimizer can generate code
115  // that requires 16 byte alignment such as movdqa on x86.
116  return 16;
117 }
118 
119 
// Stores |value| to |*ptr| with release semantics. The empty asm statement
// is a compiler-only barrier preventing reordering of preceding stores.
void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
  __asm__ __volatile__("" : : : "memory");
  // An x86 store acts as a release barrier.
  *ptr = value;
}
125 
126 
127 const char* OS::LocalTimezone(double time) {
128  if (isnan(time)) return "";
129  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
130  struct tm* t = localtime(&tv);
131  if (NULL == t) return "";
132  return t->tm_zone;
133 }
134 
135 
136 double OS::LocalTimeOffset() {
137  time_t tv = time(NULL);
138  struct tm* t = localtime(&tv);
139  // tm_gmtoff includes any daylight savings offset, so subtract it.
140  return static_cast<double>(t->tm_gmtoff * msPerSecond -
141  (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
142 }
143 
144 
// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification). The estimate is conservative, i.e., not all addresses in
// 'allocated' space are actually allocated to our heap. The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
static void* highest_ever_allocated = reinterpret_cast<void*>(0);
152 
153 
// Widens the [lowest, highest) allocated-space estimate to cover the block
// of |size| bytes at |address|. Requires limit_mutex (set up in OS::SetUp()).
static void UpdateAllocatedSpaceLimits(void* address, int size) {
  ASSERT(limit_mutex != NULL);
  ScopedLock lock(limit_mutex);

  lowest_ever_allocated = Min(lowest_ever_allocated, address);
  highest_ever_allocated =
      Max(highest_ever_allocated,
          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
}
163 
164 
165 bool OS::IsOutsideAllocatedSpace(void* address) {
166  return address < lowest_ever_allocated || address >= highest_ever_allocated;
167 }
168 
169 
// Returns the allocation granularity: the system page size.
size_t OS::AllocateAlignment() {
  return sysconf(_SC_PAGESIZE);
}
173 
174 
// Allocates |requested| bytes (rounded up to whole pages) of anonymous
// memory at a randomized hint address. On success stores the actual size in
// |*allocated| and returns the base address; returns NULL if mmap fails.
void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool is_executable) {
  const size_t msize = RoundUp(requested, AllocateAlignment());
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  void* addr = GetRandomMmapAddr();
  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
  if (mbase == MAP_FAILED) {
    LOG(i::Isolate::Current(),
        StringEvent("OS::Allocate", "mmap failed"));
    return NULL;
  }
  *allocated = msize;
  UpdateAllocatedSpaceLimits(mbase, msize);
  return mbase;
}
191 
192 
// Unmaps a region previously returned by OS::Allocate(). The munmap result
// is verified in debug builds only; release builds ignore it.
void OS::Free(void* address, const size_t size) {
  int result = munmap(address, size);
  USE(result);
  ASSERT(result == 0);
}
199 
200 
201 void OS::Sleep(int milliseconds) {
202  unsigned int ms = static_cast<unsigned int>(milliseconds);
203  usleep(1000 * ms);
204 }
205 
206 
// Terminates the process abnormally via the C library.
void OS::Abort() {
  // Redirect to std abort to signal abnormal program termination.
  abort();
}
211 
212 
// Traps into an attached debugger. x86/x64 specific: INT 3 raises SIGTRAP.
void OS::DebugBreak() {
  asm("int $3");
}
216 
217 
// Pairs a stdio FILE with its mmap'ed contents. The destructor (defined
// further down in this file) unmaps the memory and closes the file.
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
      : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }  // Base of the mapping.
  virtual int size() { return size_; }        // Mapping length in bytes.
 private:
  FILE* file_;    // Owned; closed on destruction.
  void* memory_;  // Owned mapping; unmapped on destruction.
  int size_;
};
230 
231 
232 OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
233  FILE* file = fopen(name, "r+");
234  if (file == NULL) return NULL;
235 
236  fseek(file, 0, SEEK_END);
237  int size = ftell(file);
238 
239  void* memory =
240  mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
241  return new PosixMemoryMappedFile(file, memory, size);
242 }
243 
244 
245 OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
246  void* initial) {
247  FILE* file = fopen(name, "w+");
248  if (file == NULL) return NULL;
249  int result = fwrite(initial, size, 1, file);
250  if (result < 1) {
251  fclose(file);
252  return NULL;
253  }
254  void* memory =
255  mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
256  return new PosixMemoryMappedFile(file, memory, size);
257 }
258 
259 
261  if (memory_) OS::Free(memory_, size_);
262  fclose(file_);
263 }
264 
265 
267  // This function assumes that the layout of the file is as follows:
268  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
269  // If we encounter an unexpected situation we abort scanning further entries.
270  FILE* fp = fopen("/proc/self/maps", "r");
271  if (fp == NULL) return;
272 
273  // Allocate enough room to be able to store a full file name.
274  const int kLibNameLen = FILENAME_MAX + 1;
275  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
276 
277  i::Isolate* isolate = ISOLATE;
278  // This loop will terminate once the scanning hits an EOF.
279  while (true) {
280  uintptr_t start, end;
281  char attr_r, attr_w, attr_x, attr_p;
282  // Parse the addresses and permission bits at the beginning of the line.
283  if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
284  if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
285 
286  int c;
287  if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
288  // Found a read-only executable entry. Skip characters until we reach
289  // the beginning of the filename or the end of the line.
290  do {
291  c = getc(fp);
292  } while ((c != EOF) && (c != '\n') && (c != '/'));
293  if (c == EOF) break; // EOF: Was unexpected, just exit.
294 
295  // Process the filename if found.
296  if (c == '/') {
297  ungetc(c, fp); // Push the '/' back into the stream to be read below.
298 
299  // Read to the end of the line. Exit if the read fails.
300  if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
301 
302  // Drop the newline character read by fgets. We do not need to check
303  // for a zero-length string because we know that we at least read the
304  // '/' character.
305  lib_name[strlen(lib_name) - 1] = '\0';
306  } else {
307  // No library name found, just record the raw address range.
308  snprintf(lib_name, kLibNameLen,
309  "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
310  }
311  LOG(isolate, SharedLibraryEvent(lib_name, start, end));
312  } else {
313  // Entry not describing executable data. Skip to end of line to set up
314  // reading the next entry.
315  do {
316  c = getc(fp);
317  } while ((c != EOF) && (c != '\n'));
318  if (c == EOF) break;
319  }
320  }
321  free(lib_name);
322  fclose(fp);
323 }
324 
325 
326 void OS::SignalCodeMovingGC() {
327  // Support for ll_prof.py.
328  //
329  // The Linux profiler built into the kernel logs all mmap's with
330  // PROT_EXEC so that analysis tools can properly attribute ticks. We
331  // do a mmap with a name known by ll_prof.py and immediately munmap
332  // it. This injects a GC marker into the stream of events generated
333  // by the kernel and allows us to synchronize V8 code log and the
334  // kernel log.
335  int size = sysconf(_SC_PAGESIZE);
336  FILE* f = fopen(FLAG_gc_fake_mmap, "w+");
337  void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
338  fileno(f), 0);
339  ASSERT(addr != MAP_FAILED);
340  OS::Free(addr, size);
341  fclose(f);
342 }
343 
344 
345 int OS::StackWalk(Vector<OS::StackFrame> frames) {
346  // backtrace is a glibc extension.
347  int frames_size = frames.length();
348  ScopedVector<void*> addresses(frames_size);
349 
350  int frames_count = backtrace(addresses.start(), frames_size);
351 
352  char** symbols = backtrace_symbols(addresses.start(), frames_count);
353  if (symbols == NULL) {
354  return kStackWalkError;
355  }
356 
357  for (int i = 0; i < frames_count; i++) {
358  frames[i].address = addresses[i];
359  // Format a text representation of the frame based on the information
360  // available.
362  "%s",
363  symbols[i]);
364  // Make sure line termination is in place.
365  frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
366  }
367 
368  free(symbols);
369 
370  return frames_count;
371 }
372 
373 
// Constants used for mmap: anonymous mappings use fd -1 at offset 0.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;

// Creates an empty, unreserved VirtualMemory.
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
379 
// Reserves (but does not commit) |size| bytes of address space.
VirtualMemory::VirtualMemory(size_t size) {
  address_ = ReserveRegion(size);  // NULL on failure.
  // NOTE(review): size_ is set even when the reservation failed; callers
  // appear to gate on address_ via IsReserved() — confirm.
  size_ = size;
}
384 
385 
// Reserves |size| bytes aligned to |alignment| by over-reserving
// size + alignment bytes and unmapping the unaligned head and tail.
// Leaves the object unreserved (address_ == NULL) if mmap fails.
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
    : address_(NULL), size_(0) {
  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  // PROT_NONE reservation: no pages are committed yet.
  void* reservation = mmap(GetRandomMmapAddr(),
                           request_size,
                           PROT_NONE,
                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                           kMmapFd,
                           kMmapFdOffset);
  if (reservation == MAP_FAILED) return;

  Address base = static_cast<Address>(reservation);
  Address aligned_base = RoundUp(base, alignment);
  ASSERT_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  ASSERT_LE(aligned_size, request_size);

  // Trim the tail beyond the aligned size.
  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  ASSERT(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
}
424 
425 
427  if (IsReserved()) {
428  bool result = ReleaseRegion(address(), size());
429  ASSERT(result);
430  USE(result);
431  }
432 }
433 
434 
436  return address_ != NULL;
437 }
438 
439 
// Forgets the reservation without releasing it (ownership is assumed to
// have been transferred elsewhere).
void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}
444 
445 
// Commits |size| bytes at |address| within this reservation, optionally
// executable. Returns true on success.
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}
449 
450 
// Returns committed pages at |address| to the OS while keeping the address
// range reserved. Returns true on success.
bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}
454 
455 
// Makes one commit-page at |address| inaccessible (a guard page).
// Always reports success on this platform.
bool VirtualMemory::Guard(void* address) {
  OS::Guard(address, OS::CommitPageSize());
  return true;
}
460 
461 
462 void* VirtualMemory::ReserveRegion(size_t size) {
463  void* result = mmap(GetRandomMmapAddr(),
464  size,
465  PROT_NONE,
466  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
467  kMmapFd,
468  kMmapFdOffset);
469 
470  if (result == MAP_FAILED) return NULL;
471 
472  return result;
473 }
474 
475 
// Maps |size| readable/writable (optionally executable) bytes over the
// previously reserved range at |base| using MAP_FIXED. Returns false if
// mmap fails; on success also widens the allocated-space limits.
bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(base,
                         size,
                         prot,
                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                         kMmapFd,
                         kMmapFdOffset)) {
    return false;
  }

  UpdateAllocatedSpaceLimits(base, size);
  return true;
}
490 
491 
// Replaces the committed range at |base| with an inaccessible PROT_NONE
// mapping, releasing the pages while keeping the address space reserved.
bool VirtualMemory::UncommitRegion(void* base, size_t size) {
  return mmap(base,
              size,
              PROT_NONE,
              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
              kMmapFd,
              kMmapFdOffset) != MAP_FAILED;
}
500 
501 
// Returns the whole reserved range to the OS.
bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
  return munmap(base, size) == 0;
}
505 
506 
// Per-thread platform state: just the pthread handle.
class Thread::PlatformData : public Malloced {
 public:
  PlatformData() : thread_(kNoThread) {}

  pthread_t thread_;  // Thread handle for pthread.
};
513 
// Creates a (not yet started) thread with the given name and stack size.
Thread::Thread(const Options& options)
    : data_(new PlatformData()),
      stack_size_(options.stack_size()) {
  set_name(options.name());
}
519 
520 
// Releases the platform data; does not join or stop the underlying thread.
Thread::~Thread() {
  delete data_;
}
524 
525 
// pthread entry trampoline: records the thread handle, optionally names the
// OS thread, then dispatches to Thread::Run().
static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  // This is also initialized by the first argument to pthread_create() but we
  // don't know which thread will run first (the original thread or the new
  // one) so we initialize it here too.
#ifdef PR_SET_NAME
  // Only compiled in where PR_SET_NAME/prctl exist; presumably compiled out
  // on stock OpenBSD/NetBSD headers — TODO confirm.
  prctl(PR_SET_NAME,
        reinterpret_cast<unsigned long>(thread->name()),  // NOLINT
        0, 0, 0);
#endif
  thread->data()->thread_ = pthread_self();
  ASSERT(thread->data()->thread_ != kNoThread);
  thread->Run();
  return NULL;
}
541 
542 
// Copies |name| into the fixed-size name_ buffer, explicitly
// NUL-terminating because strncpy does not guarantee it on truncation.
void Thread::set_name(const char* name) {
  strncpy(name_, name, sizeof(name_));
  name_[sizeof(name_) - 1] = '\0';
}
547 
548 
549 void Thread::Start() {
550  pthread_attr_t* attr_ptr = NULL;
551  pthread_attr_t attr;
552  if (stack_size_ > 0) {
553  pthread_attr_init(&attr);
554  pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
555  attr_ptr = &attr;
556  }
557  pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
558  ASSERT(data_->thread_ != kNoThread);
559 }
560 
561 
// Blocks until the thread terminates; its return value is discarded.
void Thread::Join() {
  pthread_join(data_->thread_, NULL);
}
565 
566 
568  pthread_key_t key;
569  int result = pthread_key_create(&key, NULL);
570  USE(result);
571  ASSERT(result == 0);
572  return static_cast<LocalStorageKey>(key);
573 }
574 
575 
577  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
578  int result = pthread_key_delete(pthread_key);
579  USE(result);
580  ASSERT(result == 0);
581 }
582 
583 
585  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
586  return pthread_getspecific(pthread_key);
587 }
588 
589 
// Stores |value| in the calling thread's slot for |key|.
void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  pthread_setspecific(pthread_key, value);
}
594 
595 
// Offers the remainder of the time slice to other runnable threads.
void Thread::YieldCPU() {
  sched_yield();
}
599 
600 
601 class OpenBSDMutex : public Mutex {
602  public:
604  pthread_mutexattr_t attrs;
605  int result = pthread_mutexattr_init(&attrs);
606  ASSERT(result == 0);
607  result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
608  ASSERT(result == 0);
609  result = pthread_mutex_init(&mutex_, &attrs);
610  ASSERT(result == 0);
611  USE(result);
612  }
613 
614  virtual ~OpenBSDMutex() { pthread_mutex_destroy(&mutex_); }
615 
616  virtual int Lock() {
617  int result = pthread_mutex_lock(&mutex_);
618  return result;
619  }
620 
621  virtual int Unlock() {
622  int result = pthread_mutex_unlock(&mutex_);
623  return result;
624  }
625 
626  virtual bool TryLock() {
627  int result = pthread_mutex_trylock(&mutex_);
628  // Return false if the lock is busy and locking failed.
629  if (result == EBUSY) {
630  return false;
631  }
632  ASSERT(result == 0); // Verify no other errors.
633  return true;
634  }
635 
636  private:
637  pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
638 };
639 
640 
// Factory for the platform mutex implementation (recursive pthread mutex;
// see OpenBSDMutex above). Caller owns the returned object.
Mutex* OS::CreateMutex() {
  return new OpenBSDMutex();
}
644 
645 
// Counting semaphore backed by POSIX sem_t.
class OpenBSDSemaphore : public Semaphore {
 public:
  explicit OpenBSDSemaphore(int count) { sem_init(&sem_, 0, count); }
  virtual ~OpenBSDSemaphore() { sem_destroy(&sem_); }

  virtual void Wait();             // Blocks until the count is positive.
  virtual bool Wait(int timeout);  // Timed wait; timeout in microseconds.
  virtual void Signal() { sem_post(&sem_); }
 private:
  sem_t sem_;
};
657 
658 
660  while (true) {
661  int result = sem_wait(&sem_);
662  if (result == 0) return; // Successfully got semaphore.
663  CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
664  }
665 }
666 
667 
// Converts a timeval (microsecond resolution) to a timespec (nanosecond
// resolution). Defined here for platforms whose headers lack it.
#ifndef TIMEVAL_TO_TIMESPEC
#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
    (ts)->tv_sec = (tv)->tv_sec; \
    (ts)->tv_nsec = (tv)->tv_usec * 1000; \
} while (false)
#endif
674 
675 
676 bool OpenBSDSemaphore::Wait(int timeout) {
677  const long kOneSecondMicros = 1000000; // NOLINT
678 
679  // Split timeout into second and nanosecond parts.
680  struct timeval delta;
681  delta.tv_usec = timeout % kOneSecondMicros;
682  delta.tv_sec = timeout / kOneSecondMicros;
683 
684  struct timeval current_time;
685  // Get the current time.
686  if (gettimeofday(&current_time, NULL) == -1) {
687  return false;
688  }
689 
690  // Calculate time for end of timeout.
691  struct timeval end_time;
692  timeradd(&current_time, &delta, &end_time);
693 
694  struct timespec ts;
695  TIMEVAL_TO_TIMESPEC(&end_time, &ts);
696 
697  int to = ts.tv_sec;
698 
699  while (true) {
700  int result = sem_trywait(&sem_);
701  if (result == 0) return true; // Successfully got semaphore.
702  if (!to) return false; // Timeout.
703  CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
704  usleep(ts.tv_nsec / 1000);
705  to--;
706  }
707 }
708 
// Factory for the platform semaphore implementation. Caller owns the result.
Semaphore* OS::CreateSemaphore(int count) {
  return new OpenBSDSemaphore(count);
}
712 
713 
// Returns the calling thread's pthread handle (used to target SIGPROF).
static pthread_t GetThreadID() {
  return pthread_self();
}
717 
// SIGPROF handler: captures a TickSample (pc/sp/fp plus VM state) for the
// CPU profiler. Register extraction from the signal context differs between
// NetBSD (mcontext gregs) and OpenBSD (sc_* sigcontext fields).
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
  USE(info);
  if (signal != SIGPROF) return;
  Isolate* isolate = Isolate::UncheckedCurrent();
  if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
    // We require a fully initialized and entered isolate.
    return;
  }
  if (v8::Locker::IsActive() &&
      !isolate->thread_manager()->IsLockedByCurrentThread()) {
    // Under the Locker API, only sample the thread holding the isolate lock.
    return;
  }

  Sampler* sampler = isolate->logger()->sampler();
  if (sampler == NULL || !sampler->IsActive()) return;

  TickSample sample_obj;
  TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
  if (sample == NULL) sample = &sample_obj;  // Fall back to a local sample.

  // Extracting the sample from the context is extremely machine dependent.
  sample->state = isolate->current_vm_state();
  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
#ifdef __NetBSD__
  mcontext_t& mcontext = ucontext->uc_mcontext;
#if V8_HOST_ARCH_IA32
  sample->pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_EIP]);
  sample->sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_ESP]);
  sample->fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_EBP]);
#elif V8_HOST_ARCH_X64
  sample->pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_RIP]);
  sample->sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RSP]);
  sample->fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RBP]);
#endif  // V8_HOST_ARCH
#else  // OpenBSD
#if V8_HOST_ARCH_IA32
  sample->pc = reinterpret_cast<Address>(ucontext->sc_eip);
  sample->sp = reinterpret_cast<Address>(ucontext->sc_esp);
  sample->fp = reinterpret_cast<Address>(ucontext->sc_ebp);
#elif V8_HOST_ARCH_X64
  sample->pc = reinterpret_cast<Address>(ucontext->sc_rip);
  sample->sp = reinterpret_cast<Address>(ucontext->sc_rsp);
  sample->fp = reinterpret_cast<Address>(ucontext->sc_rbp);
#endif  // V8_HOST_ARCH
#endif  // __NetBSD__
  sampler->SampleStack(sample);
  sampler->Tick(sample);
}
766 
767 
// Platform-specific sampler state: the pthread id of the VM thread to be
// signaled, captured at construction time.
class Sampler::PlatformData : public Malloced {
 public:
  PlatformData() : vm_tid_(GetThreadID()) {}

  pthread_t vm_tid() const { return vm_tid_; }

 private:
  pthread_t vm_tid_;
};
777 
778 
779 class SignalSender : public Thread {
780  public:
782  HALF_INTERVAL,
783  FULL_INTERVAL
784  };
785 
786  static const int kSignalSenderStackSize = 64 * KB;
787 
788  explicit SignalSender(int interval)
789  : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
790  vm_tgid_(getpid()),
791  interval_(interval) {}
792 
793  static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
794  static void TearDown() { delete mutex_; }
795 
796  static void InstallSignalHandler() {
797  struct sigaction sa;
798  sa.sa_sigaction = ProfilerSignalHandler;
799  sigemptyset(&sa.sa_mask);
800  sa.sa_flags = SA_RESTART | SA_SIGINFO;
801  signal_handler_installed_ =
802  (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
803  }
804 
805  static void RestoreSignalHandler() {
806  if (signal_handler_installed_) {
807  sigaction(SIGPROF, &old_signal_handler_, 0);
808  signal_handler_installed_ = false;
809  }
810  }
811 
812  static void AddActiveSampler(Sampler* sampler) {
813  ScopedLock lock(mutex_);
815  if (instance_ == NULL) {
816  // Start a thread that will send SIGPROF signal to VM threads,
817  // when CPU profiling will be enabled.
818  instance_ = new SignalSender(sampler->interval());
819  instance_->Start();
820  } else {
821  ASSERT(instance_->interval_ == sampler->interval());
822  }
823  }
824 
825  static void RemoveActiveSampler(Sampler* sampler) {
826  ScopedLock lock(mutex_);
830  delete instance_;
831  instance_ = NULL;
832  RestoreSignalHandler();
833  }
834  }
835 
836  // Implement Thread::Run().
837  virtual void Run() {
839  while ((state = SamplerRegistry::GetState()) !=
841  bool cpu_profiling_enabled =
843  bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
844  if (cpu_profiling_enabled && !signal_handler_installed_) {
845  InstallSignalHandler();
846  } else if (!cpu_profiling_enabled && signal_handler_installed_) {
847  RestoreSignalHandler();
848  }
849  // When CPU profiling is enabled both JavaScript and C++ code is
850  // profiled. We must not suspend.
851  if (!cpu_profiling_enabled) {
852  if (rate_limiter_.SuspendIfNecessary()) continue;
853  }
854  if (cpu_profiling_enabled && runtime_profiler_enabled) {
855  if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
856  return;
857  }
858  Sleep(HALF_INTERVAL);
859  if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
860  return;
861  }
862  Sleep(HALF_INTERVAL);
863  } else {
864  if (cpu_profiling_enabled) {
865  if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
866  this)) {
867  return;
868  }
869  }
870  if (runtime_profiler_enabled) {
871  if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
872  NULL)) {
873  return;
874  }
875  }
876  Sleep(FULL_INTERVAL);
877  }
878  }
879  }
880 
881  static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
882  if (!sampler->IsProfiling()) return;
883  SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
884  sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
885  }
886 
887  static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
888  if (!sampler->isolate()->IsInitialized()) return;
889  sampler->isolate()->runtime_profiler()->NotifyTick();
890  }
891 
892  void SendProfilingSignal(pthread_t tid) {
893  if (!signal_handler_installed_) return;
894  pthread_kill(tid, SIGPROF);
895  }
896 
897  void Sleep(SleepInterval full_or_half) {
898  // Convert ms to us and subtract 100 us to compensate delays
899  // occuring during signal delivery.
900  useconds_t interval = interval_ * 1000 - 100;
901  if (full_or_half == HALF_INTERVAL) interval /= 2;
902  int result = usleep(interval);
903 #ifdef DEBUG
904  if (result != 0 && errno != EINTR) {
905  fprintf(stderr,
906  "SignalSender usleep error; interval = %u, errno = %d\n",
907  interval,
908  errno);
909  ASSERT(result == 0 || errno == EINTR);
910  }
911 #endif
912  USE(result);
913  }
914 
915  const int vm_tgid_;
916  const int interval_;
917  RuntimeProfilerRateLimiter rate_limiter_;
918 
919  // Protects the process wide state below.
920  static Mutex* mutex_;
921  static SignalSender* instance_;
922  static bool signal_handler_installed_;
923  static struct sigaction old_signal_handler_;
924 
925  private:
927 };
928 
929 
930 Mutex* SignalSender::mutex_ = NULL;
931 SignalSender* SignalSender::instance_ = NULL;
932 struct sigaction SignalSender::old_signal_handler_;
934 
935 
936 void OS::SetUp() {
937  // Seed the random number generator. We preserve microsecond resolution.
938  uint64_t seed = Ticks() ^ (getpid() << 16);
939  srandom(static_cast<unsigned int>(seed));
940  limit_mutex = CreateMutex();
942 }
943 
944 
945 void OS::TearDown() {
947  delete limit_mutex;
948 }
949 
950 
// Creates an inactive, non-profiling sampler for |isolate| with the given
// tick interval (milliseconds).
Sampler::Sampler(Isolate* isolate, int interval)
    : isolate_(isolate),
      interval_(interval),
      profiling_(false),
      active_(false),
      samples_taken_(0) {
  data_ = new PlatformData;
}
959 
960 
962  ASSERT(!IsActive());
963  delete data_;
964 }
965 
966 
967 void Sampler::Start() {
968  ASSERT(!IsActive());
969  SetActive(true);
971 }
972 
973 
974 void Sampler::Stop() {
975  ASSERT(IsActive());
977  SetActive(false);
978 }
979 
980 
981 } } // namespace v8::internal
byte * Address
Definition: globals.h:157
static void * GetThreadLocal(LocalStorageKey key)
#define TIMEVAL_TO_TIMESPEC(tv, ts)
static void Free(void *address, const size_t size)
#define V8PRIxPTR
Definition: globals.h:189
char ** backtrace_symbols(void *const *, int) __attribute__((weak_import))
Thread(const Options &options)
PlatformData * platform_data()
Definition: platform.h:772
#define LOG(isolate, Call)
Definition: log.h:81
const int KB
Definition: globals.h:207
void SendProfilingSignal(pthread_t tid)
bool IsActive() const
Definition: platform.h:761
Isolate * isolate()
Definition: platform.h:763
static void SignalCodeMovingGC()
static void * GetRandomMmapAddr()
double ceiling(double x)
static void * ReserveRegion(size_t size)
T Max(T a, T b)
Definition: utils.h:222
static bool IsOutsideAllocatedSpace(void *pointer)
static const char * LocalTimezone(double time)
Vector< char > MutableCStrVector(char *data)
Definition: utils.h:530
static const int kStackWalkError
Definition: platform.h:245
void Sleep(SleepInterval full_or_half)
static const int kStackWalkMaxTextLen
Definition: platform.h:247
static void DoRuntimeProfile(Sampler *sampler, void *ignored)
TickSample * sample
PosixMemoryMappedFile(FILE *file, void *memory, int size)
#define ASSERT(condition)
Definition: checks.h:270
int interval() const
Definition: platform.h:739
#define CHECK(condition)
Definition: checks.h:56
int isnan(double x)
static MemoryMappedFile * open(const char *name)
static void RemoveActiveSampler(Sampler *sampler)
Definition: log.cc:1868
unsigned int seed
Definition: test-strings.cc:18
#define timeradd(a, b, result)
static void Abort()
static void StopRuntimeProfilerThreadBeforeShutdown(Thread *thread)
static SignalSender * instance_
static void ReleaseStore(volatile AtomicWord *ptr, AtomicWord value)
RuntimeProfiler * runtime_profiler()
Definition: isolate.h:826
static TickSample * TickSampleEvent(Isolate *isolate)
bool IsProfiling() const
Definition: platform.h:756
void POSIXPostSetUp()
static LocalStorageKey CreateThreadLocalKey()
bool IsAligned(T value, U alignment)
Definition: utils.h:206
static MemoryMappedFile * create(const char *name, int size, void *initial)
bool Commit(void *address, size_t size, bool is_executable)
#define DISALLOW_COPY_AND_ASSIGN(TypeName)
Definition: globals.h:307
intptr_t AtomicWord
Definition: atomicops.h:75
static void Guard(void *address, const size_t size)
T RoundUp(T x, intptr_t m)
Definition: utils.h:150
#define ASSERT_LE(v1, v2)
Definition: checks.h:275
static Mutex * CreateMutex()
static bool IsActive()
Definition: v8threads.cc:97
static void DebugBreak()
int backtrace(void **, int) __attribute__((weak_import))
activate correct semantics for inheriting readonliness false
Definition: flags.cc:141
static void DeleteThreadLocalKey(LocalStorageKey key)
static void Sleep(const int milliseconds)
static void TearDown()
static int SNPrintF(Vector< char > str, const char *format,...)
static Semaphore * CreateSemaphore(int count)
#define ISOLATE
Definition: isolate.h:1435
static bool ReleaseRegion(void *base, size_t size)
static bool CommitRegion(void *base, size_t size, bool is_executable)
static uint32_t RandomPrivate(Isolate *isolate)
Definition: v8.cc:181
static void SetThreadLocal(LocalStorageKey key, void *value)
static void * Allocate(const size_t requested, size_t *allocated, bool is_executable)
static int StackWalk(Vector< StackFrame > frames)
static void PostSetUp()
static State GetState()
Definition: log.cc:1847
static bool UncommitRegion(void *base, size_t size)
static void LogSharedLibraryAddresses()
static void SetUp()
void USE(T)
Definition: globals.h:289
static int ActivationFrameAlignment()
static size_t AllocateAlignment()
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 and VFP2 enable use of VFP2 instructions if available enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of MIPS FPU instructions if NULL
Definition: flags.cc:301
static void AddActiveSampler(Sampler *sampler)
static void AddActiveSampler(Sampler *sampler)
Definition: log.cc:1856
Sampler(Isolate *isolate, int interval)
static uint64_t CpuFeaturesImpliedByPlatform()
static bool IterateActiveSamplers(VisitSampler func, void *param)
Definition: log.cc:1830
const Register fp
static void RemoveActiveSampler(Sampler *sampler)
static double LocalTimeOffset()
T Min(T a, T b)
Definition: utils.h:229
static intptr_t CommitPageSize()
bool Uncommit(void *address, size_t size)
static void DoCpuProfile(Sampler *sampler, void *raw_sender)