v8  3.14.5(node0.10.28)
V8 is Google's open source JavaScript engine
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
platform-freebsd.cc
Go to the documentation of this file.
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
// Platform specific code for FreeBSD goes here. For the POSIX compatible parts
29 // the implementation is in platform-posix.cc.
30 
31 #include <pthread.h>
32 #include <semaphore.h>
33 #include <signal.h>
34 #include <sys/time.h>
35 #include <sys/resource.h>
36 #include <sys/types.h>
37 #include <sys/ucontext.h>
38 #include <stdlib.h>
39 
40 #include <sys/types.h> // mmap & munmap
41 #include <sys/mman.h> // mmap & munmap
42 #include <sys/stat.h> // open
43 #include <sys/fcntl.h> // open
44 #include <unistd.h> // getpagesize
45 // If you don't have execinfo.h then you need devel/libexecinfo from ports.
46 #include <execinfo.h> // backtrace, backtrace_symbols
47 #include <strings.h> // index
48 #include <errno.h>
49 #include <stdarg.h>
50 #include <limits.h>
51 
52 #undef MAP_TYPE
53 
54 #include "v8.h"
55 #include "v8threads.h"
56 
57 #include "platform-posix.h"
58 #include "platform.h"
59 #include "vm-state-inl.h"
60 
61 
62 namespace v8 {
63 namespace internal {
64 
65 // 0 is never a valid thread id on FreeBSD since tids and pids share a
66 // name space and pid 0 is used to kill the group (see man 2 kill).
67 static const pthread_t kNoThread = (pthread_t) 0;
68 
69 
// Round upward like ceil(), preserving the OS X convention that values
// in the open interval (-1, 0) yield negative zero.
double ceiling(double x) {
  return (-1.0 < x && x < 0.0) ? -0.0 : ceil(x);
}
78 
79 
80 static Mutex* limit_mutex = NULL;
81 
82 
// Platform hook run after general setup.
// NOTE(review): the extraction appears to have dropped this function's
// body line (likely a call to POSIXPostSetUp()) — verify upstream.
void OS::PostSetUp() {
}
86 
87 
// Store |value| to |*ptr| with release semantics: the empty asm with a
// "memory" clobber is a compiler-only fence preventing earlier memory
// operations from being reordered after the store.
void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
  __asm__ __volatile__("" : : : "memory");  // Compiler barrier only.
  *ptr = value;
}
92 
93 
95  return 0; // FreeBSD runs on anything.
96 }
97 
98 
100  // 16 byte alignment on FreeBSD
101  return 16;
102 }
103 
104 
// Return the abbreviated local timezone name for |time| (milliseconds
// since epoch), or "" if |time| is NaN or conversion fails.
const char* OS::LocalTimezone(double time) {
  if (isnan(time)) return "";
  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
  struct tm* t = localtime(&tv);  // Not thread-safe: static buffer.
  if (NULL == t) return "";
  return t->tm_zone;  // BSD extension field of struct tm.
}
112 
113 
// Return the local standard-time offset from UTC in milliseconds,
// excluding any daylight-saving component.
double OS::LocalTimeOffset() {
  time_t tv = time(NULL);
  struct tm* t = localtime(&tv);
  // tm_gmtoff includes any daylight savings offset, so subtract it.
  return static_cast<double>(t->tm_gmtoff * msPerSecond -
      (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}
121 
122 
123 // We keep the lowest and highest addresses mapped as a quick way of
124 // determining that pointers are outside the heap (used mostly in assertions
125 // and verification). The estimate is conservative, i.e., not all addresses in
126 // 'allocated' space are actually allocated to our heap. The range is
127 // [lowest, highest), inclusive on the low and and exclusive on the high end.
128 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
129 static void* highest_ever_allocated = reinterpret_cast<void*>(0);
130 
131 
// Widen the tracked [lowest_ever_allocated, highest_ever_allocated)
// range to cover the block of |size| bytes at |address|.
static void UpdateAllocatedSpaceLimits(void* address, int size) {
  ASSERT(limit_mutex != NULL);
  ScopedLock lock(limit_mutex);  // Guards both limit variables.

  lowest_ever_allocated = Min(lowest_ever_allocated, address);
  highest_ever_allocated =
      Max(highest_ever_allocated,
          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
}
141 
142 
143 bool OS::IsOutsideAllocatedSpace(void* address) {
144  return address < lowest_ever_allocated || address >= highest_ever_allocated;
145 }
146 
147 
// Allocation granularity: one VM page.
size_t OS::AllocateAlignment() {
  return getpagesize();
}
151 
152 
// Allocate |requested| bytes (rounded up to whole pages) of anonymous
// private memory, optionally executable. On success stores the rounded
// size in |*allocated| and returns the base; returns NULL on failure.
void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool executable) {
  const size_t msize = RoundUp(requested, getpagesize());
  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);

  if (mbase == MAP_FAILED) {
    LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
    return NULL;
  }
  *allocated = msize;
  UpdateAllocatedSpaceLimits(mbase, msize);
  return mbase;
}
168 
169 
// Unmap a block previously returned by OS::Allocate. The munmap result
// is asserted in debug builds and deliberately ignored in release.
void OS::Free(void* buf, const size_t length) {
  int result = munmap(buf, length);
  USE(result);
  ASSERT(result == 0);
}
176 
177 
// Suspend the calling thread for |milliseconds|.
// NOTE(review): POSIX leaves usleep() unspecified for arguments of one
// second or more; very long sleeps may fail on some platforms.
void OS::Sleep(int milliseconds) {
  unsigned int ms = static_cast<unsigned int>(milliseconds);
  usleep(1000 * ms);
}
182 
183 
// Terminate the process abnormally (raises SIGABRT; no cleanup runs).
void OS::Abort() {
  // Redirect to std abort to signal abnormal program termination.
  abort();
}
188 
189 
// Trigger a debugger breakpoint via an architecture-specific trap.
void OS::DebugBreak() {
#if (defined(__arm__) || defined(__thumb__))
# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
  asm("bkpt 0");  // ARM software breakpoint (no-op on pre-ARMv5 builds).
# endif
#else
  asm("int $3");  // x86 breakpoint interrupt.
#endif
}
199 
200 
// Pairs an open FILE* with its mmap'ed region; the out-of-line
// destructor unmaps the memory and closes the file.
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
      : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }
 private:
  FILE* file_;    // Owned; closed by the destructor.
  void* memory_;  // Owned mapping base.
  int size_;      // Mapping length in bytes.
};
213 
214 
215 OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
216  FILE* file = fopen(name, "r+");
217  if (file == NULL) return NULL;
218 
219  fseek(file, 0, SEEK_END);
220  int size = ftell(file);
221 
222  void* memory =
223  mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
224  return new PosixMemoryMappedFile(file, memory, size);
225 }
226 
227 
228 OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
229  void* initial) {
230  FILE* file = fopen(name, "w+");
231  if (file == NULL) return NULL;
232  int result = fwrite(initial, size, 1, file);
233  if (result < 1) {
234  fclose(file);
235  return NULL;
236  }
237  void* memory =
238  mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
239  return new PosixMemoryMappedFile(file, memory, size);
240 }
241 
242 
244  if (memory_) munmap(memory_, size_);
245  fclose(file_);
246 }
247 
248 
// Parse a hexadecimal address string (an optional "0x" prefix is
// accepted by strtol in base 16).
static unsigned StringToLong(char* buffer) {
  long parsed = strtol(buffer, NULL, 16);  // NOLINT
  return static_cast<unsigned>(parsed);
}
252 
253 
255  static const int MAP_LENGTH = 1024;
256  int fd = open("/proc/self/maps", O_RDONLY);
257  if (fd < 0) return;
258  while (true) {
259  char addr_buffer[11];
260  addr_buffer[0] = '0';
261  addr_buffer[1] = 'x';
262  addr_buffer[10] = 0;
263  int result = read(fd, addr_buffer + 2, 8);
264  if (result < 8) break;
265  unsigned start = StringToLong(addr_buffer);
266  result = read(fd, addr_buffer + 2, 1);
267  if (result < 1) break;
268  if (addr_buffer[2] != '-') break;
269  result = read(fd, addr_buffer + 2, 8);
270  if (result < 8) break;
271  unsigned end = StringToLong(addr_buffer);
272  char buffer[MAP_LENGTH];
273  int bytes_read = -1;
274  do {
275  bytes_read++;
276  if (bytes_read >= MAP_LENGTH - 1)
277  break;
278  result = read(fd, buffer + bytes_read, 1);
279  if (result < 1) break;
280  } while (buffer[bytes_read] != '\n');
281  buffer[bytes_read] = 0;
282  // Ignore mappings that are not executable.
283  if (buffer[3] != 'x') continue;
284  char* start_of_path = index(buffer, '/');
285  // There may be no filename in this line. Skip to next.
286  if (start_of_path == NULL) continue;
287  buffer[bytes_read] = 0;
288  LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end));
289  }
290  close(fd);
291 }
292 
293 
// Hook invoked around code-moving GC for profiler support; no-op here.
void OS::SignalCodeMovingGC() {
}
296 
297 
// Capture a native backtrace into |frames| using libexecinfo's
// backtrace()/backtrace_symbols(). Returns the number of frames filled,
// or kStackWalkError if symbolization fails.
// NOTE(review): the extraction dropped a line in the loop below (the
// head of an SNPrintF(MutableCStrVector(frames[i].text, ...) call,
// judging by the orphaned arguments) — verify against upstream.
int OS::StackWalk(Vector<OS::StackFrame> frames) {
  int frames_size = frames.length();
  ScopedVector<void*> addresses(frames_size);

  int frames_count = backtrace(addresses.start(), frames_size);

  char** symbols = backtrace_symbols(addresses.start(), frames_count);
  if (symbols == NULL) {
    return kStackWalkError;
  }

  for (int i = 0; i < frames_count; i++) {
    frames[i].address = addresses[i];
    // Format a text representation of the frame based on the information
    // available.
    "%s",
    symbols[i]);
    // Make sure line termination is in place.
    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
  }

  free(symbols);  // backtrace_symbols allocates with malloc.

  return frames_count;
}
324 
325 
326 // Constants used for mmap.
327 static const int kMmapFd = -1;
328 static const int kMmapFdOffset = 0;
329 
// Empty placeholder: no address space reserved yet.
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }

// Reserve |size| bytes of address space; address_ stays NULL on failure.
VirtualMemory::VirtualMemory(size_t size) {
  address_ = ReserveRegion(size);
  size_ = size;
}
336 
337 
// Reserve |size| bytes aligned to |alignment|: over-reserve
// size + alignment, then unmap the unaligned head and any tail beyond
// the page-rounded size. On failure the object stays unreserved.
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
    : address_(NULL), size_(0) {
  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  void* reservation = mmap(OS::GetRandomMmapAddr(),
                           request_size,
                           PROT_NONE,  // Reserved, not committed.
                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                           kMmapFd,
                           kMmapFdOffset);
  if (reservation == MAP_FAILED) return;

  Address base = static_cast<Address>(reservation);
  Address aligned_base = RoundUp(base, alignment);
  ASSERT_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  ASSERT_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  // After trimming, exactly the aligned block remains.
  ASSERT(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
}
376 
377 
379  if (IsReserved()) {
380  bool result = ReleaseRegion(address(), size());
381  ASSERT(result);
382  USE(result);
383  }
384 }
385 
386 
388  return address_ != NULL;
389 }
390 
391 
393  address_ = NULL;
394  size_ = 0;
395 }
396 
397 
// Commit a sub-range of the reservation; see CommitRegion below.
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}
401 
402 
// Uncommit a sub-range of the reservation; see UncommitRegion below.
bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}
406 
407 
// Turn one commit page at |address| into an inaccessible guard page.
bool VirtualMemory::Guard(void* address) {
  OS::Guard(address, OS::CommitPageSize());
  return true;  // OS::Guard reports no failure.
}
412 
413 
414 void* VirtualMemory::ReserveRegion(size_t size) {
415  void* result = mmap(OS::GetRandomMmapAddr(),
416  size,
417  PROT_NONE,
418  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
419  kMmapFd,
420  kMmapFdOffset);
421 
422  if (result == MAP_FAILED) return NULL;
423 
424  return result;
425 }
426 
427 
// Back a previously reserved range with accessible anonymous memory by
// over-mapping it with MAP_FIXED. Returns false if mmap fails.
bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(base,
                         size,
                         prot,
                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                         kMmapFd,
                         kMmapFdOffset)) {
    return false;
  }

  UpdateAllocatedSpaceLimits(base, size);
  return true;
}
442 
443 
444 bool VirtualMemory::UncommitRegion(void* base, size_t size) {
445  return mmap(base,
446  size,
447  PROT_NONE,
448  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
449  kMmapFd,
450  kMmapFdOffset) != MAP_FAILED;
451 }
452 
453 
// Return the whole reservation to the OS.
bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
  return munmap(base, size) == 0;
}
457 
458 
// Per-thread platform state: just the pthread handle.
class Thread::PlatformData : public Malloced {
 public:
  pthread_t thread_;  // Thread handle for pthread.
};
463 
464 
// Record the requested name and stack size; the thread itself is not
// created until Start().
Thread::Thread(const Options& options)
    : data_(new PlatformData),
      stack_size_(options.stack_size()) {
  set_name(options.name());
}
470 
471 
// Release platform data only; does not join or stop the thread.
Thread::~Thread() {
  delete data_;
}
475 
476 
// pthread entry trampoline: stores the thread handle and dispatches to
// Thread::Run().
static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  // This is also initialized by the first argument to pthread_create() but we
  // don't know which thread will run first (the original thread or the new
  // one) so we initialize it here too.
  thread->data()->thread_ = pthread_self();
  ASSERT(thread->data()->thread_ != kNoThread);
  thread->Run();
  return NULL;
}
487 
488 
// Copy |name| into the fixed-size name_ buffer, truncating if needed.
// The explicit terminator covers strncpy's unterminated-on-truncation case.
void Thread::set_name(const char* name) {
  strncpy(name_, name, sizeof(name_));
  name_[sizeof(name_) - 1] = '\0';
}
493 
494 
495 void Thread::Start() {
496  pthread_attr_t* attr_ptr = NULL;
497  pthread_attr_t attr;
498  if (stack_size_ > 0) {
499  pthread_attr_init(&attr);
500  pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
501  attr_ptr = &attr;
502  }
503  pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
504  ASSERT(data_->thread_ != kNoThread);
505 }
506 
507 
// Block until the thread's Run() returns.
void Thread::Join() {
  pthread_join(data_->thread_, NULL);
}
511 
512 
514  pthread_key_t key;
515  int result = pthread_key_create(&key, NULL);
516  USE(result);
517  ASSERT(result == 0);
518  return static_cast<LocalStorageKey>(key);
519 }
520 
521 
// Destroy a TLS key previously created by CreateThreadLocalKey.
void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  int result = pthread_key_delete(pthread_key);
  USE(result);
  ASSERT(result == 0);
}
528 
529 
// Read the calling thread's value for |key| (NULL if never set).
void* Thread::GetThreadLocal(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  return pthread_getspecific(pthread_key);
}
534 
535 
// Set the calling thread's value for |key|.
void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  pthread_setspecific(pthread_key, value);
}
540 
541 
// Voluntarily yield the processor to another runnable thread.
void Thread::YieldCPU() {
  sched_yield();
}
545 
546 
// Recursive mutex built on pthread_mutex_t.
// NOTE(review): the extraction dropped the constructor's header line
// (likely "FreeBSDMutex() {") before the attribute setup below —
// verify against the upstream file.
class FreeBSDMutex : public Mutex {
 public:
  pthread_mutexattr_t attrs;
  int result = pthread_mutexattr_init(&attrs);
  ASSERT(result == 0);
  result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
  ASSERT(result == 0);
  result = pthread_mutex_init(&mutex_, &attrs);
  ASSERT(result == 0);
  USE(result);
  }

  virtual ~FreeBSDMutex() { pthread_mutex_destroy(&mutex_); }

  virtual int Lock() {
    int result = pthread_mutex_lock(&mutex_);
    return result;
  }

  virtual int Unlock() {
    int result = pthread_mutex_unlock(&mutex_);
    return result;
  }

  // Non-blocking acquire: false when the mutex is busy, true once held.
  virtual bool TryLock() {
    int result = pthread_mutex_trylock(&mutex_);
    // Return false if the lock is busy and locking failed.
    if (result == EBUSY) {
      return false;
    }
    ASSERT(result == 0);  // Verify no other errors.
    return true;
  }

 private:
  pthread_mutex_t mutex_;  // Pthread mutex for POSIX platforms.
};
585 
586 
// Factory for the platform mutex; caller owns the result.
Mutex* OS::CreateMutex() {
  return new FreeBSDMutex();
}
590 
591 
// Counting semaphore wrapping POSIX sem_t.
// NOTE(review): the sem_init result is not checked.
class FreeBSDSemaphore : public Semaphore {
 public:
  explicit FreeBSDSemaphore(int count) { sem_init(&sem_, 0, count); }
  virtual ~FreeBSDSemaphore() { sem_destroy(&sem_); }

  virtual void Wait();             // Blocks until the count is positive.
  virtual bool Wait(int timeout);  // |timeout| in microseconds; false on timeout.
  virtual void Signal() { sem_post(&sem_); }
 private:
  sem_t sem_;
};
603 
604 
606  while (true) {
607  int result = sem_wait(&sem_);
608  if (result == 0) return; // Successfully got semaphore.
609  CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
610  }
611 }
612 
613 
// Wait on the semaphore with a |timeout| in microseconds. Returns true
// when acquired, false on timeout or if the clock cannot be read;
// retries after EINTR.
bool FreeBSDSemaphore::Wait(int timeout) {
  const long kOneSecondMicros = 1000000;  // NOLINT

  // Split timeout into second and nanosecond parts.
  struct timeval delta;
  delta.tv_usec = timeout % kOneSecondMicros;
  delta.tv_sec = timeout / kOneSecondMicros;

  struct timeval current_time;
  // Get the current time.
  if (gettimeofday(&current_time, NULL) == -1) {
    return false;
  }

  // Calculate time for end of timeout (absolute deadline for sem_timedwait).
  struct timeval end_time;
  timeradd(&current_time, &delta, &end_time);

  struct timespec ts;
  TIMEVAL_TO_TIMESPEC(&end_time, &ts);
  while (true) {
    int result = sem_timedwait(&sem_, &ts);
    if (result == 0) return true;  // Successfully got semaphore.
    if (result == -1 && errno == ETIMEDOUT) return false;  // Timeout.
    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
  }
}
641 
642 
// Factory for the platform semaphore; caller owns the result.
Semaphore* OS::CreateSemaphore(int count) {
  return new FreeBSDSemaphore(count);
}
646 
647 
// Identify the calling thread for use by the SIGPROF sampler.
static pthread_t GetThreadID() {
  return pthread_self();
}
652 
653 
// Remembers the VM thread's id at construction so the signal sender can
// target SIGPROF at it later.
class Sampler::PlatformData : public Malloced {
 public:
  PlatformData() : vm_tid_(GetThreadID()) {}

  pthread_t vm_tid() const { return vm_tid_; }

 private:
  pthread_t vm_tid_;  // Thread being profiled (the constructing thread).
};
663 
664 
// SIGPROF handler: captures pc/sp/fp from the interrupted context and
// feeds a tick sample to the active sampler. Bails out unless a fully
// initialized, in-use isolate owns the current thread.
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
  USE(info);
  if (signal != SIGPROF) return;
  Isolate* isolate = Isolate::UncheckedCurrent();
  if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
    // We require a fully initialized and entered isolate.
    return;
  }
  if (v8::Locker::IsActive() &&
      !isolate->thread_manager()->IsLockedByCurrentThread()) {
    return;
  }

  Sampler* sampler = isolate->logger()->sampler();
  if (sampler == NULL || !sampler->IsActive()) return;

  TickSample sample_obj;
  TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
  if (sample == NULL) sample = &sample_obj;

  // Extracting the sample from the context is extremely machine dependent.
  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
  mcontext_t& mcontext = ucontext->uc_mcontext;
  sample->state = isolate->current_vm_state();
#if V8_HOST_ARCH_IA32
  sample->pc = reinterpret_cast<Address>(mcontext.mc_eip);
  sample->sp = reinterpret_cast<Address>(mcontext.mc_esp);
  sample->fp = reinterpret_cast<Address>(mcontext.mc_ebp);
#elif V8_HOST_ARCH_X64
  sample->pc = reinterpret_cast<Address>(mcontext.mc_rip);
  sample->sp = reinterpret_cast<Address>(mcontext.mc_rsp);
  sample->fp = reinterpret_cast<Address>(mcontext.mc_rbp);
#elif V8_HOST_ARCH_ARM
  sample->pc = reinterpret_cast<Address>(mcontext.mc_r15);
  sample->sp = reinterpret_cast<Address>(mcontext.mc_r13);
  sample->fp = reinterpret_cast<Address>(mcontext.mc_r11);
#endif
  sampler->SampleStack(sample);
  sampler->Tick(sample);
}
705 
706 
707 class SignalSender : public Thread {
708  public:
712  };
713 
714  static const int kSignalSenderStackSize = 64 * KB;
715 
716  explicit SignalSender(int interval)
717  : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
718  interval_(interval) {}
719 
720  static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
721  static void TearDown() { delete mutex_; }
722 
723  static void AddActiveSampler(Sampler* sampler) {
724  ScopedLock lock(mutex_);
726  if (instance_ == NULL) {
727  // Install a signal handler.
728  struct sigaction sa;
729  sa.sa_sigaction = ProfilerSignalHandler;
730  sigemptyset(&sa.sa_mask);
731  sa.sa_flags = SA_RESTART | SA_SIGINFO;
733  (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
734 
735  // Start a thread that sends SIGPROF signal to VM threads.
736  instance_ = new SignalSender(sampler->interval());
737  instance_->Start();
738  } else {
739  ASSERT(instance_->interval_ == sampler->interval());
740  }
741  }
742 
743  static void RemoveActiveSampler(Sampler* sampler) {
744  ScopedLock lock(mutex_);
748  delete instance_;
749  instance_ = NULL;
750 
751  // Restore the old signal handler.
753  sigaction(SIGPROF, &old_signal_handler_, 0);
755  }
756  }
757  }
758 
759  // Implement Thread::Run().
760  virtual void Run() {
762  while ((state = SamplerRegistry::GetState()) !=
764  bool cpu_profiling_enabled =
766  bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
767  // When CPU profiling is enabled both JavaScript and C++ code is
768  // profiled. We must not suspend.
769  if (!cpu_profiling_enabled) {
770  if (rate_limiter_.SuspendIfNecessary()) continue;
771  }
772  if (cpu_profiling_enabled && runtime_profiler_enabled) {
774  return;
775  }
778  return;
779  }
781  } else {
782  if (cpu_profiling_enabled) {
784  this)) {
785  return;
786  }
787  }
788  if (runtime_profiler_enabled) {
790  NULL)) {
791  return;
792  }
793  }
795  }
796  }
797  }
798 
799  static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
800  if (!sampler->IsProfiling()) return;
801  SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
802  sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
803  }
804 
805  static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
806  if (!sampler->isolate()->IsInitialized()) return;
807  sampler->isolate()->runtime_profiler()->NotifyTick();
808  }
809 
810  void SendProfilingSignal(pthread_t tid) {
811  if (!signal_handler_installed_) return;
812  pthread_kill(tid, SIGPROF);
813  }
814 
815  void Sleep(SleepInterval full_or_half) {
816  // Convert ms to us and subtract 100 us to compensate delays
817  // occuring during signal delivery.
818  useconds_t interval = interval_ * 1000 - 100;
819  if (full_or_half == HALF_INTERVAL) interval /= 2;
820  int result = usleep(interval);
821 #ifdef DEBUG
822  if (result != 0 && errno != EINTR) {
823  fprintf(stderr,
824  "SignalSender usleep error; interval = %u, errno = %d\n",
825  interval,
826  errno);
827  ASSERT(result == 0 || errno == EINTR);
828  }
829 #endif
830  USE(result);
831  }
832 
833  const int interval_;
834  RuntimeProfilerRateLimiter rate_limiter_;
835 
836  // Protects the process wide state below.
837  static Mutex* mutex_;
840  static struct sigaction old_signal_handler_;
841 
842  private:
844 };
845 
848 struct sigaction SignalSender::old_signal_handler_;
850 
851 
// One-time process setup: seed random() and create the mutex guarding
// the allocated-space limits.
// NOTE(review): the extraction appears to have dropped a trailing line
// here (likely SignalSender::SetUp()) — verify upstream.
void OS::SetUp() {
  // Seed the random number generator.
  // Convert the current time to a 64-bit integer first, before converting it
  // to an unsigned. Going directly can cause an overflow and the seed to be
  // set to all ones. The seed will be identical for different instances that
  // call this setup code within the same millisecond.
  uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
  srandom(static_cast<unsigned int>(seed));
  limit_mutex = CreateMutex();
}
863 
864 
// Process teardown counterpart of OS::SetUp.
// NOTE(review): the extraction appears to have dropped a line here
// (likely SignalSender::TearDown()) — verify upstream.
void OS::TearDown() {
  delete limit_mutex;
}
869 
870 
// Construct an inactive sampler for |isolate| ticking every |interval| ms.
Sampler::Sampler(Isolate* isolate, int interval)
    : isolate_(isolate),
      interval_(interval),
      profiling_(false),
      active_(false),
      samples_taken_(0) {
  data_ = new PlatformData;
}
879 
880 
882  ASSERT(!IsActive());
883  delete data_;
884 }
885 
886 
// Activate sampling for this sampler.
// NOTE(review): the extraction appears to have dropped a line here
// (likely SignalSender::AddActiveSampler(this)) — verify upstream.
void Sampler::Start() {
  ASSERT(!IsActive());
  SetActive(true);
}
892 
893 
// Deactivate sampling for this sampler.
// NOTE(review): the extraction appears to have dropped a line here
// (likely SignalSender::RemoveActiveSampler(this)) — verify upstream.
void Sampler::Stop() {
  ASSERT(IsActive());
  SetActive(false);
}
899 
900 
901 } } // namespace v8::internal
byte * Address
Definition: globals.h:157
static void * GetThreadLocal(LocalStorageKey key)
static void Free(void *address, const size_t size)
char ** backtrace_symbols(void *const *, int) __attribute__((weak_import))
Thread(const Options &options)
PlatformData * platform_data()
Definition: platform.h:772
StateTag current_vm_state()
Definition: isolate.h:1003
#define LOG(isolate, Call)
Definition: log.h:81
const int KB
Definition: globals.h:207
void SendProfilingSignal(pthread_t tid)
bool IsActive() const
Definition: platform.h:761
Isolate * isolate()
Definition: platform.h:763
static void SignalCodeMovingGC()
static void * GetRandomMmapAddr()
double ceiling(double x)
static void * ReserveRegion(size_t size)
T Max(T a, T b)
Definition: utils.h:222
static bool IsOutsideAllocatedSpace(void *pointer)
static const char * LocalTimezone(double time)
Vector< char > MutableCStrVector(char *data)
Definition: utils.h:530
Sampler * sampler()
Definition: log.cc:1764
static const int kStackWalkError
Definition: platform.h:245
void Sleep(SleepInterval full_or_half)
static const int kStackWalkMaxTextLen
Definition: platform.h:247
static void DoRuntimeProfile(Sampler *sampler, void *ignored)
TickSample * sample
PosixMemoryMappedFile(FILE *file, void *memory, int size)
static struct sigaction old_signal_handler_
#define ASSERT(condition)
Definition: checks.h:270
int interval() const
Definition: platform.h:739
ThreadManager * thread_manager()
Definition: isolate.h:882
#define CHECK(condition)
Definition: checks.h:56
int isnan(double x)
static MemoryMappedFile * open(const char *name)
static void RemoveActiveSampler(Sampler *sampler)
Definition: log.cc:1868
unsigned int seed
Definition: test-strings.cc:18
#define timeradd(a, b, result)
static void Abort()
static void StopRuntimeProfilerThreadBeforeShutdown(Thread *thread)
static SignalSender * instance_
static void ReleaseStore(volatile AtomicWord *ptr, AtomicWord value)
RuntimeProfiler * runtime_profiler()
Definition: isolate.h:826
static TickSample * TickSampleEvent(Isolate *isolate)
bool IsProfiling() const
Definition: platform.h:756
void POSIXPostSetUp()
static LocalStorageKey CreateThreadLocalKey()
bool IsAligned(T value, U alignment)
Definition: utils.h:206
static MemoryMappedFile * create(const char *name, int size, void *initial)
bool Commit(void *address, size_t size, bool is_executable)
#define DISALLOW_COPY_AND_ASSIGN(TypeName)
Definition: globals.h:307
intptr_t AtomicWord
Definition: atomicops.h:75
static void Guard(void *address, const size_t size)
T RoundUp(T x, intptr_t m)
Definition: utils.h:150
#define ASSERT_LE(v1, v2)
Definition: checks.h:275
static Mutex * CreateMutex()
static bool IsActive()
Definition: v8threads.cc:97
static void DebugBreak()
int backtrace(void **, int) __attribute__((weak_import))
activate correct semantics for inheriting readonliness false
Definition: flags.cc:141
static void DeleteThreadLocalKey(LocalStorageKey key)
static void Sleep(const int milliseconds)
static void TearDown()
static int SNPrintF(Vector< char > str, const char *format,...)
static Semaphore * CreateSemaphore(int count)
#define ISOLATE
Definition: isolate.h:1435
#define TIMEVAL_TO_TIMESPEC(tv, ts)
static bool ReleaseRegion(void *base, size_t size)
static bool CommitRegion(void *base, size_t size, bool is_executable)
static void SetThreadLocal(LocalStorageKey key, void *value)
static void * Allocate(const size_t requested, size_t *allocated, bool is_executable)
static int StackWalk(Vector< StackFrame > frames)
static void PostSetUp()
static State GetState()
Definition: log.cc:1847
static bool UncommitRegion(void *base, size_t size)
static void LogSharedLibraryAddresses()
static void SetUp()
void USE(T)
Definition: globals.h:289
static int ActivationFrameAlignment()
static const int kSignalSenderStackSize
Logger * logger()
Definition: isolate.h:828
static size_t AllocateAlignment()
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination use dead code elimination trace on stack replacement optimize closures cache optimized code for closures functions with arguments object loop weight for representation inference allow uint32 values on optimize frames if they are used only in safe operations track parallel recompilation enable all profiler experiments number of stack frames inspected by the profiler call recompile stub directly when self optimizing trigger profiler ticks based on counting instead of timing weight back edges by jump distance for interrupt triggering percentage of ICs that must have type info to allow optimization watch_ic_patching retry_self_opt interrupt_at_exit extra verbose compilation tracing generate extra emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 and VFP2 enable use of VFP2 instructions if available enable use of SDIV and UDIV instructions if enable loading bit constant by means of movw movt instruction enable unaligned accesses for enable use of MIPS FPU instructions if NULL
Definition: flags.cc:301
static void AddActiveSampler(Sampler *sampler)
static void AddActiveSampler(Sampler *sampler)
Definition: log.cc:1856
Sampler(Isolate *isolate, int interval)
static uint64_t CpuFeaturesImpliedByPlatform()
RuntimeProfilerRateLimiter rate_limiter_
static bool IterateActiveSamplers(VisitSampler func, void *param)
Definition: log.cc:1830
static void RemoveActiveSampler(Sampler *sampler)
static double LocalTimeOffset()
T Min(T a, T b)
Definition: utils.h:229
static intptr_t CommitPageSize()
bool Uncommit(void *address, size_t size)
static void DoCpuProfile(Sampler *sampler, void *raw_sender)