v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine.
platform-macos.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Platform specific code for MacOS goes here. For the POSIX compatible parts
// the implementation is in platform-posix.cc.

#include <dlfcn.h>
#include <unistd.h>
#include <sys/mman.h>
#include <mach/mach_init.h>
#include <mach-o/dyld.h>
#include <mach-o/getsect.h>

#include <AvailabilityMacros.h>

#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <libkern/OSAtomic.h>
#include <mach/mach.h>
#include <mach/semaphore.h>
#include <mach/task.h>
#include <mach/vm_statistics.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#undef MAP_TYPE

#include "v8.h"

#include "platform-posix.h"
#include "platform.h"
#include "vm-state-inl.h"

// Manually define these here as weak imports, rather than including execinfo.h.
// This lets us launch on 10.4 which does not have these calls.
extern "C" {
  extern int backtrace(void**, int) __attribute__((weak_import));
  extern char** backtrace_symbols(void* const*, int)
      __attribute__((weak_import));
  extern void backtrace_symbols_fd(void* const*, int, int)
      __attribute__((weak_import));
}
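
// [Editor's note, not part of the original file] With weak imports, the
// symbols above resolve to NULL at load time when the library is missing
// (execinfo on 10.4), so callers must test the function pointer before use:
//
//   if (backtrace != NULL) {
//     void* addresses[64];
//     int depth = backtrace(addresses, 64);
//     backtrace_symbols_fd(addresses, depth, 2);  // 2 == stderr
//   }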


namespace v8 {
namespace internal {

// 0 is never a valid thread id on MacOSX since a pthread_t is
// a pointer.
static const pthread_t kNoThread = (pthread_t) 0;


double ceiling(double x) {
  // Correct Mac OS X Leopard 'ceil' behavior.
  if (-1.0 < x && x < 0.0) {
    return -0.0;
  } else {
    return ceil(x);
  }
}
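
// [Editor's note, not in the original file] The special case exists because
// Leopard's ceil() could lose the sign for inputs in (-1, 0), returning +0.0
// where IEEE 754 calls for -0.0. Presumably:
//
//   ceiling(-0.5);  // -0.0, sign preserved
//   ceiling(1.2);   // 2.0, delegated to ceil()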


static Mutex* limit_mutex = NULL;


void OS::PostSetUp() {
  POSIXPostSetUp();
}


// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification). The estimate is conservative, i.e., not all addresses in
// 'allocated' space are actually allocated to our heap. The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
static void* highest_ever_allocated = reinterpret_cast<void*>(0);


static void UpdateAllocatedSpaceLimits(void* address, int size) {
  ASSERT(limit_mutex != NULL);
  ScopedLock lock(limit_mutex);

  lowest_ever_allocated = Min(lowest_ever_allocated, address);
  highest_ever_allocated =
      Max(highest_ever_allocated,
          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
}


bool OS::IsOutsideAllocatedSpace(void* address) {
  return address < lowest_ever_allocated || address >= highest_ever_allocated;
}
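
// [Editor's note, not in the original file] Because the bounds only ever
// widen, a positive answer here is definitive while a negative one is not:
// a pointer outside [lowest, highest) certainly does not belong to the V8
// heap, but a pointer inside the range merely might. That one-sided
// guarantee is exactly what heap-verification assertions need, e.g.:
//
//   ASSERT(!OS::IsOutsideAllocatedSpace(maybe_heap_object_address));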


size_t OS::AllocateAlignment() {
  return getpagesize();
}


// Constants used for mmap.
// kMmapFd is used to pass vm_alloc flags to tag the region with the user
// defined tag 255. This helps identify V8-allocated regions in memory
// analysis tools like vmmap(1).
static const int kMmapFd = VM_MAKE_TAG(255);
static const off_t kMmapFdOffset = 0;


void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool is_executable) {
  const size_t msize = RoundUp(requested, getpagesize());
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  void* mbase = mmap(OS::GetRandomMmapAddr(),
                     msize,
                     prot,
                     MAP_PRIVATE | MAP_ANON,
                     kMmapFd,
                     kMmapFdOffset);
  if (mbase == MAP_FAILED) {
    LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
    return NULL;
  }
  *allocated = msize;
  UpdateAllocatedSpaceLimits(mbase, msize);
  return mbase;
}
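
// [Editor's sketch, not part of the original file] Minimal use of
// OS::Allocate as defined above; the request is rounded up to a whole number
// of pages and the rounded size is reported back through 'allocated'.
static void AllocateUsageSketch() {
  size_t actual = 0;
  void* block = OS::Allocate(4000, &actual, false);  // non-executable
  if (block != NULL) {
    ASSERT(actual >= 4000);   // e.g. 4096 with 4 KB pages
    OS::Free(block, actual);  // must free the rounded size
  }
}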


void OS::Free(void* address, const size_t size) {
  // TODO(1240712): munmap has a return value which is ignored here.
  int result = munmap(address, size);
  USE(result);
  ASSERT(result == 0);
}


void OS::Sleep(int milliseconds) {
  usleep(1000 * milliseconds);
}


void OS::Abort() {
  // Redirect to std abort to signal abnormal program termination.
  abort();
}


void OS::DebugBreak() {
  asm("int $3");
}


class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
      : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }
 private:
  FILE* file_;
  void* memory_;
  int size_;
};


OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
  FILE* file = fopen(name, "r+");
  if (file == NULL) return NULL;

  fseek(file, 0, SEEK_END);
  int size = ftell(file);

  void* memory =
      mmap(OS::GetRandomMmapAddr(),
           size,
           PROT_READ | PROT_WRITE,
           MAP_SHARED,
           fileno(file),
           0);
  return new PosixMemoryMappedFile(file, memory, size);
}


OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
                                                   void* initial) {
  FILE* file = fopen(name, "w+");
  if (file == NULL) return NULL;
  int result = fwrite(initial, size, 1, file);
  if (result < 1) {
    fclose(file);
    return NULL;
  }
  void* memory =
      mmap(OS::GetRandomMmapAddr(),
           size,
           PROT_READ | PROT_WRITE,
           MAP_SHARED,
           fileno(file),
           0);
  return new PosixMemoryMappedFile(file, memory, size);
}


PosixMemoryMappedFile::~PosixMemoryMappedFile() {
  if (memory_) OS::Free(memory_, size_);
  fclose(file_);
}
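
// [Editor's note, not part of the original file] The two factories differ
// only in how the backing file comes to exist: open() maps a file already on
// disk, while create() first writes 'size' bytes from 'initial' and then
// maps that. A hypothetical round trip:
//
//   char zeros[4096] = {0};
//   OS::MemoryMappedFile* f =
//       OS::MemoryMappedFile::create("/tmp/v8-demo", sizeof(zeros), zeros);
//   if (f != NULL) {
//     memset(f->memory(), 0xAB, f->size());  // visible through the mapping
//     delete f;                              // unmaps and closes the file
//   }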


void OS::LogSharedLibraryAddresses() {
  unsigned int images_count = _dyld_image_count();
  for (unsigned int i = 0; i < images_count; ++i) {
    const mach_header* header = _dyld_get_image_header(i);
    if (header == NULL) continue;
#if V8_HOST_ARCH_X64
    uint64_t size;
    char* code_ptr = getsectdatafromheader_64(
        reinterpret_cast<const mach_header_64*>(header),
        SEG_TEXT,
        SECT_TEXT,
        &size);
#else
    unsigned int size;
    char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
#endif
    if (code_ptr == NULL) continue;
    const uintptr_t slide = _dyld_get_image_vmaddr_slide(i);
    const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
    LOG(Isolate::Current(),
        SharedLibraryEvent(_dyld_get_image_name(i), start, start + size));
  }
}


void OS::SignalCodeMovingGC() {
}


uint64_t OS::CpuFeaturesImpliedByPlatform() {
  // Mac OS X requires all of these to install, so we can assume they are
  // present. These constants are defined by the CPUID instruction.
  const uint64_t one = 1;
  return (one << SSE2) | (one << CMOV) | (one << RDTSC) | (one << CPUID);
}


int OS::ActivationFrameAlignment() {
  // OS X activation frames must be 16 byte-aligned; see "Mac OS X ABI
  // Function Call Guide".
  return 16;
}


void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
  OSMemoryBarrier();
  *ptr = value;
}


const char* OS::LocalTimezone(double time) {
  if (isnan(time)) return "";
  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
  struct tm* t = localtime(&tv);
  if (NULL == t) return "";
  return t->tm_zone;
}


double OS::LocalTimeOffset() {
  time_t tv = time(NULL);
  struct tm* t = localtime(&tv);
  // tm_gmtoff includes any daylight savings offset, so subtract it.
  return static_cast<double>(t->tm_gmtoff * msPerSecond -
                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}
310 
311 int OS::StackWalk(Vector<StackFrame> frames) {
312  // If weak link to execinfo lib has failed, ie because we are on 10.4, abort.
313  if (backtrace == NULL)
314  return 0;
315 
316  int frames_size = frames.length();
317  ScopedVector<void*> addresses(frames_size);
318 
319  int frames_count = backtrace(addresses.start(), frames_size);
320 
321  char** symbols = backtrace_symbols(addresses.start(), frames_count);
322  if (symbols == NULL) {
323  return kStackWalkError;
324  }
325 
326  for (int i = 0; i < frames_count; i++) {
327  frames[i].address = addresses[i];
328  // Format a text representation of the frame based on the information
329  // available.
330  SNPrintF(MutableCStrVector(frames[i].text,
331  kStackWalkMaxTextLen),
332  "%s",
333  symbols[i]);
334  // Make sure line termination is in place.
335  frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
336  }
337 
338  free(symbols);
339 
340  return frames_count;
341 }


VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }


VirtualMemory::VirtualMemory(size_t size)
    : address_(ReserveRegion(size)), size_(size) { }


VirtualMemory::VirtualMemory(size_t size, size_t alignment)
    : address_(NULL), size_(0) {
  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  void* reservation = mmap(OS::GetRandomMmapAddr(),
                           request_size,
                           PROT_NONE,
                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                           kMmapFd,
                           kMmapFdOffset);
  if (reservation == MAP_FAILED) return;

  Address base = static_cast<Address>(reservation);
  Address aligned_base = RoundUp(base, alignment);
  ASSERT_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  ASSERT_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  ASSERT(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
}
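
// [Editor's note, not in the original file] Worked example of the
// over-reserve-and-trim scheme above, assuming 4 KB pages: a request for
// size = 16 KB at alignment = 64 KB reserves 80 KB, so an aligned base is
// guaranteed to exist somewhere inside the reservation. If mmap returned,
// say, base = 0x5000, then aligned_base = 0x10000; the 44 KB prefix
// [0x5000, 0x10000) and the 20 KB suffix beyond 0x14000 are unmapped,
// leaving exactly the 16 KB block [0x10000, 0x14000).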


VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    bool result = ReleaseRegion(address(), size());
    ASSERT(result);
    USE(result);
  }
}


void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}


void* VirtualMemory::ReserveRegion(size_t size) {
  void* result = mmap(OS::GetRandomMmapAddr(),
                      size,
                      PROT_NONE,
                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                      kMmapFd,
                      kMmapFdOffset);

  if (result == MAP_FAILED) return NULL;

  return result;
}


bool VirtualMemory::IsReserved() {
  return address_ != NULL;
}


bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}


bool VirtualMemory::Guard(void* address) {
  OS::Guard(address, OS::CommitPageSize());
  return true;
}


bool VirtualMemory::CommitRegion(void* address,
                                 size_t size,
                                 bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(address,
                         size,
                         prot,
                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                         kMmapFd,
                         kMmapFdOffset)) {
    return false;
  }

  UpdateAllocatedSpaceLimits(address, size);
  return true;
}


bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}


bool VirtualMemory::UncommitRegion(void* address, size_t size) {
  return mmap(address,
              size,
              PROT_NONE,
              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
              kMmapFd,
              kMmapFdOffset) != MAP_FAILED;
}


bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
  return munmap(address, size) == 0;
}
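
// [Editor's sketch, not part of the original file] Typical lifecycle of the
// VirtualMemory API above: reserve address space (PROT_NONE, MAP_NORESERVE),
// commit pages on demand by remapping them read/write with MAP_FIXED, and
// let the destructor release the whole reservation.
static void VirtualMemoryLifecycleSketch() {
  VirtualMemory vm(1024 * KB);  // reservation only; touching it would fault
  if (vm.IsReserved()) {
    size_t page = static_cast<size_t>(OS::CommitPageSize());
    vm.Commit(vm.address(), page, false);  // first page becomes usable
    vm.Uncommit(vm.address(), page);       // back to inaccessible
  }
}  // ~VirtualMemory releases the region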


class Thread::PlatformData : public Malloced {
 public:
  PlatformData() : thread_(kNoThread) {}
  pthread_t thread_;  // Thread handle for pthread.
};


Thread::Thread(const Options& options)
    : data_(new PlatformData),
      stack_size_(options.stack_size()) {
  set_name(options.name());
}


Thread::~Thread() {
  delete data_;
}


static void SetThreadName(const char* name) {
  // pthread_setname_np is only available in 10.6 or later, so test
  // for it at runtime.
  int (*dynamic_pthread_setname_np)(const char*);
  *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
      dlsym(RTLD_DEFAULT, "pthread_setname_np");
  if (!dynamic_pthread_setname_np)
    return;

  // Mac OS X does not expose the length limit of the name, so hardcode it.
  static const int kMaxNameLength = 63;
  USE(kMaxNameLength);
  ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength);
  dynamic_pthread_setname_np(name);
}
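
// [Editor's note, not in the original file] This is the standard pattern for
// calling an API that may not exist on the deployment target: look the
// symbol up with dlsym(RTLD_DEFAULT, ...) instead of linking against it, and
// fall through silently when it is absent. The reinterpret_cast through
// void** sidesteps the ISO C++ rule against converting an object pointer
// (dlsym's return type) directly to a function pointer.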


static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  // This is also initialized by the first argument to pthread_create() but we
  // don't know which thread will run first (the original thread or the new
  // one) so we initialize it here too.
  thread->data()->thread_ = pthread_self();
  SetThreadName(thread->name());
  ASSERT(thread->data()->thread_ != kNoThread);
  thread->Run();
  return NULL;
}


void Thread::set_name(const char* name) {
  strncpy(name_, name, sizeof(name_));
  name_[sizeof(name_) - 1] = '\0';
}


void Thread::Start() {
  pthread_attr_t* attr_ptr = NULL;
  pthread_attr_t attr;
  if (stack_size_ > 0) {
    pthread_attr_init(&attr);
    pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
    attr_ptr = &attr;
  }
  pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
  ASSERT(data_->thread_ != kNoThread);
}


void Thread::Join() {
  pthread_join(data_->thread_, NULL);
}


#ifdef V8_FAST_TLS_SUPPORTED

static Atomic32 tls_base_offset_initialized = 0;
intptr_t kMacTlsBaseOffset = 0;

// It's safe to do the initialization more than once, but it has to be
// done at least once.
static void InitializeTlsBaseOffset() {
  const size_t kBufferSize = 128;
  char buffer[kBufferSize];
  size_t buffer_size = kBufferSize;
  int ctl_name[] = { CTL_KERN, KERN_OSRELEASE };
  if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) {
    V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
  }
  // The buffer now contains a string of the form XX.YY.ZZ, where
  // XX is the major kernel version component.
  // Make sure the buffer is 0-terminated.
  buffer[kBufferSize - 1] = '\0';
  char* period_pos = strchr(buffer, '.');
  *period_pos = '\0';
  int kernel_version_major =
      static_cast<int>(strtol(buffer, NULL, 10));  // NOLINT
  // The constants below are taken from pthreads.s from the XNU kernel
  // sources archive at www.opensource.apple.com.
  if (kernel_version_major < 11) {
    // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
    // same offsets.
#if defined(V8_HOST_ARCH_IA32)
    kMacTlsBaseOffset = 0x48;
#else
    kMacTlsBaseOffset = 0x60;
#endif
  } else {
    // 11.x.x (Lion) changed the offset.
    kMacTlsBaseOffset = 0;
  }

  Release_Store(&tls_base_offset_initialized, 1);
}
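
// [Editor's note, not in the original file] KERN_OSRELEASE reports the
// Darwin kernel version, not the marketing OS version: for example "10.8.0"
// corresponds to Snow Leopard (Mac OS X 10.6) and "11.4.2" to Lion (10.7).
// Only the major component before the first '.' matters here, which is why
// the code truncates at period_pos and parses the prefix with strtol.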

static void CheckFastTls(Thread::LocalStorageKey key) {
  void* expected = reinterpret_cast<void*>(0x1234CAFE);
  Thread::SetThreadLocal(key, expected);
  void* actual = Thread::GetExistingThreadLocal(key);
  if (expected != actual) {
    V8_Fatal(__FILE__, __LINE__,
             "V8 failed to initialize fast TLS on current kernel");
  }
  Thread::SetThreadLocal(key, NULL);
}

#endif  // V8_FAST_TLS_SUPPORTED


Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
#ifdef V8_FAST_TLS_SUPPORTED
  bool check_fast_tls = false;
  if (tls_base_offset_initialized == 0) {
    check_fast_tls = true;
    InitializeTlsBaseOffset();
  }
#endif
  pthread_key_t key;
  int result = pthread_key_create(&key, NULL);
  USE(result);
  ASSERT(result == 0);
  LocalStorageKey typed_key = static_cast<LocalStorageKey>(key);
#ifdef V8_FAST_TLS_SUPPORTED
  // If we just initialized fast TLS support, make sure it works.
  if (check_fast_tls) CheckFastTls(typed_key);
#endif
  return typed_key;
}


void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  int result = pthread_key_delete(pthread_key);
  USE(result);
  ASSERT(result == 0);
}


void* Thread::GetThreadLocal(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  return pthread_getspecific(pthread_key);
}


void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  pthread_setspecific(pthread_key, value);
}


void Thread::YieldCPU() {
  sched_yield();
}


class MacOSMutex : public Mutex {
 public:
  MacOSMutex() {
    pthread_mutexattr_t attr;
    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&mutex_, &attr);
  }

  virtual ~MacOSMutex() { pthread_mutex_destroy(&mutex_); }

  virtual int Lock() { return pthread_mutex_lock(&mutex_); }
  virtual int Unlock() { return pthread_mutex_unlock(&mutex_); }

  virtual bool TryLock() {
    int result = pthread_mutex_trylock(&mutex_);
    // Return false if the lock is busy and locking failed.
    if (result == EBUSY) {
      return false;
    }
    ASSERT(result == 0);  // Verify no other errors.
    return true;
  }

 private:
  pthread_mutex_t mutex_;
};


Mutex* OS::CreateMutex() {
  return new MacOSMutex();
}


class MacOSSemaphore : public Semaphore {
 public:
  explicit MacOSSemaphore(int count) {
    int r;
    r = semaphore_create(mach_task_self(),
                         &semaphore_,
                         SYNC_POLICY_FIFO,
                         count);
    ASSERT(r == KERN_SUCCESS);
  }

  ~MacOSSemaphore() {
    int r;
    r = semaphore_destroy(mach_task_self(), semaphore_);
    ASSERT(r == KERN_SUCCESS);
  }

  void Wait() {
    int r;
    do {
      r = semaphore_wait(semaphore_);
      ASSERT(r == KERN_SUCCESS || r == KERN_ABORTED);
    } while (r == KERN_ABORTED);
  }

  bool Wait(int timeout);

  void Signal() { semaphore_signal(semaphore_); }

 private:
  semaphore_t semaphore_;
};


bool MacOSSemaphore::Wait(int timeout) {
  mach_timespec_t ts;
  ts.tv_sec = timeout / 1000000;
  ts.tv_nsec = (timeout % 1000000) * 1000;
  return semaphore_timedwait(semaphore_, ts) != KERN_OPERATION_TIMED_OUT;
}
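
// [Editor's note, not in the original file] The division by 1,000,000 and
// multiplication by 1,000 show that the timeout is taken in microseconds and
// split into the seconds/nanoseconds pair of mach_timespec_t. For example,
// Wait(1500000) builds ts = { 1 s, 500000000 ns }, a 1.5 s timed wait. Note
// that the blocking Wait() above retries on KERN_ABORTED (a spurious
// wakeup), while this timed variant reports anything other than a timeout,
// including KERN_ABORTED, as success.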


Semaphore* OS::CreateSemaphore(int count) {
  return new MacOSSemaphore(count);
}


class Sampler::PlatformData : public Malloced {
 public:
  PlatformData() : profiled_thread_(mach_thread_self()) {}

  ~PlatformData() {
    // Deallocate Mach port for thread.
    mach_port_deallocate(mach_task_self(), profiled_thread_);
  }

  thread_act_t profiled_thread() { return profiled_thread_; }

 private:
  // Note: for profiled_thread_ Mach primitives are used instead of PThread's
  // because the latter doesn't provide the required thread manipulation
  // primitives. For details, consult "Mac OS X Internals" book, Section 7.3.
  thread_act_t profiled_thread_;
};


class SamplerThread : public Thread {
 public:
  static const int kSamplerThreadStackSize = 64 * KB;

  explicit SamplerThread(int interval)
      : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
        interval_(interval) {}

  static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
  static void TearDown() { delete mutex_; }

  static void AddActiveSampler(Sampler* sampler) {
    ScopedLock lock(mutex_);
    SamplerRegistry::AddActiveSampler(sampler);
    if (instance_ == NULL) {
      instance_ = new SamplerThread(sampler->interval());
      instance_->Start();
    } else {
      ASSERT(instance_->interval_ == sampler->interval());
    }
  }

  static void RemoveActiveSampler(Sampler* sampler) {
    ScopedLock lock(mutex_);
    SamplerRegistry::RemoveActiveSampler(sampler);
    if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
      RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
      delete instance_;
      instance_ = NULL;
    }
  }

  // Implement Thread::Run().
  virtual void Run() {
    SamplerRegistry::State state;
    while ((state = SamplerRegistry::GetState()) !=
           SamplerRegistry::HAS_NO_SAMPLERS) {
      bool cpu_profiling_enabled =
          (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
      bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
      // When CPU profiling is enabled both JavaScript and C++ code is
      // profiled. We must not suspend.
      if (!cpu_profiling_enabled) {
        if (rate_limiter_.SuspendIfNecessary()) continue;
      }
      if (cpu_profiling_enabled) {
        if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
          return;
        }
      }
      if (runtime_profiler_enabled) {
        if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
          return;
        }
      }
      OS::Sleep(interval_);
    }
  }

  static void DoCpuProfile(Sampler* sampler, void* raw_sampler_thread) {
    if (!sampler->isolate()->IsInitialized()) return;
    if (!sampler->IsProfiling()) return;
    SamplerThread* sampler_thread =
        reinterpret_cast<SamplerThread*>(raw_sampler_thread);
    sampler_thread->SampleContext(sampler);
  }

  static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
    if (!sampler->isolate()->IsInitialized()) return;
    sampler->isolate()->runtime_profiler()->NotifyTick();
  }

  void SampleContext(Sampler* sampler) {
    thread_act_t profiled_thread = sampler->platform_data()->profiled_thread();
    TickSample sample_obj;
    TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
    if (sample == NULL) sample = &sample_obj;

    if (KERN_SUCCESS != thread_suspend(profiled_thread)) return;

#if V8_HOST_ARCH_X64
    thread_state_flavor_t flavor = x86_THREAD_STATE64;
    x86_thread_state64_t state;
    mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
#if __DARWIN_UNIX03
#define REGISTER_FIELD(name) __r ## name
#else
#define REGISTER_FIELD(name) r ## name
#endif  // __DARWIN_UNIX03
#elif V8_HOST_ARCH_IA32
    thread_state_flavor_t flavor = i386_THREAD_STATE;
    i386_thread_state_t state;
    mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
#if __DARWIN_UNIX03
#define REGISTER_FIELD(name) __e ## name
#else
#define REGISTER_FIELD(name) e ## name
#endif  // __DARWIN_UNIX03
#else
#error Unsupported Mac OS X host architecture.
#endif  // V8_HOST_ARCH

    if (thread_get_state(profiled_thread,
                         flavor,
                         reinterpret_cast<natural_t*>(&state),
                         &count) == KERN_SUCCESS) {
      sample->state = sampler->isolate()->current_vm_state();
      sample->pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
      sample->sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
      sample->fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
      sampler->SampleStack(sample);
      sampler->Tick(sample);
    }
    thread_resume(profiled_thread);
  }
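
  // [Editor's note, not in the original file] REGISTER_FIELD papers over the
  // two Mach thread-state naming conventions: with __DARWIN_UNIX03 the state
  // fields carry a double-underscore prefix, without it they do not. So
  // state.REGISTER_FIELD(ip) expands to state.__rip or state.rip on x64, and
  // to state.__eip or state.eip on ia32. Suspending the target thread first
  // is essential: reading pc/sp/fp from a running thread would yield an
  // inconsistent snapshot.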

  const int interval_;
  RuntimeProfilerRateLimiter rate_limiter_;

  // Protects the process wide state below.
  static Mutex* mutex_;
  static SamplerThread* instance_;

 private:
  DISALLOW_COPY_AND_ASSIGN(SamplerThread);
};

#undef REGISTER_FIELD


Mutex* SamplerThread::mutex_ = NULL;
SamplerThread* SamplerThread::instance_ = NULL;


void OS::SetUp() {
  // Seed the random number generator. We preserve microsecond resolution.
  uint64_t seed = Ticks() ^ (getpid() << 16);
  srandom(static_cast<unsigned int>(seed));
  limit_mutex = CreateMutex();
  SamplerThread::SetUp();
}


void OS::TearDown() {
  SamplerThread::TearDown();
  delete limit_mutex;
}


Sampler::Sampler(Isolate* isolate, int interval)
    : isolate_(isolate),
      interval_(interval),
      profiling_(false),
      active_(false),
      samples_taken_(0) {
  data_ = new PlatformData;
}


Sampler::~Sampler() {
  ASSERT(!IsActive());
  delete data_;
}


void Sampler::Start() {
  ASSERT(!IsActive());
  SetActive(true);
  SamplerThread::AddActiveSampler(this);
}


void Sampler::Stop() {
  ASSERT(IsActive());
  SamplerThread::RemoveActiveSampler(this);
  SetActive(false);
}


} }  // namespace v8::internal