v8  3.11.10(node0.8.26)
V8 is Google's open source JavaScript engine
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
platform-macos.cc
Go to the documentation of this file.
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
// Platform specific code for MacOS goes here. For the POSIX compatible parts
29 // the implementation is in platform-posix.cc.
30 
31 #include <dlfcn.h>
32 #include <unistd.h>
33 #include <sys/mman.h>
34 #include <mach/mach_init.h>
35 #include <mach-o/dyld.h>
36 #include <mach-o/getsect.h>
37 
38 #include <AvailabilityMacros.h>
39 
40 #include <pthread.h>
41 #include <semaphore.h>
42 #include <signal.h>
43 #include <libkern/OSAtomic.h>
44 #include <mach/mach.h>
45 #include <mach/semaphore.h>
46 #include <mach/task.h>
47 #include <mach/vm_statistics.h>
48 #include <sys/time.h>
49 #include <sys/resource.h>
50 #include <sys/types.h>
51 #include <sys/sysctl.h>
52 #include <stdarg.h>
53 #include <stdlib.h>
54 #include <string.h>
55 #include <errno.h>
56 
57 #undef MAP_TYPE
58 
59 #include "v8.h"
60 
61 #include "platform-posix.h"
62 #include "platform.h"
63 #include "vm-state-inl.h"
64 
// Manually define these here as weak imports, rather than including execinfo.h.
// This lets us launch on 10.4 which does not have these calls.
// A weakly-imported symbol resolves to NULL at runtime when the host system
// does not provide it, so callers must NULL-check (see OS::StackWalk).
extern "C" {
  extern int backtrace(void**, int) __attribute__((weak_import));
  extern char** backtrace_symbols(void* const*, int)
      __attribute__((weak_import));
  extern void backtrace_symbols_fd(void* const*, int, int)
      __attribute__((weak_import));
}
74 
75 
76 namespace v8 {
77 namespace internal {
78 
79 // 0 is never a valid thread id on MacOSX since a pthread_t is
80 // a pointer.
81 static const pthread_t kNoThread = (pthread_t) 0;
82 
83 
// Correct 'ceil' behavior for Mac OS X Leopard: for inputs in (-1, 0) the
// result must be negative zero, which the system ceil() did not guarantee.
double ceiling(double x) {
  const bool is_negative_fraction = (x > -1.0) && (x < 0.0);
  return is_negative_fraction ? -0.0 : ceil(x);
}
92 
93 
// Guards updates to the allocated-space limit statics below. Created in
// OS::SetUp() and deleted in OS::TearDown().
static Mutex* limit_mutex = NULL;


void OS::PostSetUp() {
  // NOTE(review): the rendered listing skips an internal source line here;
  // the original file likely performed POSIX post-setup work — confirm
  // against upstream platform-macos.cc before relying on this being a no-op.
}


// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification). The estimate is conservative, i.e., not all addresses in
// 'allocated' space are actually allocated to our heap. The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
static void* highest_ever_allocated = reinterpret_cast<void*>(0);
109 
110 
111 static void UpdateAllocatedSpaceLimits(void* address, int size) {
112  ASSERT(limit_mutex != NULL);
113  ScopedLock lock(limit_mutex);
114 
115  lowest_ever_allocated = Min(lowest_ever_allocated, address);
116  highest_ever_allocated =
117  Max(highest_ever_allocated,
118  reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
119 }
120 
121 
122 bool OS::IsOutsideAllocatedSpace(void* address) {
123  return address < lowest_ever_allocated || address >= highest_ever_allocated;
124 }
125 
126 
// Allocation granularity: mmap on Mac OS X works on whole pages.
size_t OS::AllocateAlignment() {
  return getpagesize();
}


// Constants used for mmap.
// kMmapFd is used to pass vm_alloc flags to tag the region with the user
// defined tag 255. This helps identify V8-allocated regions in memory
// analysis tools like vmmap(1).
static const int kMmapFd = VM_MAKE_TAG(255);
static const off_t kMmapFdOffset = 0;
138 
139 
140 void* OS::Allocate(const size_t requested,
141  size_t* allocated,
142  bool is_executable) {
143  const size_t msize = RoundUp(requested, getpagesize());
144  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
145  void* mbase = mmap(OS::GetRandomMmapAddr(),
146  msize,
147  prot,
148  MAP_PRIVATE | MAP_ANON,
149  kMmapFd,
150  kMmapFdOffset);
151  if (mbase == MAP_FAILED) {
152  LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
153  return NULL;
154  }
155  *allocated = msize;
156  UpdateAllocatedSpaceLimits(mbase, msize);
157  return mbase;
158 }
159 
160 
// Unmap a region previously returned by OS::Allocate.
void OS::Free(void* address, const size_t size) {
  // TODO(1240712): munmap has a return value which is ignored here.
  int result = munmap(address, size);
  USE(result);
  ASSERT(result == 0);
}


// Suspend the calling thread; usleep() takes microseconds.
void OS::Sleep(int milliseconds) {
  usleep(1000 * milliseconds);
}


void OS::Abort() {
  // Redirect to std abort to signal abnormal program termination
  abort();
}


void OS::DebugBreak() {
  // x86 software breakpoint; this file only supports ia32/x64 hosts
  // (see the #error for unsupported architectures further below).
  asm("int $3");
}
183 
184 
// Memory-mapped file backed by mmap(); instances are created by the
// OS::MemoryMappedFile::open()/create() factories below. The destructor
// unmaps the memory and closes the file.
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  // Takes ownership of |file| and of the |size|-byte mapping at |memory|.
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
    : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }
 private:
  FILE* file_;
  void* memory_;
  int size_;
};
197 
198 
199 OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
200  FILE* file = fopen(name, "r+");
201  if (file == NULL) return NULL;
202 
203  fseek(file, 0, SEEK_END);
204  int size = ftell(file);
205 
206  void* memory =
207  mmap(OS::GetRandomMmapAddr(),
208  size,
209  PROT_READ | PROT_WRITE,
210  MAP_SHARED,
211  fileno(file),
212  0);
213  return new PosixMemoryMappedFile(file, memory, size);
214 }
215 
216 
217 OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
218  void* initial) {
219  FILE* file = fopen(name, "w+");
220  if (file == NULL) return NULL;
221  int result = fwrite(initial, size, 1, file);
222  if (result < 1) {
223  fclose(file);
224  return NULL;
225  }
226  void* memory =
227  mmap(OS::GetRandomMmapAddr(),
228  size,
229  PROT_READ | PROT_WRITE,
230  MAP_SHARED,
231  fileno(file),
232  0);
233  return new PosixMemoryMappedFile(file, memory, size);
234 }
235 
236 
237 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
238  if (memory_) OS::Free(memory_, size_);
239  fclose(file_);
240 }
241 
242 
// Log the runtime [start, start + size) range of the __TEXT section of
// every loaded Mach-O image, so profiler output can attribute PCs to
// shared libraries.
void OS::LogSharedLibraryAddresses() {
  unsigned int images_count = _dyld_image_count();
  for (unsigned int i = 0; i < images_count; ++i) {
    const mach_header* header = _dyld_get_image_header(i);
    if (header == NULL) continue;
#if V8_HOST_ARCH_X64
    uint64_t size;
    char* code_ptr = getsectdatafromheader_64(
        reinterpret_cast<const mach_header_64*>(header),
        SEG_TEXT,
        SECT_TEXT,
        &size);
#else
    unsigned int size;
    char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
#endif
    if (code_ptr == NULL) continue;
    // The section address from the header is unslid; add the image's
    // virtual-address slide to obtain the actual load address.
    const uintptr_t slide = _dyld_get_image_vmaddr_slide(i);
    const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
    LOG(Isolate::Current(),
        SharedLibraryEvent(_dyld_get_image_name(i), start, start + size));
  }
}


// Intentionally a no-op on Mac OS X.
void OS::SignalCodeMovingGC() {
}
270 
271 
272 uint64_t OS::CpuFeaturesImpliedByPlatform() {
273  // MacOSX requires all these to install so we can assume they are present.
274  // These constants are defined by the CPUid instructions.
275  const uint64_t one = 1;
276  return (one << SSE2) | (one << CMOV) | (one << RDTSC) | (one << CPUID);
277 }
278 
279 
280 int OS::ActivationFrameAlignment() {
281  // OS X activation frames must be 16 byte-aligned; see "Mac OS X ABI
282  // Function Call Guide".
283  return 16;
284 }
285 
286 
// Store |value| with release semantics: the OSMemoryBarrier() ensures all
// preceding memory operations complete before the store becomes visible.
void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
  OSMemoryBarrier();
  *ptr = value;
}
291 
292 
293 const char* OS::LocalTimezone(double time) {
294  if (isnan(time)) return "";
295  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
296  struct tm* t = localtime(&tv);
297  if (NULL == t) return "";
298  return t->tm_zone;
299 }
300 
301 
302 double OS::LocalTimeOffset() {
303  time_t tv = time(NULL);
304  struct tm* t = localtime(&tv);
305  // tm_gmtoff includes any daylight savings offset, so subtract it.
306  return static_cast<double>(t->tm_gmtoff * msPerSecond -
307  (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
308 }
309 
310 
// Capture up to frames.length() stack frames of the current thread via the
// weakly-imported execinfo functions. Returns the number of frames filled,
// 0 when backtrace() is unavailable (Mac OS X 10.4), or kStackWalkError if
// symbolization fails.
int OS::StackWalk(Vector<StackFrame> frames) {
  // If weak link to execinfo lib has failed, ie because we are on 10.4, abort.
  if (backtrace == NULL)
    return 0;

  int frames_size = frames.length();
  ScopedVector<void*> addresses(frames_size);

  int frames_count = backtrace(addresses.start(), frames_size);

  // backtrace_symbols() mallocs a single block holding all strings; it is
  // released with the free() below.
  char** symbols = backtrace_symbols(addresses.start(), frames_count);
  if (symbols == NULL) {
    return kStackWalkError;
  }

  for (int i = 0; i < frames_count; i++) {
    frames[i].address = addresses[i];
    // Format a text representation of the frame based on the information
    // available.
    SNPrintF(MutableCStrVector(frames[i].text,
                               kStackWalkMaxTextLen),
             "%s",
             symbols[i]);
    // Make sure line termination is in place.
    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
  }

  free(symbols);

  return frames_count;
}
342 
343 
// Empty placeholder owning no region.
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }


// Reserve (but do not commit) |size| bytes of address space.
VirtualMemory::VirtualMemory(size_t size)
    : address_(ReserveRegion(size)), size_(size) { }


// Reserve |size| bytes aligned to |alignment| by over-reserving
// size + alignment bytes and unmapping the unaligned head and tail.
// On failure address_ stays NULL, so IsReserved() reports false.
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
    : address_(NULL), size_(0) {
  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  void* reservation = mmap(OS::GetRandomMmapAddr(),
                           request_size,
                           PROT_NONE,
                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                           kMmapFd,
                           kMmapFdOffset);
  if (reservation == MAP_FAILED) return;

  Address base = static_cast<Address>(reservation);
  Address aligned_base = RoundUp(base, alignment);
  ASSERT_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  ASSERT_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  // After trimming, exactly the aligned block must remain.
  ASSERT(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
}
389 
390 
392  if (IsReserved()) {
393  bool result = ReleaseRegion(address(), size());
394  ASSERT(result);
395  USE(result);
396  }
397 }
398 
399 
// Forget the region without unmapping it.
void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}
404 
405 
406 void* VirtualMemory::ReserveRegion(size_t size) {
407  void* result = mmap(OS::GetRandomMmapAddr(),
408  size,
409  PROT_NONE,
410  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
411  kMmapFd,
412  kMmapFdOffset);
413 
414  if (result == MAP_FAILED) return NULL;
415 
416  return result;
417 }
418 
419 
421  return address_ != NULL;
422 }
423 
424 
// Commit |size| bytes at |address| within this reservation with read/write
// (and optionally execute) access.
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}


// Turn one commit-page at |address| into an inaccessible guard page.
bool VirtualMemory::Guard(void* address) {
  OS::Guard(address, OS::CommitPageSize());
  return true;
}
434 
435 
436 bool VirtualMemory::CommitRegion(void* address,
437  size_t size,
438  bool is_executable) {
439  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
440  if (MAP_FAILED == mmap(address,
441  size,
442  prot,
443  MAP_PRIVATE | MAP_ANON | MAP_FIXED,
444  kMmapFd,
445  kMmapFdOffset)) {
446  return false;
447  }
448 
449  UpdateAllocatedSpaceLimits(address, size);
450  return true;
451 }
452 
453 
// Return the pages at [address, address + size) to reserved-only state.
bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}


// Remap the range as inaccessible (PROT_NONE, MAP_NORESERVE) over the
// committed pages, dropping their contents while keeping the address range
// reserved.
bool VirtualMemory::UncommitRegion(void* address, size_t size) {
  return mmap(address,
              size,
              PROT_NONE,
              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
              kMmapFd,
              kMmapFdOffset) != MAP_FAILED;
}


// Unmap the range entirely, releasing the address space back to the system.
bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
  return munmap(address, size) == 0;
}
472 
473 
// Per-thread platform bookkeeping: just the pthread handle.
class Thread::PlatformData : public Malloced {
 public:
  PlatformData() : thread_(kNoThread) {}
  pthread_t thread_;  // Thread handle for pthread.
};


Thread::Thread(const Options& options)
    : data_(new PlatformData),
      stack_size_(options.stack_size()) {
  set_name(options.name());
}


Thread::~Thread() {
  // Frees only the bookkeeping; does not join or terminate the thread.
  delete data_;
}
491 
492 
// Name the calling thread for debuggers and profilers.
static void SetThreadName(const char* name) {
  // pthread_setname_np is only available in 10.6 or later, so test
  // for it at runtime via dlsym(); silently do nothing on older systems.
  int (*dynamic_pthread_setname_np)(const char*);
  *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
    dlsym(RTLD_DEFAULT, "pthread_setname_np");
  if (!dynamic_pthread_setname_np)
    return;

  // Mac OS X does not expose the length limit of the name, so hardcode it.
  static const int kMaxNameLength = 63;
  USE(kMaxNameLength);
  ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength);
  dynamic_pthread_setname_np(name);
}


// pthread entry trampoline: records the thread handle, applies the thread
// name, then invokes the Thread's virtual Run().
static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  // This is also initialized by the first argument to pthread_create() but we
  // don't know which thread will run first (the original thread or the new
  // one) so we initialize it here too.
  thread->data()->thread_ = pthread_self();
  SetThreadName(thread->name());
  ASSERT(thread->data()->thread_ != kNoThread);
  thread->Run();
  return NULL;
}
521 
522 
// Copy |name| into the fixed-size name_ buffer, always NUL-terminating —
// strncpy() alone does not terminate when the source is too long.
void Thread::set_name(const char* name) {
  strncpy(name_, name, sizeof(name_));
  name_[sizeof(name_) - 1] = '\0';
}


// Start the thread running ThreadEntry. Explicit pthread attributes are
// supplied only when a custom stack size was requested.
void Thread::Start() {
  pthread_attr_t* attr_ptr = NULL;
  pthread_attr_t attr;
  if (stack_size_ > 0) {
    pthread_attr_init(&attr);
    pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
    attr_ptr = &attr;
  }
  pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
  ASSERT(data_->thread_ != kNoThread);
}


// Block until the thread's Run() has returned.
void Thread::Join() {
  pthread_join(data_->thread_, NULL);
}
545 
546 
547 #ifdef V8_FAST_TLS_SUPPORTED
548 
549 static Atomic32 tls_base_offset_initialized = 0;
550 intptr_t kMacTlsBaseOffset = 0;
551 
// It's safe to do the initialization more than once, but it has to be
// done at least once.
//
// Determine the TLS base offset used for fast TLS access by inspecting the
// Darwin kernel major version (KERN_OSRELEASE has the form "XX.YY.ZZ").
static void InitializeTlsBaseOffset() {
  const size_t kBufferSize = 128;
  char buffer[kBufferSize];
  size_t buffer_size = kBufferSize;
  int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
  if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) {
    V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
  }
  // The buffer now contains a string of the form XX.YY.ZZ, where
  // XX is the major kernel version component.
  // Make sure the buffer is 0-terminated.
  buffer[kBufferSize - 1] = '\0';
  char* period_pos = strchr(buffer, '.');
  // Guard against a version string with no '.' — the old code dereferenced
  // the strchr() result unconditionally. strtol() stops at the first
  // non-digit anyway, so skipping the truncation is safe.
  if (period_pos != NULL) *period_pos = '\0';
  int kernel_version_major =
      static_cast<int>(strtol(buffer, NULL, 10));  // NOLINT
  // The constants below are taken from pthreads.s from the XNU kernel
  // sources archive at www.opensource.apple.com.
  if (kernel_version_major < 11) {
    // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
    // same offsets.
#if defined(V8_HOST_ARCH_IA32)
    kMacTlsBaseOffset = 0x48;
#else
    kMacTlsBaseOffset = 0x60;
#endif
  } else {
    // 11.x.x (Lion) changed the offset.
    kMacTlsBaseOffset = 0;
  }

  Release_Store(&tls_base_offset_initialized, 1);
}
587 
// Smoke-test fast TLS after the base offset is computed: store a marker
// through the normal path and read it back through the fast path; abort
// the process if they disagree.
static void CheckFastTls(Thread::LocalStorageKey key) {
  void* expected = reinterpret_cast<void*>(0x1234CAFE);
  Thread::SetThreadLocal(key, expected);
  void* actual = Thread::GetExistingThreadLocal(key);
  if (expected != actual) {
    V8_Fatal(__FILE__, __LINE__,
             "V8 failed to initialize fast TLS on current kernel");
  }
}
598 
599 #endif // V8_FAST_TLS_SUPPORTED
600 
601 
603 #ifdef V8_FAST_TLS_SUPPORTED
604  bool check_fast_tls = false;
605  if (tls_base_offset_initialized == 0) {
606  check_fast_tls = true;
607  InitializeTlsBaseOffset();
608  }
609 #endif
610  pthread_key_t key;
611  int result = pthread_key_create(&key, NULL);
612  USE(result);
613  ASSERT(result == 0);
614  LocalStorageKey typed_key = static_cast<LocalStorageKey>(key);
615 #ifdef V8_FAST_TLS_SUPPORTED
616  // If we just initialized fast TLS support, make sure it works.
617  if (check_fast_tls) CheckFastTls(typed_key);
618 #endif
619  return typed_key;
620 }
621 
622 
624  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
625  int result = pthread_key_delete(pthread_key);
626  USE(result);
627  ASSERT(result == 0);
628 }
629 
630 
632  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
633  return pthread_getspecific(pthread_key);
634 }
635 
636 
// Store |value| under |key| for the calling thread; LocalStorageKey wraps a
// pthread_key_t on this platform.
void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  pthread_setspecific(pthread_key, value);
}


// Yield the remainder of this thread's timeslice to the scheduler.
void Thread::YieldCPU() {
  sched_yield();
}
646 
647 
648 class MacOSMutex : public Mutex {
649  public:
651  pthread_mutexattr_t attr;
652  pthread_mutexattr_init(&attr);
653  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
654  pthread_mutex_init(&mutex_, &attr);
655  }
656 
657  virtual ~MacOSMutex() { pthread_mutex_destroy(&mutex_); }
658 
659  virtual int Lock() { return pthread_mutex_lock(&mutex_); }
660  virtual int Unlock() { return pthread_mutex_unlock(&mutex_); }
661 
662  virtual bool TryLock() {
663  int result = pthread_mutex_trylock(&mutex_);
664  // Return false if the lock is busy and locking failed.
665  if (result == EBUSY) {
666  return false;
667  }
668  ASSERT(result == 0); // Verify no other errors.
669  return true;
670  }
671 
672  private:
673  pthread_mutex_t mutex_;
674 };
675 
676 
// Factory for the recursive pthread-based mutex defined above.
Mutex* OS::CreateMutex() {
  return new MacOSMutex();
}
680 
681 
682 class MacOSSemaphore : public Semaphore {
683  public:
684  explicit MacOSSemaphore(int count) {
685  semaphore_create(mach_task_self(), &semaphore_, SYNC_POLICY_FIFO, count);
686  }
687 
689  semaphore_destroy(mach_task_self(), semaphore_);
690  }
691 
692  // The MacOS mach semaphore documentation claims it does not have spurious
693  // wakeups, the way pthreads semaphores do. So the code from the linux
694  // platform is not needed here.
695  void Wait() { semaphore_wait(semaphore_); }
696 
697  bool Wait(int timeout);
698 
699  void Signal() { semaphore_signal(semaphore_); }
700 
701  private:
702  semaphore_t semaphore_;
703 };
704 
705 
// Timed wait; |timeout| is in microseconds. Returns false only when the
// kernel reports a timeout — any other result is treated as success.
bool MacOSSemaphore::Wait(int timeout) {
  mach_timespec_t ts;
  ts.tv_sec = timeout / 1000000;
  ts.tv_nsec = (timeout % 1000000) * 1000;
  return semaphore_timedwait(semaphore_, ts) != KERN_OPERATION_TIMED_OUT;
}


Semaphore* OS::CreateSemaphore(int count) {
  return new MacOSSemaphore(count);
}
717 
718 
719 class Sampler::PlatformData : public Malloced {
720  public:
721  PlatformData() : profiled_thread_(mach_thread_self()) {}
722 
724  // Deallocate Mach port for thread.
725  mach_port_deallocate(mach_task_self(), profiled_thread_);
726  }
727 
728  thread_act_t profiled_thread() { return profiled_thread_; }
729 
730  private:
731  // Note: for profiled_thread_ Mach primitives are used instead of PThread's
732  // because the latter doesn't provide thread manipulation primitives required.
733  // For details, consult "Mac OS X Internals" book, Section 7.3.
734  thread_act_t profiled_thread_;
735 };
736 
737 
738 class SamplerThread : public Thread {
739  public:
740  static const int kSamplerThreadStackSize = 64 * KB;
741 
742  explicit SamplerThread(int interval)
743  : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
744  interval_(interval) {}
745 
746  static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
747  static void TearDown() { delete mutex_; }
748 
749  static void AddActiveSampler(Sampler* sampler) {
750  ScopedLock lock(mutex_);
752  if (instance_ == NULL) {
753  instance_ = new SamplerThread(sampler->interval());
754  instance_->Start();
755  } else {
756  ASSERT(instance_->interval_ == sampler->interval());
757  }
758  }
759 
760  static void RemoveActiveSampler(Sampler* sampler) {
761  ScopedLock lock(mutex_);
765  delete instance_;
766  instance_ = NULL;
767  }
768  }
769 
770  // Implement Thread::Run().
771  virtual void Run() {
773  while ((state = SamplerRegistry::GetState()) !=
775  bool cpu_profiling_enabled =
777  bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
778  // When CPU profiling is enabled both JavaScript and C++ code is
779  // profiled. We must not suspend.
780  if (!cpu_profiling_enabled) {
781  if (rate_limiter_.SuspendIfNecessary()) continue;
782  }
783  if (cpu_profiling_enabled) {
784  if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
785  return;
786  }
787  }
788  if (runtime_profiler_enabled) {
789  if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
790  return;
791  }
792  }
793  OS::Sleep(interval_);
794  }
795  }
796 
797  static void DoCpuProfile(Sampler* sampler, void* raw_sampler_thread) {
798  if (!sampler->isolate()->IsInitialized()) return;
799  if (!sampler->IsProfiling()) return;
800  SamplerThread* sampler_thread =
801  reinterpret_cast<SamplerThread*>(raw_sampler_thread);
802  sampler_thread->SampleContext(sampler);
803  }
804 
805  static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
806  if (!sampler->isolate()->IsInitialized()) return;
807  sampler->isolate()->runtime_profiler()->NotifyTick();
808  }
809 
810  void SampleContext(Sampler* sampler) {
811  thread_act_t profiled_thread = sampler->platform_data()->profiled_thread();
812  TickSample sample_obj;
814  if (sample == NULL) sample = &sample_obj;
815 
816  if (KERN_SUCCESS != thread_suspend(profiled_thread)) return;
817 
818 #if V8_HOST_ARCH_X64
819  thread_state_flavor_t flavor = x86_THREAD_STATE64;
820  x86_thread_state64_t state;
821  mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
822 #if __DARWIN_UNIX03
823 #define REGISTER_FIELD(name) __r ## name
824 #else
825 #define REGISTER_FIELD(name) r ## name
826 #endif // __DARWIN_UNIX03
827 #elif V8_HOST_ARCH_IA32
828  thread_state_flavor_t flavor = i386_THREAD_STATE;
829  i386_thread_state_t state;
830  mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
831 #if __DARWIN_UNIX03
832 #define REGISTER_FIELD(name) __e ## name
833 #else
834 #define REGISTER_FIELD(name) e ## name
835 #endif // __DARWIN_UNIX03
836 #else
837 #error Unsupported Mac OS X host architecture.
838 #endif // V8_HOST_ARCH
839 
840  if (thread_get_state(profiled_thread,
841  flavor,
842  reinterpret_cast<natural_t*>(&state),
843  &count) == KERN_SUCCESS) {
844  sample->state = sampler->isolate()->current_vm_state();
845  sample->pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
846  sample->sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
847  sample->fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
848  sampler->SampleStack(sample);
849  sampler->Tick(sample);
850  }
851  thread_resume(profiled_thread);
852  }
853 
854  const int interval_;
855  RuntimeProfilerRateLimiter rate_limiter_;
856 
857  // Protects the process wide state below.
858  static Mutex* mutex_;
859  static SamplerThread* instance_;
860 
861  private:
863 };
864 
865 #undef REGISTER_FIELD
866 
867 
868 Mutex* SamplerThread::mutex_ = NULL;
869 SamplerThread* SamplerThread::instance_ = NULL;
870 
871 
872 void OS::SetUp() {
873  // Seed the random number generator. We preserve microsecond resolution.
874  uint64_t seed = Ticks() ^ (getpid() << 16);
875  srandom(static_cast<unsigned int>(seed));
876  limit_mutex = CreateMutex();
878 }
879 
880 
881 void OS::TearDown() {
883  delete limit_mutex;
884 }
885 
886 
// Sampler for one isolate; |interval| is the tick period in ms used by the
// SamplerThread. The platform data captures the Mach thread to profile.
Sampler::Sampler(Isolate* isolate, int interval)
    : isolate_(isolate),
      interval_(interval),
      profiling_(false),
      active_(false),
      samples_taken_(0) {
  data_ = new PlatformData;
}
895 
896 
898  ASSERT(!IsActive());
899  delete data_;
900 }
901 
902 
903 void Sampler::Start() {
904  ASSERT(!IsActive());
905  SetActive(true);
907 }
908 
909 
910 void Sampler::Stop() {
911  ASSERT(IsActive());
913  SetActive(false);
914 }
915 
916 
917 } } // namespace v8::internal
byte * Address
Definition: globals.h:172
static void * GetThreadLocal(LocalStorageKey key)
static void RemoveActiveSampler(Sampler *sampler)
void backtrace_symbols_fd(void *const *, int, int) __attribute__((weak_import))
static void Free(void *address, const size_t size)
char ** backtrace_symbols(void *const *, int) __attribute__((weak_import))
Thread(const Options &options)
PlatformData * platform_data()
Definition: platform.h:745
StateTag current_vm_state()
Definition: isolate.h:991
#define LOG(isolate, Call)
Definition: log.h:81
const int KB
Definition: globals.h:221
bool IsActive() const
Definition: platform.h:734
Isolate * isolate()
Definition: platform.h:736
value format" "after each garbage collection") DEFINE_bool(print_cumulative_gc_stat, false, "print cumulative GC statistics in name=value format on exit") DEFINE_bool(trace_gc_verbose, false, "print more details following each garbage collection") DEFINE_bool(trace_fragmentation, false, "report fragmentation for old pointer and data pages") DEFINE_bool(collect_maps, true, "garbage collect maps from which no objects can be reached") DEFINE_bool(flush_code, true, "flush code that we expect not to use again before full gc") DEFINE_bool(incremental_marking, true, "use incremental marking") DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps") DEFINE_bool(trace_incremental_marking, false, "trace progress of the incremental marking") DEFINE_bool(use_idle_notification, true, "Use idle notification to reduce memory footprint.") DEFINE_bool(send_idle_notification, false, "Send idle notifcation between stress runs.") DEFINE_bool(use_ic, true, "use inline caching") DEFINE_bool(native_code_counters, false, "generate extra code for manipulating stats counters") DEFINE_bool(always_compact, false, "Perform compaction on every full GC") DEFINE_bool(lazy_sweeping, true, "Use lazy sweeping for old pointer and data spaces") DEFINE_bool(never_compact, false, "Never perform compaction on full GC-testing only") DEFINE_bool(compact_code_space, true, "Compact code space on full non-incremental collections") DEFINE_bool(cleanup_code_caches_at_gc, true, "Flush inline caches prior to mark compact collection and" "flush code caches in maps during mark compact cycle.") DEFINE_int(random_seed, 0, "Default seed for initializing random generator" "(0, the default, means to use system random).") DEFINE_bool(use_verbose_printer, true, "allows verbose printing") DEFINE_bool(allow_natives_syntax, false, "allow natives syntax") DEFINE_bool(trace_sim, false, "Trace simulator execution") DEFINE_bool(check_icache, false, "Check icache flushes in ARM and MIPS simulator") 
DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions") DEFINE_int(sim_stack_alignment, 8, "Stack alingment in bytes in simulator(4 or 8, 8 is default)") DEFINE_bool(trace_exception, false, "print stack trace when throwing exceptions") DEFINE_bool(preallocate_message_memory, false, "preallocate some memory to build stack traces.") DEFINE_bool(randomize_hashes, true, "randomize hashes to avoid predictable hash collisions" "(with snapshots this option cannot override the baked-in seed)") DEFINE_int(hash_seed, 0, "Fixed seed to use to hash property keys(0 means random)" "(with snapshots this option cannot override the baked-in seed)") DEFINE_bool(preemption, false, "activate a 100ms timer that switches between V8 threads") DEFINE_bool(regexp_optimization, true, "generate optimized regexp code") DEFINE_bool(testing_bool_flag, true, "testing_bool_flag") DEFINE_int(testing_int_flag, 13, "testing_int_flag") DEFINE_float(testing_float_flag, 2.5, "float-flag") DEFINE_string(testing_string_flag, "Hello, world!", "string-flag") DEFINE_int(testing_prng_seed, 42, "Seed used for threading test randomness") DEFINE_string(testing_serialization_file, "/tmp/serdes", "file in which to serialize heap") DEFINE_bool(help, false, "Print usage message, including flags, on console") DEFINE_bool(dump_counters, false, "Dump counters on exit") DEFINE_string(map_counters, "", "Map counters to a file") DEFINE_args(js_arguments, JSARGUMENTS_INIT, "Pass all remaining arguments to the script.Alias for\"--\".") DEFINE_bool(debug_compile_events, true,"Enable debugger compile events") DEFINE_bool(debug_script_collected_events, true,"Enable debugger script collected events") DEFINE_bool(gdbjit, false,"enable GDBJIT interface (disables compacting GC)") DEFINE_bool(gdbjit_full, false,"enable GDBJIT interface for all code objects") DEFINE_bool(gdbjit_dump, false,"dump elf objects with debug info to disk") DEFINE_string(gdbjit_dump_filter,"","dump only objects containing this 
substring") DEFINE_bool(force_marking_deque_overflows, false,"force overflows of marking deque by reducing it's size ""to 64 words") DEFINE_bool(stress_compaction, false,"stress the GC compactor to flush out bugs (implies ""--force_marking_deque_overflows)")#define FLAG DEFINE_bool(enable_slow_asserts, false,"enable asserts that are slow to execute") DEFINE_bool(trace_codegen, false,"print name of functions for which code is generated") DEFINE_bool(print_source, false,"pretty print source code") DEFINE_bool(print_builtin_source, false,"pretty print source code for builtins") DEFINE_bool(print_ast, false,"print source AST") DEFINE_bool(print_builtin_ast, false,"print source AST for builtins") DEFINE_string(stop_at,"","function name where to insert a breakpoint") DEFINE_bool(print_builtin_scopes, false,"print scopes for builtins") DEFINE_bool(print_scopes, false,"print scopes") DEFINE_bool(trace_contexts, false,"trace contexts operations") DEFINE_bool(gc_greedy, false,"perform GC prior to some allocations") DEFINE_bool(gc_verbose, false,"print stuff during garbage collection") DEFINE_bool(heap_stats, false,"report heap statistics before and after GC") DEFINE_bool(code_stats, false,"report code statistics after GC") DEFINE_bool(verify_heap, false,"verify heap pointers before and after GC") DEFINE_bool(print_handles, false,"report handles after GC") DEFINE_bool(print_global_handles, false,"report global handles after GC") DEFINE_bool(trace_ic, false,"trace inline cache state transitions") DEFINE_bool(print_interfaces, false,"print interfaces") DEFINE_bool(print_interface_details, false,"print interface inference details") DEFINE_int(print_interface_depth, 5,"depth for printing interfaces") DEFINE_bool(trace_normalization, false,"prints when objects are turned into dictionaries.") DEFINE_bool(trace_lazy, false,"trace lazy compilation") DEFINE_bool(collect_heap_spill_statistics, false,"report heap spill statistics along with heap_stats ""(requires heap_stats)") 
DEFINE_bool(trace_isolates, false,"trace isolate state changes") DEFINE_bool(log_state_changes, false,"Log state changes.") DEFINE_bool(regexp_possessive_quantifier, false,"enable possessive quantifier syntax for testing") DEFINE_bool(trace_regexp_bytecodes, false,"trace regexp bytecode execution") DEFINE_bool(trace_regexp_assembler, false,"trace regexp macro assembler calls.")#define FLAG DEFINE_bool(log, false,"Minimal logging (no API, code, GC, suspect, or handles samples).") DEFINE_bool(log_all, false,"Log all events to the log file.") DEFINE_bool(log_runtime, false,"Activate runtime system %Log call.") DEFINE_bool(log_api, false,"Log API events to the log file.") DEFINE_bool(log_code, false,"Log code events to the log file without profiling.") DEFINE_bool(log_gc, false,"Log heap samples on garbage collection for the hp2ps tool.") DEFINE_bool(log_handles, false,"Log global handle events.") DEFINE_bool(log_snapshot_positions, false,"log positions of (de)serialized objects in the snapshot.") DEFINE_bool(log_suspect, false,"Log suspect operations.") DEFINE_bool(prof, false,"Log statistical profiling information (implies --log-code).") DEFINE_bool(prof_auto, true,"Used with --prof, starts profiling automatically") DEFINE_bool(prof_lazy, false,"Used with --prof, only does sampling and logging"" when profiler is active (implies --noprof_auto).") DEFINE_bool(prof_browser_mode, true,"Used with --prof, turns on browser-compatible mode for profiling.") DEFINE_bool(log_regexp, false,"Log regular expression execution.") DEFINE_bool(sliding_state_window, false,"Update sliding state window counters.") DEFINE_string(logfile,"v8.log","Specify the name of the log file.") DEFINE_bool(ll_prof, false,"Enable low-level linux profiler.")#define FLAG DEFINE_bool(trace_elements_transitions, false,"trace elements transitions") DEFINE_bool(print_code_stubs, false,"print code stubs") DEFINE_bool(test_secondary_stub_cache, false,"test secondary stub cache by disabling the primary one") 
DEFINE_bool(test_primary_stub_cache, false,"test primary stub cache by disabling the secondary one") DEFINE_bool(print_code, false,"print generated code") DEFINE_bool(print_opt_code, false,"print optimized code") DEFINE_bool(print_unopt_code, false,"print unoptimized code before ""printing optimized code based on it") DEFINE_bool(print_code_verbose, false,"print more information for code") DEFINE_bool(print_builtin_code, false,"print generated code for builtins")#43"/Users/thlorenz/dev/dx/v8-perf/build/v8/src/flags.cc"2#define FLAG_MODE_DEFINE_DEFAULTS#1"/Users/thlorenz/dev/dx/v8-perf/build/v8/src/flag-definitions.h"1#define FLAG_FULL(ftype, ctype, nam, def, cmt)#define FLAG_READONLY(ftype, ctype, nam, def, cmt)#define DEFINE_implication(whenflag, thenflag)#define DEFINE_bool(nam, def, cmt)#define DEFINE_int(nam, def, cmt)#define DEFINE_float(nam, def, cmt)#define DEFINE_string(nam, def, cmt)#define DEFINE_args(nam, def, cmt)#define FLAG DEFINE_bool(use_strict, false,"enforce strict mode") DEFINE_bool(es5_readonly, false,"activate correct semantics for inheriting readonliness") DEFINE_bool(es52_globals, false,"activate new semantics for global var declarations") DEFINE_bool(harmony_typeof, false,"enable harmony semantics for typeof") DEFINE_bool(harmony_scoping, false,"enable harmony block scoping") DEFINE_bool(harmony_modules, false,"enable harmony modules (implies block scoping)") DEFINE_bool(harmony_proxies, false,"enable harmony proxies") DEFINE_bool(harmony_collections, false,"enable harmony collections (sets, maps, and weak maps)") DEFINE_bool(harmony, false,"enable all harmony features (except typeof)") DEFINE_implication(harmony, harmony_scoping) DEFINE_implication(harmony, harmony_modules) DEFINE_implication(harmony, harmony_proxies) DEFINE_implication(harmony, harmony_collections) DEFINE_implication(harmony_modules, harmony_scoping) DEFINE_bool(packed_arrays, false,"optimizes arrays that have no holes") DEFINE_bool(smi_only_arrays, true,"tracks arrays 
with only smi values") DEFINE_bool(clever_optimizations, true,"Optimize object size, Array shift, DOM strings and string +") DEFINE_bool(unbox_double_arrays, true,"automatically unbox arrays of doubles") DEFINE_bool(string_slices, true,"use string slices") DEFINE_bool(crankshaft, true,"use crankshaft") DEFINE_string(hydrogen_filter,"","optimization filter") DEFINE_bool(use_range, true,"use hydrogen range analysis") DEFINE_bool(eliminate_dead_phis, true,"eliminate dead phis") DEFINE_bool(use_gvn, true,"use hydrogen global value numbering") DEFINE_bool(use_canonicalizing, true,"use hydrogen instruction canonicalizing") DEFINE_bool(use_inlining, true,"use function inlining") DEFINE_int(max_inlined_source_size, 600,"maximum source size in bytes considered for a single inlining") DEFINE_int(max_inlined_nodes, 196,"maximum number of AST nodes considered for a single inlining") DEFINE_int(max_inlined_nodes_cumulative, 196,"maximum cumulative number of AST nodes considered for inlining") DEFINE_bool(loop_invariant_code_motion, true,"loop invariant code motion") DEFINE_bool(collect_megamorphic_maps_from_stub_cache, true,"crankshaft harvests type feedback from stub cache") DEFINE_bool(hydrogen_stats, false,"print statistics for hydrogen") DEFINE_bool(trace_hydrogen, false,"trace generated hydrogen to file") DEFINE_string(trace_phase,"Z","trace generated IR for specified phases") DEFINE_bool(trace_inlining, false,"trace inlining decisions") DEFINE_bool(trace_alloc, false,"trace register allocator") DEFINE_bool(trace_all_uses, false,"trace all use positions") DEFINE_bool(trace_range, false,"trace range analysis") DEFINE_bool(trace_gvn, false,"trace global value numbering") DEFINE_bool(trace_representation, false,"trace representation types") DEFINE_bool(stress_pointer_maps, false,"pointer map for every instruction") DEFINE_bool(stress_environments, false,"environment for every instruction") DEFINE_int(deopt_every_n_times, 0,"deoptimize every n times a deopt point is passed") 
DEFINE_bool(trap_on_deopt, false,"put a break point before deoptimizing") DEFINE_bool(deoptimize_uncommon_cases, true,"deoptimize uncommon cases") DEFINE_bool(polymorphic_inlining, true,"polymorphic inlining") DEFINE_bool(use_osr, true,"use on-stack replacement") DEFINE_bool(array_bounds_checks_elimination, false,"perform array bounds checks elimination") DEFINE_bool(array_index_dehoisting, false,"perform array index dehoisting") DEFINE_bool(trace_osr, false,"trace on-stack replacement") DEFINE_int(stress_runs, 0,"number of stress runs") DEFINE_bool(optimize_closures, true,"optimize closures") DEFINE_bool(inline_construct, true,"inline constructor calls") DEFINE_bool(inline_arguments, true,"inline functions with arguments object") DEFINE_int(loop_weight, 1,"loop weight for representation inference") DEFINE_bool(optimize_for_in, true,"optimize functions containing for-in loops") DEFINE_bool(experimental_profiler, true,"enable all profiler experiments") DEFINE_bool(watch_ic_patching, false,"profiler considers IC stability") DEFINE_int(frame_count, 1,"number of stack frames inspected by the profiler") DEFINE_bool(self_optimization, false,"primitive functions trigger their own optimization") DEFINE_bool(direct_self_opt, false,"call recompile stub directly when self-optimizing") DEFINE_bool(retry_self_opt, false,"re-try self-optimization if it failed") DEFINE_bool(count_based_interrupts, false,"trigger profiler ticks based on counting instead of timing") DEFINE_bool(interrupt_at_exit, false,"insert an interrupt check at function exit") DEFINE_bool(weighted_back_edges, false,"weight back edges by jump distance for interrupt triggering") DEFINE_int(interrupt_budget, 5900,"execution budget before interrupt is triggered") DEFINE_int(type_info_threshold, 15,"percentage of ICs that must have type info to allow optimization") DEFINE_int(self_opt_count, 130,"call count before self-optimization") DEFINE_implication(experimental_profiler, watch_ic_patching) 
DEFINE_implication(experimental_profiler, self_optimization) DEFINE_implication(experimental_profiler, retry_self_opt) DEFINE_implication(experimental_profiler, count_based_interrupts) DEFINE_implication(experimental_profiler, interrupt_at_exit) DEFINE_implication(experimental_profiler, weighted_back_edges) DEFINE_bool(trace_opt_verbose, false,"extra verbose compilation tracing") DEFINE_implication(trace_opt_verbose, trace_opt) DEFINE_bool(debug_code, false,"generate extra code (assertions) for debugging") DEFINE_bool(code_comments, false,"emit comments in code disassembly") DEFINE_bool(enable_sse2, true,"enable use of SSE2 instructions if available") DEFINE_bool(enable_sse3, true,"enable use of SSE3 instructions if available") DEFINE_bool(enable_sse4_1, true,"enable use of SSE4.1 instructions if available") DEFINE_bool(enable_cmov, true,"enable use of CMOV instruction if available") DEFINE_bool(enable_rdtsc, true,"enable use of RDTSC instruction if available") DEFINE_bool(enable_sahf, true,"enable use of SAHF instruction if available (X64 only)") DEFINE_bool(enable_vfp3, true,"enable use of VFP3 instructions if available - this implies ""enabling ARMv7 instructions (ARM only)") DEFINE_bool(enable_armv7, true,"enable use of ARMv7 instructions if available (ARM only)") DEFINE_bool(enable_fpu, true,"enable use of MIPS FPU instructions if available (MIPS only)") DEFINE_string(expose_natives_as, NULL,"expose natives in global object") DEFINE_string(expose_debug_as, NULL,"expose debug in global object") DEFINE_bool(expose_gc, false,"expose gc extension") DEFINE_bool(expose_externalize_string, false,"expose externalize string extension") DEFINE_int(stack_trace_limit, 10,"number of stack frames to capture") DEFINE_bool(builtins_in_stack_traces, false,"show built-in functions in stack traces") DEFINE_bool(disable_native_files, false,"disable builtin natives files") DEFINE_bool(inline_new, true,"use fast inline allocation") DEFINE_bool(stack_trace_on_abort, true,"print a 
stack trace if an assertion failure occurs") DEFINE_bool(trace, false,"trace function calls") DEFINE_bool(mask_constants_with_cookie, true,"use random jit cookie to mask large constants") DEFINE_bool(lazy, true,"use lazy compilation") DEFINE_bool(trace_opt, false,"trace lazy optimization") DEFINE_bool(trace_opt_stats, false,"trace lazy optimization statistics") DEFINE_bool(opt, true,"use adaptive optimizations") DEFINE_bool(always_opt, false,"always try to optimize functions") DEFINE_bool(prepare_always_opt, false,"prepare for turning on always opt") DEFINE_bool(trace_deopt, false,"trace deoptimization") DEFINE_int(min_preparse_length, 1024,"minimum length for automatic enable preparsing") DEFINE_bool(always_full_compiler, false,"try to use the dedicated run-once backend for all code") DEFINE_bool(trace_bailout, false,"print reasons for falling back to using the classic V8 backend") DEFINE_bool(compilation_cache, true,"enable compilation cache") DEFINE_bool(cache_prototype_transitions, true,"cache prototype transitions") DEFINE_bool(trace_debug_json, false,"trace debugging JSON request/response") DEFINE_bool(debugger_auto_break, true,"automatically set the debug break flag when debugger commands are ""in the queue") DEFINE_bool(enable_liveedit, true,"enable liveedit experimental feature") DEFINE_bool(break_on_abort, true,"always cause a debug break before aborting") DEFINE_int(stack_size, kPointerSize *123,"default size of stack region v8 is allowed to use (in kBytes)") DEFINE_int(max_stack_trace_source_length, 300,"maximum length of function source code printed in a stack trace.") DEFINE_bool(always_inline_smi_code, false,"always inline smi code in non-opt code") DEFINE_int(max_new_space_size, 0,"max size of the new generation (in kBytes)") DEFINE_int(max_old_space_size, 0,"max size of the old generation (in Mbytes)") DEFINE_int(max_executable_size, 0,"max size of executable memory (in Mbytes)") DEFINE_bool(gc_global, false,"always perform global GCs") 
DEFINE_int(gc_interval,-1,"garbage collect after <n> allocations") DEFINE_bool(trace_gc, false,"print one trace line following each garbage collection") DEFINE_bool(trace_gc_nvp, false,"print one detailed trace line in name=value format ""after each garbage collection") DEFINE_bool(print_cumulative_gc_stat, false,"print cumulative GC statistics in name=value format on exit") DEFINE_bool(trace_gc_verbose, false,"print more details following each garbage collection") DEFINE_bool(trace_fragmentation, false,"report fragmentation for old pointer and data pages") DEFINE_bool(collect_maps, true,"garbage collect maps from which no objects can be reached") DEFINE_bool(flush_code, true,"flush code that we expect not to use again before full gc") DEFINE_bool(incremental_marking, true,"use incremental marking") DEFINE_bool(incremental_marking_steps, true,"do incremental marking steps") DEFINE_bool(trace_incremental_marking, false,"trace progress of the incremental marking") DEFINE_bool(use_idle_notification, true,"Use idle notification to reduce memory footprint.") DEFINE_bool(send_idle_notification, false,"Send idle notifcation between stress runs.") DEFINE_bool(use_ic, true,"use inline caching") DEFINE_bool(native_code_counters, false,"generate extra code for manipulating stats counters") DEFINE_bool(always_compact, false,"Perform compaction on every full GC") DEFINE_bool(lazy_sweeping, true,"Use lazy sweeping for old pointer and data spaces") DEFINE_bool(never_compact, false,"Never perform compaction on full GC - testing only") DEFINE_bool(compact_code_space, true,"Compact code space on full non-incremental collections") DEFINE_bool(cleanup_code_caches_at_gc, true,"Flush inline caches prior to mark compact collection and ""flush code caches in maps during mark compact cycle.") DEFINE_int(random_seed, 0,"Default seed for initializing random generator ""(0, the default, means to use system random).") DEFINE_bool(use_verbose_printer, true,"allows verbose printing") 
DEFINE_bool(allow_natives_syntax, false,"allow natives syntax") DEFINE_bool(trace_sim, false,"Trace simulator execution") DEFINE_bool(check_icache, false,"Check icache flushes in ARM and MIPS simulator") DEFINE_int(stop_sim_at, 0,"Simulator stop after x number of instructions") DEFINE_int(sim_stack_alignment, 8,"Stack alingment in bytes in simulator (4 or 8, 8 is default)") DEFINE_bool(trace_exception, false,"print stack trace when throwing exceptions") DEFINE_bool(preallocate_message_memory, false,"preallocate some memory to build stack traces.") DEFINE_bool(randomize_hashes, true,"randomize hashes to avoid predictable hash collisions ""(with snapshots this option cannot override the baked-in seed)") DEFINE_int(hash_seed, 0,"Fixed seed to use to hash property keys (0 means random)""(with snapshots this option cannot override the baked-in seed)") DEFINE_bool(preemption, false,"activate a 100ms timer that switches between V8 threads") DEFINE_bool(regexp_optimization, true,"generate optimized regexp code") DEFINE_bool(testing_bool_flag, true,"testing_bool_flag") DEFINE_int(testing_int_flag, 13,"testing_int_flag") DEFINE_float(testing_float_flag, 2.5,"float-flag") DEFINE_string(testing_string_flag,"Hello, world!","string-flag") DEFINE_int(testing_prng_seed, 42,"Seed used for threading test randomness") DEFINE_string(testing_serialization_file,"/tmp/serdes","file in which to serialize heap") DEFINE_bool(help, false,"Print usage message, including flags, on console") DEFINE_bool(dump_counters, false,"Dump counters on exit") DEFINE_string(map_counters,"","Map counters to a file") DEFINE_args(js_arguments, JSARGUMENTS_INIT,"Pass all remaining arguments to the script. 
Alias for \"--\".") DEFINE_bool(debug_compile_events, true,"Enable debugger compile events") DEFINE_bool(debug_script_collected_events, true,"Enable debugger script collected events") DEFINE_bool(gdbjit, false,"enable GDBJIT interface (disables compacting GC)") DEFINE_bool(gdbjit_full, false,"enable GDBJIT interface for all code objects") DEFINE_bool(gdbjit_dump, false,"dump elf objects with debug info to disk") DEFINE_string(gdbjit_dump_filter,"","dump only objects containing this substring") DEFINE_bool(force_marking_deque_overflows, false,"force overflows of marking deque by reducing it's size ""to 64 words") DEFINE_bool(stress_compaction, false,"stress the GC compactor to flush out bugs (implies ""--force_marking_deque_overflows)")#define FLAG DEFINE_bool(enable_slow_asserts, false,"enable asserts that are slow to execute") DEFINE_bool(trace_codegen, false,"print name of functions for which code is generated") DEFINE_bool(print_source, false,"pretty print source code") DEFINE_bool(print_builtin_source, false,"pretty print source code for builtins") DEFINE_bool(print_ast, false,"print source AST") DEFINE_bool(print_builtin_ast, false,"print source AST for builtins") DEFINE_string(stop_at,"","function name where to insert a breakpoint") DEFINE_bool(print_builtin_scopes, false,"print scopes for builtins") DEFINE_bool(print_scopes, false,"print scopes") DEFINE_bool(trace_contexts, false,"trace contexts operations") DEFINE_bool(gc_greedy, false,"perform GC prior to some allocations") DEFINE_bool(gc_verbose, false,"print stuff during garbage collection") DEFINE_bool(heap_stats, false,"report heap statistics before and after GC") DEFINE_bool(code_stats, false,"report code statistics after GC") DEFINE_bool(verify_heap, false,"verify heap pointers before and after GC") DEFINE_bool(print_handles, false,"report handles after GC") DEFINE_bool(print_global_handles, false,"report global handles after GC") DEFINE_bool(trace_ic, false,"trace inline cache state transitions") 
DEFINE_bool(print_interfaces, false,"print interfaces") DEFINE_bool(print_interface_details, false,"print interface inference details") DEFINE_int(print_interface_depth, 5,"depth for printing interfaces") DEFINE_bool(trace_normalization, false,"prints when objects are turned into dictionaries.") DEFINE_bool(trace_lazy, false,"trace lazy compilation") DEFINE_bool(collect_heap_spill_statistics, false,"report heap spill statistics along with heap_stats ""(requires heap_stats)") DEFINE_bool(trace_isolates, false,"trace isolate state changes") DEFINE_bool(log_state_changes, false,"Log state changes.") DEFINE_bool(regexp_possessive_quantifier, false,"enable possessive quantifier syntax for testing") DEFINE_bool(trace_regexp_bytecodes, false,"trace regexp bytecode execution") DEFINE_bool(trace_regexp_assembler, false,"trace regexp macro assembler calls.")#define FLAG DEFINE_bool(log, false,"Minimal logging (no API, code, GC, suspect, or handles samples).") DEFINE_bool(log_all, false,"Log all events to the log file.") DEFINE_bool(log_runtime, false,"Activate runtime system %Log call.") DEFINE_bool(log_api, false,"Log API events to the log file.") DEFINE_bool(log_code, false,"Log code events to the log file without profiling.") DEFINE_bool(log_gc, false,"Log heap samples on garbage collection for the hp2ps tool.") DEFINE_bool(log_handles, false,"Log global handle events.") DEFINE_bool(log_snapshot_positions, false,"log positions of (de)serialized objects in the snapshot.") DEFINE_bool(log_suspect, false,"Log suspect operations.") DEFINE_bool(prof, false,"Log statistical profiling information (implies --log-code).") DEFINE_bool(prof_auto, true,"Used with --prof, starts profiling automatically") DEFINE_bool(prof_lazy, false,"Used with --prof, only does sampling and logging"" when profiler is active (implies --noprof_auto).") DEFINE_bool(prof_browser_mode, true,"Used with --prof, turns on browser-compatible mode for profiling.") DEFINE_bool(log_regexp, false,"Log regular 
expression execution.") DEFINE_bool(sliding_state_window, false,"Update sliding state window counters.") DEFINE_string(logfile,"v8.log","Specify the name of the log file.") DEFINE_bool(ll_prof, false,"Enable low-level linux profiler.")#define FLAG DEFINE_bool(trace_elements_transitions, false,"trace elements transitions") DEFINE_bool(print_code_stubs, false,"print code stubs") DEFINE_bool(test_secondary_stub_cache, false,"test secondary stub cache by disabling the primary one") DEFINE_bool(test_primary_stub_cache, false,"test primary stub cache by disabling the secondary one") DEFINE_bool(print_code, false,"print generated code") DEFINE_bool(print_opt_code, false,"print optimized code") DEFINE_bool(print_unopt_code, false,"print unoptimized code before ""printing optimized code based on it") DEFINE_bool(print_code_verbose, false,"print more information for code") DEFINE_bool(print_builtin_code, false,"print generated code for builtins")#47"/Users/thlorenz/dev/dx/v8-perf/build/v8/src/flags.cc"2 namespace{struct Flag{enum FlagType{TYPE_BOOL, TYPE_INT, TYPE_FLOAT, TYPE_STRING, TYPE_ARGS} name
Definition: flags.cc:1349
static void * GetRandomMmapAddr()
double ceiling(double x)
static void * ReserveRegion(size_t size)
T Max(T a, T b)
Definition: utils.h:222
Vector< char > MutableCStrVector(char *data)
Definition: utils.h:529
static SamplerThread * instance_
TickSample * sample
PosixMemoryMappedFile(FILE *file, void *memory, int size)
static void AddActiveSampler(Sampler *sampler)
#define ASSERT(condition)
Definition: checks.h:270
LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak))
int interval() const
Definition: platform.h:712
int isnan(double x)
static void RemoveActiveSampler(Sampler *sampler)
Definition: log.cc:1783
unsigned int seed
Definition: test-strings.cc:17
const Register sp
void V8_Fatal(const char *file, int line, const char *format,...)
Definition: checks.cc:38
static void StopRuntimeProfilerThreadBeforeShutdown(Thread *thread)
RuntimeProfiler * runtime_profiler()
Definition: isolate.h:811
static TickSample * TickSampleEvent(Isolate *isolate)
bool IsProfiling() const
Definition: platform.h:729
const Register ip
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination trace on stack replacement optimize closures functions with arguments object optimize functions containing for in loops profiler considers IC stability primitive functions trigger their own optimization re try self optimization if it failed insert an interrupt check at function exit execution budget before interrupt is triggered call count before self optimization self_optimization count_based_interrupts weighted_back_edges trace_opt emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 enable use of ARMv7 instructions if enable use of MIPS FPU instructions if NULL
void POSIXPostSetUp()
static LocalStorageKey CreateThreadLocalKey()
bool IsAligned(T value, U alignment)
Definition: utils.h:206
bool Commit(void *address, size_t size, bool is_executable)
static void * GetExistingThreadLocal(LocalStorageKey key)
Definition: platform.h:487
#define DISALLOW_COPY_AND_ASSIGN(TypeName)
Definition: globals.h:321
intptr_t AtomicWord
Definition: atomicops.h:72
static void Guard(void *address, const size_t size)
T RoundUp(T x, intptr_t m)
Definition: utils.h:150
#define ASSERT_LE(v1, v2)
Definition: checks.h:275
static Mutex * CreateMutex()
static void DoCpuProfile(Sampler *sampler, void *raw_sampler_thread)
int backtrace(void **, int) __attribute__((weak_import))
void Release_Store(volatile Atomic32 *ptr, Atomic32 value)
static void DeleteThreadLocalKey(LocalStorageKey key)
static void Sleep(const int milliseconds)
static void TearDown()
static Semaphore * CreateSemaphore(int count)
static const int kMaxThreadNameLength
Definition: platform.h:498
static bool ReleaseRegion(void *base, size_t size)
void SampleContext(Sampler *sampler)
static bool CommitRegion(void *base, size_t size, bool is_executable)
static void SetThreadLocal(LocalStorageKey key, void *value)
static State GetState()
Definition: log.cc:1762
void SampleStack(TickSample *sample)
Definition: platform.h:715
static bool UncommitRegion(void *base, size_t size)
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination trace on stack replacement optimize closures functions with arguments object optimize functions containing for in loops profiler considers IC stability primitive functions trigger their own optimization re try self optimization if it failed insert an interrupt check at function exit execution budget before interrupt is triggered call count before self optimization self_optimization count_based_interrupts weighted_back_edges trace_opt emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 enable use of ARMv7 instructions if enable use of MIPS FPU instructions if NULL
Definition: flags.cc:274
static void SetUp()
void USE(T)
Definition: globals.h:303
static size_t AllocateAlignment()
int32_t Atomic32
Definition: atomicops.h:57
static void AddActiveSampler(Sampler *sampler)
Definition: log.cc:1771
Sampler(Isolate *isolate, int interval)
virtual void Tick(TickSample *sample)=0
static bool IterateActiveSamplers(VisitSampler func, void *param)
Definition: log.cc:1745
T Min(T a, T b)
Definition: utils.h:229
static intptr_t CommitPageSize()
bool Uncommit(void *address, size_t size)
static void DoRuntimeProfile(Sampler *sampler, void *ignored)