v8  3.11.10(node0.8.26)
V8 is Google's open source JavaScript engine
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
platform-openbsd.cc
Go to the documentation of this file.
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 // Platform specific code for OpenBSD and NetBSD goes here. For the POSIX
29 // compatible parts the implementation is in platform-posix.cc.
30 
31 #include <pthread.h>
32 #include <semaphore.h>
33 #include <signal.h>
34 #include <sys/time.h>
35 #include <sys/resource.h>
36 #include <sys/syscall.h>
37 #include <sys/types.h>
38 #include <stdlib.h>
39 
40 #include <sys/types.h> // mmap & munmap
41 #include <sys/mman.h> // mmap & munmap
42 #include <sys/stat.h> // open
43 #include <fcntl.h> // open
44 #include <unistd.h> // sysconf
45 #include <execinfo.h> // backtrace, backtrace_symbols
46 #include <strings.h> // index
47 #include <errno.h>
48 #include <stdarg.h>
49 
50 #undef MAP_TYPE
51 
52 #include "v8.h"
53 
54 #include "platform-posix.h"
55 #include "platform.h"
56 #include "v8threads.h"
57 #include "vm-state-inl.h"
58 
59 
60 namespace v8 {
61 namespace internal {
62 
63 // 0 is never a valid thread id on Linux and OpenBSD since tids and pids share a
64 // name space and pid 0 is reserved (see man 2 kill).
65 static const pthread_t kNoThread = (pthread_t) 0;
66 
67 
// Round x up to the nearest integral value.
double ceiling(double x) {
  double rounded_up = ceil(x);
  return rounded_up;
}
71 
72 
73 static Mutex* limit_mutex = NULL;
74 
75 
76 static void* GetRandomMmapAddr() {
77  Isolate* isolate = Isolate::UncheckedCurrent();
78  // Note that the current isolate isn't set up in a call path via
79  // CpuFeatures::Probe. We don't care about randomization in this case because
80  // the code page is immediately freed.
81  if (isolate != NULL) {
82 #ifdef V8_TARGET_ARCH_X64
83  uint64_t rnd1 = V8::RandomPrivate(isolate);
84  uint64_t rnd2 = V8::RandomPrivate(isolate);
85  uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
86  // Currently available CPUs have 48 bits of virtual addressing. Truncate
87  // the hint address to 46 bits to give the kernel a fighting chance of
88  // fulfilling our placement request.
89  raw_addr &= V8_UINT64_C(0x3ffffffff000);
90 #else
91  uint32_t raw_addr = V8::RandomPrivate(isolate);
92  // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
93  // variety of ASLR modes (PAE kernel, NX compat mode, etc).
94  raw_addr &= 0x3ffff000;
95  raw_addr += 0x20000000;
96 #endif
97  return reinterpret_cast<void*>(raw_addr);
98  }
99  return NULL;
100 }
101 
102 
103 void OS::PostSetUp() {
104  POSIXPostSetUp();
105 }
106 
107 
109  return 0;
110 }
111 
112 
114  // With gcc 4.4 the tree vectorization optimizer can generate code
115  // that requires 16 byte alignment such as movdqa on x86.
116  return 16;
117 }
118 
119 
120 void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
121  __asm__ __volatile__("" : : : "memory");
122  // An x86 store acts as a release barrier.
123  *ptr = value;
124 }
125 
126 
127 const char* OS::LocalTimezone(double time) {
128  if (isnan(time)) return "";
129  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
130  struct tm* t = localtime(&tv);
131  if (NULL == t) return "";
132  return t->tm_zone;
133 }
134 
135 
136 double OS::LocalTimeOffset() {
137  time_t tv = time(NULL);
138  struct tm* t = localtime(&tv);
139  // tm_gmtoff includes any daylight savings offset, so subtract it.
140  return static_cast<double>(t->tm_gmtoff * msPerSecond -
141  (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
142 }
143 
144 
145 // We keep the lowest and highest addresses mapped as a quick way of
146 // determining that pointers are outside the heap (used mostly in assertions
147 // and verification). The estimate is conservative, i.e., not all addresses in
148 // 'allocated' space are actually allocated to our heap. The range is
149 // [lowest, highest), inclusive on the low and and exclusive on the high end.
150 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
151 static void* highest_ever_allocated = reinterpret_cast<void*>(0);
152 
153 
154 static void UpdateAllocatedSpaceLimits(void* address, int size) {
155  ASSERT(limit_mutex != NULL);
156  ScopedLock lock(limit_mutex);
157 
158  lowest_ever_allocated = Min(lowest_ever_allocated, address);
159  highest_ever_allocated =
160  Max(highest_ever_allocated,
161  reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
162 }
163 
164 
165 bool OS::IsOutsideAllocatedSpace(void* address) {
166  return address < lowest_ever_allocated || address >= highest_ever_allocated;
167 }
168 
169 
170 size_t OS::AllocateAlignment() {
171  return sysconf(_SC_PAGESIZE);
172 }
173 
174 
175 void* OS::Allocate(const size_t requested,
176  size_t* allocated,
177  bool is_executable) {
178  const size_t msize = RoundUp(requested, AllocateAlignment());
179  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
180  void* addr = GetRandomMmapAddr();
181  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
182  if (mbase == MAP_FAILED) {
183  LOG(i::Isolate::Current(),
184  StringEvent("OS::Allocate", "mmap failed"));
185  return NULL;
186  }
187  *allocated = msize;
188  UpdateAllocatedSpaceLimits(mbase, msize);
189  return mbase;
190 }
191 
192 
193 void OS::Free(void* address, const size_t size) {
194  // TODO(1240712): munmap has a return value which is ignored here.
195  int result = munmap(address, size);
196  USE(result);
197  ASSERT(result == 0);
198 }
199 
200 
201 void OS::Sleep(int milliseconds) {
202  unsigned int ms = static_cast<unsigned int>(milliseconds);
203  usleep(1000 * ms);
204 }
205 
206 
207 void OS::Abort() {
208  // Redirect to std abort to signal abnormal program termination.
209  abort();
210 }
211 
212 
213 void OS::DebugBreak() {
214  asm("int $3");
215 }
216 
217 
218 class PosixMemoryMappedFile : public OS::MemoryMappedFile {
219  public:
220  PosixMemoryMappedFile(FILE* file, void* memory, int size)
221  : file_(file), memory_(memory), size_(size) { }
222  virtual ~PosixMemoryMappedFile();
223  virtual void* memory() { return memory_; }
224  virtual int size() { return size_; }
225  private:
226  FILE* file_;
227  void* memory_;
228  int size_;
229 };
230 
231 
232 OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
233  FILE* file = fopen(name, "r+");
234  if (file == NULL) return NULL;
235 
236  fseek(file, 0, SEEK_END);
237  int size = ftell(file);
238 
239  void* memory =
240  mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
241  return new PosixMemoryMappedFile(file, memory, size);
242 }
243 
244 
245 OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
246  void* initial) {
247  FILE* file = fopen(name, "w+");
248  if (file == NULL) return NULL;
249  int result = fwrite(initial, size, 1, file);
250  if (result < 1) {
251  fclose(file);
252  return NULL;
253  }
254  void* memory =
255  mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
256  return new PosixMemoryMappedFile(file, memory, size);
257 }
258 
259 
261  if (memory_) OS::Free(memory_, size_);
262  fclose(file_);
263 }
264 
265 
267  // This function assumes that the layout of the file is as follows:
268  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
269  // If we encounter an unexpected situation we abort scanning further entries.
270  FILE* fp = fopen("/proc/self/maps", "r");
271  if (fp == NULL) return;
272 
273  // Allocate enough room to be able to store a full file name.
274  const int kLibNameLen = FILENAME_MAX + 1;
275  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
276 
277  i::Isolate* isolate = ISOLATE;
278  // This loop will terminate once the scanning hits an EOF.
279  while (true) {
280  uintptr_t start, end;
281  char attr_r, attr_w, attr_x, attr_p;
282  // Parse the addresses and permission bits at the beginning of the line.
283  if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
284  if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
285 
286  int c;
287  if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
288  // Found a read-only executable entry. Skip characters until we reach
289  // the beginning of the filename or the end of the line.
290  do {
291  c = getc(fp);
292  } while ((c != EOF) && (c != '\n') && (c != '/'));
293  if (c == EOF) break; // EOF: Was unexpected, just exit.
294 
295  // Process the filename if found.
296  if (c == '/') {
297  ungetc(c, fp); // Push the '/' back into the stream to be read below.
298 
299  // Read to the end of the line. Exit if the read fails.
300  if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
301 
302  // Drop the newline character read by fgets. We do not need to check
303  // for a zero-length string because we know that we at least read the
304  // '/' character.
305  lib_name[strlen(lib_name) - 1] = '\0';
306  } else {
307  // No library name found, just record the raw address range.
308  snprintf(lib_name, kLibNameLen,
309  "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
310  }
311  LOG(isolate, SharedLibraryEvent(lib_name, start, end));
312  } else {
313  // Entry not describing executable data. Skip to end of line to set up
314  // reading the next entry.
315  do {
316  c = getc(fp);
317  } while ((c != EOF) && (c != '\n'));
318  if (c == EOF) break;
319  }
320  }
321  free(lib_name);
322  fclose(fp);
323 }
324 
325 
326 static const char kGCFakeMmap[] = "/tmp/__v8_gc__";
327 
328 
329 void OS::SignalCodeMovingGC() {
330  // Support for ll_prof.py.
331  //
332  // The Linux profiler built into the kernel logs all mmap's with
333  // PROT_EXEC so that analysis tools can properly attribute ticks. We
334  // do a mmap with a name known by ll_prof.py and immediately munmap
335  // it. This injects a GC marker into the stream of events generated
336  // by the kernel and allows us to synchronize V8 code log and the
337  // kernel log.
338  int size = sysconf(_SC_PAGESIZE);
339  FILE* f = fopen(kGCFakeMmap, "w+");
340  void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
341  fileno(f), 0);
342  ASSERT(addr != MAP_FAILED);
343  OS::Free(addr, size);
344  fclose(f);
345 }
346 
347 
348 int OS::StackWalk(Vector<OS::StackFrame> frames) {
349  // backtrace is a glibc extension.
350  int frames_size = frames.length();
351  ScopedVector<void*> addresses(frames_size);
352 
353  int frames_count = backtrace(addresses.start(), frames_size);
354 
355  char** symbols = backtrace_symbols(addresses.start(), frames_count);
356  if (symbols == NULL) {
357  return kStackWalkError;
358  }
359 
360  for (int i = 0; i < frames_count; i++) {
361  frames[i].address = addresses[i];
362  // Format a text representation of the frame based on the information
363  // available.
365  "%s",
366  symbols[i]);
367  // Make sure line termination is in place.
368  frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
369  }
370 
371  free(symbols);
372 
373  return frames_count;
374 }
375 
376 
377 // Constants used for mmap.
378 static const int kMmapFd = -1;
379 static const int kMmapFdOffset = 0;
380 
381 VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
382 
383 VirtualMemory::VirtualMemory(size_t size) {
384  address_ = ReserveRegion(size);
385  size_ = size;
386 }
387 
388 
389 VirtualMemory::VirtualMemory(size_t size, size_t alignment)
390  : address_(NULL), size_(0) {
391  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
392  size_t request_size = RoundUp(size + alignment,
393  static_cast<intptr_t>(OS::AllocateAlignment()));
394  void* reservation = mmap(GetRandomMmapAddr(),
395  request_size,
396  PROT_NONE,
397  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
398  kMmapFd,
399  kMmapFdOffset);
400  if (reservation == MAP_FAILED) return;
401 
402  Address base = static_cast<Address>(reservation);
403  Address aligned_base = RoundUp(base, alignment);
404  ASSERT_LE(base, aligned_base);
405 
406  // Unmap extra memory reserved before and after the desired block.
407  if (aligned_base != base) {
408  size_t prefix_size = static_cast<size_t>(aligned_base - base);
409  OS::Free(base, prefix_size);
410  request_size -= prefix_size;
411  }
412 
413  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
414  ASSERT_LE(aligned_size, request_size);
415 
416  if (aligned_size != request_size) {
417  size_t suffix_size = request_size - aligned_size;
418  OS::Free(aligned_base + aligned_size, suffix_size);
419  request_size -= suffix_size;
420  }
421 
422  ASSERT(aligned_size == request_size);
423 
424  address_ = static_cast<void*>(aligned_base);
425  size_ = aligned_size;
426 }
427 
428 
430  if (IsReserved()) {
431  bool result = ReleaseRegion(address(), size());
432  ASSERT(result);
433  USE(result);
434  }
435 }
436 
437 
439  return address_ != NULL;
440 }
441 
442 
443 void VirtualMemory::Reset() {
444  address_ = NULL;
445  size_ = 0;
446 }
447 
448 
449 bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
450  return CommitRegion(address, size, is_executable);
451 }
452 
453 
454 bool VirtualMemory::Uncommit(void* address, size_t size) {
455  return UncommitRegion(address, size);
456 }
457 
458 
459 bool VirtualMemory::Guard(void* address) {
460  OS::Guard(address, OS::CommitPageSize());
461  return true;
462 }
463 
464 
465 void* VirtualMemory::ReserveRegion(size_t size) {
466  void* result = mmap(GetRandomMmapAddr(),
467  size,
468  PROT_NONE,
469  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
470  kMmapFd,
471  kMmapFdOffset);
472 
473  if (result == MAP_FAILED) return NULL;
474 
475  return result;
476 }
477 
478 
479 bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
480  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
481  if (MAP_FAILED == mmap(base,
482  size,
483  prot,
484  MAP_PRIVATE | MAP_ANON | MAP_FIXED,
485  kMmapFd,
486  kMmapFdOffset)) {
487  return false;
488  }
489 
490  UpdateAllocatedSpaceLimits(base, size);
491  return true;
492 }
493 
494 
495 bool VirtualMemory::UncommitRegion(void* base, size_t size) {
496  return mmap(base,
497  size,
498  PROT_NONE,
499  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
500  kMmapFd,
501  kMmapFdOffset) != MAP_FAILED;
502 }
503 
504 
505 bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
506  return munmap(base, size) == 0;
507 }
508 
509 
510 class Thread::PlatformData : public Malloced {
511  public:
512  PlatformData() : thread_(kNoThread) {}
513 
514  pthread_t thread_; // Thread handle for pthread.
515 };
516 
517 Thread::Thread(const Options& options)
518  : data_(new PlatformData()),
519  stack_size_(options.stack_size()) {
520  set_name(options.name());
521 }
522 
523 
524 Thread::~Thread() {
525  delete data_;
526 }
527 
528 
529 static void* ThreadEntry(void* arg) {
530  Thread* thread = reinterpret_cast<Thread*>(arg);
531  // This is also initialized by the first argument to pthread_create() but we
532  // don't know which thread will run first (the original thread or the new
533  // one) so we initialize it here too.
534 #ifdef PR_SET_NAME
535  prctl(PR_SET_NAME,
536  reinterpret_cast<unsigned long>(thread->name()), // NOLINT
537  0, 0, 0);
538 #endif
539  thread->data()->thread_ = pthread_self();
540  ASSERT(thread->data()->thread_ != kNoThread);
541  thread->Run();
542  return NULL;
543 }
544 
545 
546 void Thread::set_name(const char* name) {
547  strncpy(name_, name, sizeof(name_));
548  name_[sizeof(name_) - 1] = '\0';
549 }
550 
551 
552 void Thread::Start() {
553  pthread_attr_t* attr_ptr = NULL;
554  pthread_attr_t attr;
555  if (stack_size_ > 0) {
556  pthread_attr_init(&attr);
557  pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
558  attr_ptr = &attr;
559  }
560  pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
561  ASSERT(data_->thread_ != kNoThread);
562 }
563 
564 
565 void Thread::Join() {
566  pthread_join(data_->thread_, NULL);
567 }
568 
569 
571  pthread_key_t key;
572  int result = pthread_key_create(&key, NULL);
573  USE(result);
574  ASSERT(result == 0);
575  return static_cast<LocalStorageKey>(key);
576 }
577 
578 
580  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
581  int result = pthread_key_delete(pthread_key);
582  USE(result);
583  ASSERT(result == 0);
584 }
585 
586 
588  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
589  return pthread_getspecific(pthread_key);
590 }
591 
592 
593 void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
594  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
595  pthread_setspecific(pthread_key, value);
596 }
597 
598 
599 void Thread::YieldCPU() {
600  sched_yield();
601 }
602 
603 
604 class OpenBSDMutex : public Mutex {
605  public:
607  pthread_mutexattr_t attrs;
608  int result = pthread_mutexattr_init(&attrs);
609  ASSERT(result == 0);
610  result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
611  ASSERT(result == 0);
612  result = pthread_mutex_init(&mutex_, &attrs);
613  ASSERT(result == 0);
614  USE(result);
615  }
616 
617  virtual ~OpenBSDMutex() { pthread_mutex_destroy(&mutex_); }
618 
619  virtual int Lock() {
620  int result = pthread_mutex_lock(&mutex_);
621  return result;
622  }
623 
624  virtual int Unlock() {
625  int result = pthread_mutex_unlock(&mutex_);
626  return result;
627  }
628 
629  virtual bool TryLock() {
630  int result = pthread_mutex_trylock(&mutex_);
631  // Return false if the lock is busy and locking failed.
632  if (result == EBUSY) {
633  return false;
634  }
635  ASSERT(result == 0); // Verify no other errors.
636  return true;
637  }
638 
639  private:
640  pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
641 };
642 
643 
644 Mutex* OS::CreateMutex() {
645  return new OpenBSDMutex();
646 }
647 
648 
649 class OpenBSDSemaphore : public Semaphore {
650  public:
651  explicit OpenBSDSemaphore(int count) { sem_init(&sem_, 0, count); }
652  virtual ~OpenBSDSemaphore() { sem_destroy(&sem_); }
653 
654  virtual void Wait();
655  virtual bool Wait(int timeout);
656  virtual void Signal() { sem_post(&sem_); }
657  private:
658  sem_t sem_;
659 };
660 
661 
663  while (true) {
664  int result = sem_wait(&sem_);
665  if (result == 0) return; // Successfully got semaphore.
666  CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
667  }
668 }
669 
670 
671 #ifndef TIMEVAL_TO_TIMESPEC
672 #define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
673  (ts)->tv_sec = (tv)->tv_sec; \
674  (ts)->tv_nsec = (tv)->tv_usec * 1000; \
675 } while (false)
676 #endif
677 
678 
679 bool OpenBSDSemaphore::Wait(int timeout) {
680  const long kOneSecondMicros = 1000000; // NOLINT
681 
682  // Split timeout into second and nanosecond parts.
683  struct timeval delta;
684  delta.tv_usec = timeout % kOneSecondMicros;
685  delta.tv_sec = timeout / kOneSecondMicros;
686 
687  struct timeval current_time;
688  // Get the current time.
689  if (gettimeofday(&current_time, NULL) == -1) {
690  return false;
691  }
692 
693  // Calculate time for end of timeout.
694  struct timeval end_time;
695  timeradd(&current_time, &delta, &end_time);
696 
697  struct timespec ts;
698  TIMEVAL_TO_TIMESPEC(&end_time, &ts);
699 
700  int to = ts.tv_sec;
701 
702  while (true) {
703  int result = sem_trywait(&sem_);
704  if (result == 0) return true; // Successfully got semaphore.
705  if (!to) return false; // Timeout.
706  CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
707  usleep(ts.tv_nsec / 1000);
708  to--;
709  }
710 }
711 
712 Semaphore* OS::CreateSemaphore(int count) {
713  return new OpenBSDSemaphore(count);
714 }
715 
716 
// Identifier of the calling thread; used to target profiling signals.
static pthread_t GetThreadID() {
  return pthread_self();
}
720 
721 static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
722  USE(info);
723  if (signal != SIGPROF) return;
724  Isolate* isolate = Isolate::UncheckedCurrent();
725  if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
726  // We require a fully initialized and entered isolate.
727  return;
728  }
729  if (v8::Locker::IsActive() &&
730  !isolate->thread_manager()->IsLockedByCurrentThread()) {
731  return;
732  }
733 
734  Sampler* sampler = isolate->logger()->sampler();
735  if (sampler == NULL || !sampler->IsActive()) return;
736 
737  TickSample sample_obj;
738  TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
739  if (sample == NULL) sample = &sample_obj;
740 
741  // Extracting the sample from the context is extremely machine dependent.
742  sample->state = isolate->current_vm_state();
743  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
744 #ifdef __NetBSD__
745  mcontext_t& mcontext = ucontext->uc_mcontext;
746 #if V8_HOST_ARCH_IA32
747  sample->pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_EIP]);
748  sample->sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_ESP]);
749  sample->fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_EBP]);
750 #elif V8_HOST_ARCH_X64
751  sample->pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_RIP]);
752  sample->sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RSP]);
753  sample->fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RBP]);
754 #endif // V8_HOST_ARCH
755 #else // OpenBSD
756 #if V8_HOST_ARCH_IA32
757  sample->pc = reinterpret_cast<Address>(ucontext->sc_eip);
758  sample->sp = reinterpret_cast<Address>(ucontext->sc_esp);
759  sample->fp = reinterpret_cast<Address>(ucontext->sc_ebp);
760 #elif V8_HOST_ARCH_X64
761  sample->pc = reinterpret_cast<Address>(ucontext->sc_rip);
762  sample->sp = reinterpret_cast<Address>(ucontext->sc_rsp);
763  sample->fp = reinterpret_cast<Address>(ucontext->sc_rbp);
764 #endif // V8_HOST_ARCH
765 #endif // __NetBSD__
766  sampler->SampleStack(sample);
767  sampler->Tick(sample);
768 }
769 
770 
771 class Sampler::PlatformData : public Malloced {
772  public:
773  PlatformData() : vm_tid_(GetThreadID()) {}
774 
775  pthread_t vm_tid() const { return vm_tid_; }
776 
777  private:
778  pthread_t vm_tid_;
779 };
780 
781 
782 class SignalSender : public Thread {
783  public:
785  HALF_INTERVAL,
786  FULL_INTERVAL
787  };
788 
789  static const int kSignalSenderStackSize = 64 * KB;
790 
791  explicit SignalSender(int interval)
792  : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
793  vm_tgid_(getpid()),
794  interval_(interval) {}
795 
796  static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
797  static void TearDown() { delete mutex_; }
798 
799  static void InstallSignalHandler() {
800  struct sigaction sa;
801  sa.sa_sigaction = ProfilerSignalHandler;
802  sigemptyset(&sa.sa_mask);
803  sa.sa_flags = SA_RESTART | SA_SIGINFO;
804  signal_handler_installed_ =
805  (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
806  }
807 
808  static void RestoreSignalHandler() {
809  if (signal_handler_installed_) {
810  sigaction(SIGPROF, &old_signal_handler_, 0);
811  signal_handler_installed_ = false;
812  }
813  }
814 
815  static void AddActiveSampler(Sampler* sampler) {
816  ScopedLock lock(mutex_);
818  if (instance_ == NULL) {
819  // Start a thread that will send SIGPROF signal to VM threads,
820  // when CPU profiling will be enabled.
821  instance_ = new SignalSender(sampler->interval());
822  instance_->Start();
823  } else {
824  ASSERT(instance_->interval_ == sampler->interval());
825  }
826  }
827 
828  static void RemoveActiveSampler(Sampler* sampler) {
829  ScopedLock lock(mutex_);
833  delete instance_;
834  instance_ = NULL;
835  RestoreSignalHandler();
836  }
837  }
838 
839  // Implement Thread::Run().
840  virtual void Run() {
842  while ((state = SamplerRegistry::GetState()) !=
844  bool cpu_profiling_enabled =
846  bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
847  if (cpu_profiling_enabled && !signal_handler_installed_) {
848  InstallSignalHandler();
849  } else if (!cpu_profiling_enabled && signal_handler_installed_) {
850  RestoreSignalHandler();
851  }
852  // When CPU profiling is enabled both JavaScript and C++ code is
853  // profiled. We must not suspend.
854  if (!cpu_profiling_enabled) {
855  if (rate_limiter_.SuspendIfNecessary()) continue;
856  }
857  if (cpu_profiling_enabled && runtime_profiler_enabled) {
858  if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
859  return;
860  }
861  Sleep(HALF_INTERVAL);
862  if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
863  return;
864  }
865  Sleep(HALF_INTERVAL);
866  } else {
867  if (cpu_profiling_enabled) {
868  if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
869  this)) {
870  return;
871  }
872  }
873  if (runtime_profiler_enabled) {
874  if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
875  NULL)) {
876  return;
877  }
878  }
879  Sleep(FULL_INTERVAL);
880  }
881  }
882  }
883 
884  static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
885  if (!sampler->IsProfiling()) return;
886  SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
887  sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
888  }
889 
890  static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
891  if (!sampler->isolate()->IsInitialized()) return;
892  sampler->isolate()->runtime_profiler()->NotifyTick();
893  }
894 
895  void SendProfilingSignal(pthread_t tid) {
896  if (!signal_handler_installed_) return;
897  pthread_kill(tid, SIGPROF);
898  }
899 
900  void Sleep(SleepInterval full_or_half) {
901  // Convert ms to us and subtract 100 us to compensate delays
902  // occuring during signal delivery.
903  useconds_t interval = interval_ * 1000 - 100;
904  if (full_or_half == HALF_INTERVAL) interval /= 2;
905  int result = usleep(interval);
906 #ifdef DEBUG
907  if (result != 0 && errno != EINTR) {
908  fprintf(stderr,
909  "SignalSender usleep error; interval = %u, errno = %d\n",
910  interval,
911  errno);
912  ASSERT(result == 0 || errno == EINTR);
913  }
914 #endif
915  USE(result);
916  }
917 
918  const int vm_tgid_;
919  const int interval_;
920  RuntimeProfilerRateLimiter rate_limiter_;
921 
922  // Protects the process wide state below.
923  static Mutex* mutex_;
924  static SignalSender* instance_;
925  static bool signal_handler_installed_;
926  static struct sigaction old_signal_handler_;
927 
928  private:
930 };
931 
932 
933 Mutex* SignalSender::mutex_ = NULL;
934 SignalSender* SignalSender::instance_ = NULL;
935 struct sigaction SignalSender::old_signal_handler_;
937 
938 
939 void OS::SetUp() {
940  // Seed the random number generator. We preserve microsecond resolution.
941  uint64_t seed = Ticks() ^ (getpid() << 16);
942  srandom(static_cast<unsigned int>(seed));
943  limit_mutex = CreateMutex();
945 }
946 
947 
948 void OS::TearDown() {
950  delete limit_mutex;
951 }
952 
953 
954 Sampler::Sampler(Isolate* isolate, int interval)
955  : isolate_(isolate),
956  interval_(interval),
957  profiling_(false),
958  active_(false),
959  samples_taken_(0) {
960  data_ = new PlatformData;
961 }
962 
963 
965  ASSERT(!IsActive());
966  delete data_;
967 }
968 
969 
970 void Sampler::Start() {
971  ASSERT(!IsActive());
972  SetActive(true);
974 }
975 
976 
977 void Sampler::Stop() {
978  ASSERT(IsActive());
980  SetActive(false);
981 }
982 
983 
984 } } // namespace v8::internal
byte * Address
Definition: globals.h:172
static void * GetThreadLocal(LocalStorageKey key)
#define TIMEVAL_TO_TIMESPEC(tv, ts)
static void Free(void *address, const size_t size)
#define V8PRIxPTR
Definition: globals.h:204
char ** backtrace_symbols(void *const *, int) __attribute__((weak_import))
Thread(const Options &options)
PlatformData * platform_data()
Definition: platform.h:745
#define LOG(isolate, Call)
Definition: log.h:81
const int KB
Definition: globals.h:221
void SendProfilingSignal(pthread_t tid)
bool IsActive() const
Definition: platform.h:734
Isolate * isolate()
Definition: platform.h:736
static void SignalCodeMovingGC()
value format" "after each garbage collection") DEFINE_bool(print_cumulative_gc_stat, false, "print cumulative GC statistics in name=value format on exit") DEFINE_bool(trace_gc_verbose, false, "print more details following each garbage collection") DEFINE_bool(trace_fragmentation, false, "report fragmentation for old pointer and data pages") DEFINE_bool(collect_maps, true, "garbage collect maps from which no objects can be reached") DEFINE_bool(flush_code, true, "flush code that we expect not to use again before full gc") DEFINE_bool(incremental_marking, true, "use incremental marking") DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps") DEFINE_bool(trace_incremental_marking, false, "trace progress of the incremental marking") DEFINE_bool(use_idle_notification, true, "Use idle notification to reduce memory footprint.") DEFINE_bool(send_idle_notification, false, "Send idle notifcation between stress runs.") DEFINE_bool(use_ic, true, "use inline caching") DEFINE_bool(native_code_counters, false, "generate extra code for manipulating stats counters") DEFINE_bool(always_compact, false, "Perform compaction on every full GC") DEFINE_bool(lazy_sweeping, true, "Use lazy sweeping for old pointer and data spaces") DEFINE_bool(never_compact, false, "Never perform compaction on full GC-testing only") DEFINE_bool(compact_code_space, true, "Compact code space on full non-incremental collections") DEFINE_bool(cleanup_code_caches_at_gc, true, "Flush inline caches prior to mark compact collection and" "flush code caches in maps during mark compact cycle.") DEFINE_int(random_seed, 0, "Default seed for initializing random generator" "(0, the default, means to use system random).") DEFINE_bool(use_verbose_printer, true, "allows verbose printing") DEFINE_bool(allow_natives_syntax, false, "allow natives syntax") DEFINE_bool(trace_sim, false, "Trace simulator execution") DEFINE_bool(check_icache, false, "Check icache flushes in ARM and MIPS simulator") 
DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions") DEFINE_int(sim_stack_alignment, 8, "Stack alingment in bytes in simulator(4 or 8, 8 is default)") DEFINE_bool(trace_exception, false, "print stack trace when throwing exceptions") DEFINE_bool(preallocate_message_memory, false, "preallocate some memory to build stack traces.") DEFINE_bool(randomize_hashes, true, "randomize hashes to avoid predictable hash collisions" "(with snapshots this option cannot override the baked-in seed)") DEFINE_int(hash_seed, 0, "Fixed seed to use to hash property keys(0 means random)" "(with snapshots this option cannot override the baked-in seed)") DEFINE_bool(preemption, false, "activate a 100ms timer that switches between V8 threads") DEFINE_bool(regexp_optimization, true, "generate optimized regexp code") DEFINE_bool(testing_bool_flag, true, "testing_bool_flag") DEFINE_int(testing_int_flag, 13, "testing_int_flag") DEFINE_float(testing_float_flag, 2.5, "float-flag") DEFINE_string(testing_string_flag, "Hello, world!", "string-flag") DEFINE_int(testing_prng_seed, 42, "Seed used for threading test randomness") DEFINE_string(testing_serialization_file, "/tmp/serdes", "file in which to serialize heap") DEFINE_bool(help, false, "Print usage message, including flags, on console") DEFINE_bool(dump_counters, false, "Dump counters on exit") DEFINE_string(map_counters, "", "Map counters to a file") DEFINE_args(js_arguments, JSARGUMENTS_INIT, "Pass all remaining arguments to the script.Alias for\"--\".") DEFINE_bool(debug_compile_events, true,"Enable debugger compile events") DEFINE_bool(debug_script_collected_events, true,"Enable debugger script collected events") DEFINE_bool(gdbjit, false,"enable GDBJIT interface (disables compacting GC)") DEFINE_bool(gdbjit_full, false,"enable GDBJIT interface for all code objects") DEFINE_bool(gdbjit_dump, false,"dump elf objects with debug info to disk") DEFINE_string(gdbjit_dump_filter,"","dump only objects containing this 
substring") DEFINE_bool(force_marking_deque_overflows, false,"force overflows of marking deque by reducing it's size ""to 64 words") DEFINE_bool(stress_compaction, false,"stress the GC compactor to flush out bugs (implies ""--force_marking_deque_overflows)")#define FLAG DEFINE_bool(enable_slow_asserts, false,"enable asserts that are slow to execute") DEFINE_bool(trace_codegen, false,"print name of functions for which code is generated") DEFINE_bool(print_source, false,"pretty print source code") DEFINE_bool(print_builtin_source, false,"pretty print source code for builtins") DEFINE_bool(print_ast, false,"print source AST") DEFINE_bool(print_builtin_ast, false,"print source AST for builtins") DEFINE_string(stop_at,"","function name where to insert a breakpoint") DEFINE_bool(print_builtin_scopes, false,"print scopes for builtins") DEFINE_bool(print_scopes, false,"print scopes") DEFINE_bool(trace_contexts, false,"trace contexts operations") DEFINE_bool(gc_greedy, false,"perform GC prior to some allocations") DEFINE_bool(gc_verbose, false,"print stuff during garbage collection") DEFINE_bool(heap_stats, false,"report heap statistics before and after GC") DEFINE_bool(code_stats, false,"report code statistics after GC") DEFINE_bool(verify_heap, false,"verify heap pointers before and after GC") DEFINE_bool(print_handles, false,"report handles after GC") DEFINE_bool(print_global_handles, false,"report global handles after GC") DEFINE_bool(trace_ic, false,"trace inline cache state transitions") DEFINE_bool(print_interfaces, false,"print interfaces") DEFINE_bool(print_interface_details, false,"print interface inference details") DEFINE_int(print_interface_depth, 5,"depth for printing interfaces") DEFINE_bool(trace_normalization, false,"prints when objects are turned into dictionaries.") DEFINE_bool(trace_lazy, false,"trace lazy compilation") DEFINE_bool(collect_heap_spill_statistics, false,"report heap spill statistics along with heap_stats ""(requires heap_stats)") 
DEFINE_bool(trace_isolates, false,"trace isolate state changes") DEFINE_bool(log_state_changes, false,"Log state changes.") DEFINE_bool(regexp_possessive_quantifier, false,"enable possessive quantifier syntax for testing") DEFINE_bool(trace_regexp_bytecodes, false,"trace regexp bytecode execution") DEFINE_bool(trace_regexp_assembler, false,"trace regexp macro assembler calls.")#define FLAG DEFINE_bool(log, false,"Minimal logging (no API, code, GC, suspect, or handles samples).") DEFINE_bool(log_all, false,"Log all events to the log file.") DEFINE_bool(log_runtime, false,"Activate runtime system %Log call.") DEFINE_bool(log_api, false,"Log API events to the log file.") DEFINE_bool(log_code, false,"Log code events to the log file without profiling.") DEFINE_bool(log_gc, false,"Log heap samples on garbage collection for the hp2ps tool.") DEFINE_bool(log_handles, false,"Log global handle events.") DEFINE_bool(log_snapshot_positions, false,"log positions of (de)serialized objects in the snapshot.") DEFINE_bool(log_suspect, false,"Log suspect operations.") DEFINE_bool(prof, false,"Log statistical profiling information (implies --log-code).") DEFINE_bool(prof_auto, true,"Used with --prof, starts profiling automatically") DEFINE_bool(prof_lazy, false,"Used with --prof, only does sampling and logging"" when profiler is active (implies --noprof_auto).") DEFINE_bool(prof_browser_mode, true,"Used with --prof, turns on browser-compatible mode for profiling.") DEFINE_bool(log_regexp, false,"Log regular expression execution.") DEFINE_bool(sliding_state_window, false,"Update sliding state window counters.") DEFINE_string(logfile,"v8.log","Specify the name of the log file.") DEFINE_bool(ll_prof, false,"Enable low-level linux profiler.")#define FLAG DEFINE_bool(trace_elements_transitions, false,"trace elements transitions") DEFINE_bool(print_code_stubs, false,"print code stubs") DEFINE_bool(test_secondary_stub_cache, false,"test secondary stub cache by disabling the primary one") 
DEFINE_bool(test_primary_stub_cache, false,"test primary stub cache by disabling the secondary one") DEFINE_bool(print_code, false,"print generated code") DEFINE_bool(print_opt_code, false,"print optimized code") DEFINE_bool(print_unopt_code, false,"print unoptimized code before ""printing optimized code based on it") DEFINE_bool(print_code_verbose, false,"print more information for code") DEFINE_bool(print_builtin_code, false,"print generated code for builtins")#43"/Users/thlorenz/dev/dx/v8-perf/build/v8/src/flags.cc"2#define FLAG_MODE_DEFINE_DEFAULTS#1"/Users/thlorenz/dev/dx/v8-perf/build/v8/src/flag-definitions.h"1#define FLAG_FULL(ftype, ctype, nam, def, cmt)#define FLAG_READONLY(ftype, ctype, nam, def, cmt)#define DEFINE_implication(whenflag, thenflag)#define DEFINE_bool(nam, def, cmt)#define DEFINE_int(nam, def, cmt)#define DEFINE_float(nam, def, cmt)#define DEFINE_string(nam, def, cmt)#define DEFINE_args(nam, def, cmt)#define FLAG DEFINE_bool(use_strict, false,"enforce strict mode") DEFINE_bool(es5_readonly, false,"activate correct semantics for inheriting readonliness") DEFINE_bool(es52_globals, false,"activate new semantics for global var declarations") DEFINE_bool(harmony_typeof, false,"enable harmony semantics for typeof") DEFINE_bool(harmony_scoping, false,"enable harmony block scoping") DEFINE_bool(harmony_modules, false,"enable harmony modules (implies block scoping)") DEFINE_bool(harmony_proxies, false,"enable harmony proxies") DEFINE_bool(harmony_collections, false,"enable harmony collections (sets, maps, and weak maps)") DEFINE_bool(harmony, false,"enable all harmony features (except typeof)") DEFINE_implication(harmony, harmony_scoping) DEFINE_implication(harmony, harmony_modules) DEFINE_implication(harmony, harmony_proxies) DEFINE_implication(harmony, harmony_collections) DEFINE_implication(harmony_modules, harmony_scoping) DEFINE_bool(packed_arrays, false,"optimizes arrays that have no holes") DEFINE_bool(smi_only_arrays, true,"tracks arrays 
with only smi values") DEFINE_bool(clever_optimizations, true,"Optimize object size, Array shift, DOM strings and string +") DEFINE_bool(unbox_double_arrays, true,"automatically unbox arrays of doubles") DEFINE_bool(string_slices, true,"use string slices") DEFINE_bool(crankshaft, true,"use crankshaft") DEFINE_string(hydrogen_filter,"","optimization filter") DEFINE_bool(use_range, true,"use hydrogen range analysis") DEFINE_bool(eliminate_dead_phis, true,"eliminate dead phis") DEFINE_bool(use_gvn, true,"use hydrogen global value numbering") DEFINE_bool(use_canonicalizing, true,"use hydrogen instruction canonicalizing") DEFINE_bool(use_inlining, true,"use function inlining") DEFINE_int(max_inlined_source_size, 600,"maximum source size in bytes considered for a single inlining") DEFINE_int(max_inlined_nodes, 196,"maximum number of AST nodes considered for a single inlining") DEFINE_int(max_inlined_nodes_cumulative, 196,"maximum cumulative number of AST nodes considered for inlining") DEFINE_bool(loop_invariant_code_motion, true,"loop invariant code motion") DEFINE_bool(collect_megamorphic_maps_from_stub_cache, true,"crankshaft harvests type feedback from stub cache") DEFINE_bool(hydrogen_stats, false,"print statistics for hydrogen") DEFINE_bool(trace_hydrogen, false,"trace generated hydrogen to file") DEFINE_string(trace_phase,"Z","trace generated IR for specified phases") DEFINE_bool(trace_inlining, false,"trace inlining decisions") DEFINE_bool(trace_alloc, false,"trace register allocator") DEFINE_bool(trace_all_uses, false,"trace all use positions") DEFINE_bool(trace_range, false,"trace range analysis") DEFINE_bool(trace_gvn, false,"trace global value numbering") DEFINE_bool(trace_representation, false,"trace representation types") DEFINE_bool(stress_pointer_maps, false,"pointer map for every instruction") DEFINE_bool(stress_environments, false,"environment for every instruction") DEFINE_int(deopt_every_n_times, 0,"deoptimize every n times a deopt point is passed") 
DEFINE_bool(trap_on_deopt, false,"put a break point before deoptimizing") DEFINE_bool(deoptimize_uncommon_cases, true,"deoptimize uncommon cases") DEFINE_bool(polymorphic_inlining, true,"polymorphic inlining") DEFINE_bool(use_osr, true,"use on-stack replacement") DEFINE_bool(array_bounds_checks_elimination, false,"perform array bounds checks elimination") DEFINE_bool(array_index_dehoisting, false,"perform array index dehoisting") DEFINE_bool(trace_osr, false,"trace on-stack replacement") DEFINE_int(stress_runs, 0,"number of stress runs") DEFINE_bool(optimize_closures, true,"optimize closures") DEFINE_bool(inline_construct, true,"inline constructor calls") DEFINE_bool(inline_arguments, true,"inline functions with arguments object") DEFINE_int(loop_weight, 1,"loop weight for representation inference") DEFINE_bool(optimize_for_in, true,"optimize functions containing for-in loops") DEFINE_bool(experimental_profiler, true,"enable all profiler experiments") DEFINE_bool(watch_ic_patching, false,"profiler considers IC stability") DEFINE_int(frame_count, 1,"number of stack frames inspected by the profiler") DEFINE_bool(self_optimization, false,"primitive functions trigger their own optimization") DEFINE_bool(direct_self_opt, false,"call recompile stub directly when self-optimizing") DEFINE_bool(retry_self_opt, false,"re-try self-optimization if it failed") DEFINE_bool(count_based_interrupts, false,"trigger profiler ticks based on counting instead of timing") DEFINE_bool(interrupt_at_exit, false,"insert an interrupt check at function exit") DEFINE_bool(weighted_back_edges, false,"weight back edges by jump distance for interrupt triggering") DEFINE_int(interrupt_budget, 5900,"execution budget before interrupt is triggered") DEFINE_int(type_info_threshold, 15,"percentage of ICs that must have type info to allow optimization") DEFINE_int(self_opt_count, 130,"call count before self-optimization") DEFINE_implication(experimental_profiler, watch_ic_patching) 
DEFINE_implication(experimental_profiler, self_optimization) DEFINE_implication(experimental_profiler, retry_self_opt) DEFINE_implication(experimental_profiler, count_based_interrupts) DEFINE_implication(experimental_profiler, interrupt_at_exit) DEFINE_implication(experimental_profiler, weighted_back_edges) DEFINE_bool(trace_opt_verbose, false,"extra verbose compilation tracing") DEFINE_implication(trace_opt_verbose, trace_opt) DEFINE_bool(debug_code, false,"generate extra code (assertions) for debugging") DEFINE_bool(code_comments, false,"emit comments in code disassembly") DEFINE_bool(enable_sse2, true,"enable use of SSE2 instructions if available") DEFINE_bool(enable_sse3, true,"enable use of SSE3 instructions if available") DEFINE_bool(enable_sse4_1, true,"enable use of SSE4.1 instructions if available") DEFINE_bool(enable_cmov, true,"enable use of CMOV instruction if available") DEFINE_bool(enable_rdtsc, true,"enable use of RDTSC instruction if available") DEFINE_bool(enable_sahf, true,"enable use of SAHF instruction if available (X64 only)") DEFINE_bool(enable_vfp3, true,"enable use of VFP3 instructions if available - this implies ""enabling ARMv7 instructions (ARM only)") DEFINE_bool(enable_armv7, true,"enable use of ARMv7 instructions if available (ARM only)") DEFINE_bool(enable_fpu, true,"enable use of MIPS FPU instructions if available (MIPS only)") DEFINE_string(expose_natives_as, NULL,"expose natives in global object") DEFINE_string(expose_debug_as, NULL,"expose debug in global object") DEFINE_bool(expose_gc, false,"expose gc extension") DEFINE_bool(expose_externalize_string, false,"expose externalize string extension") DEFINE_int(stack_trace_limit, 10,"number of stack frames to capture") DEFINE_bool(builtins_in_stack_traces, false,"show built-in functions in stack traces") DEFINE_bool(disable_native_files, false,"disable builtin natives files") DEFINE_bool(inline_new, true,"use fast inline allocation") DEFINE_bool(stack_trace_on_abort, true,"print a 
stack trace if an assertion failure occurs") DEFINE_bool(trace, false,"trace function calls") DEFINE_bool(mask_constants_with_cookie, true,"use random jit cookie to mask large constants") DEFINE_bool(lazy, true,"use lazy compilation") DEFINE_bool(trace_opt, false,"trace lazy optimization") DEFINE_bool(trace_opt_stats, false,"trace lazy optimization statistics") DEFINE_bool(opt, true,"use adaptive optimizations") DEFINE_bool(always_opt, false,"always try to optimize functions") DEFINE_bool(prepare_always_opt, false,"prepare for turning on always opt") DEFINE_bool(trace_deopt, false,"trace deoptimization") DEFINE_int(min_preparse_length, 1024,"minimum length for automatic enable preparsing") DEFINE_bool(always_full_compiler, false,"try to use the dedicated run-once backend for all code") DEFINE_bool(trace_bailout, false,"print reasons for falling back to using the classic V8 backend") DEFINE_bool(compilation_cache, true,"enable compilation cache") DEFINE_bool(cache_prototype_transitions, true,"cache prototype transitions") DEFINE_bool(trace_debug_json, false,"trace debugging JSON request/response") DEFINE_bool(debugger_auto_break, true,"automatically set the debug break flag when debugger commands are ""in the queue") DEFINE_bool(enable_liveedit, true,"enable liveedit experimental feature") DEFINE_bool(break_on_abort, true,"always cause a debug break before aborting") DEFINE_int(stack_size, kPointerSize *123,"default size of stack region v8 is allowed to use (in kBytes)") DEFINE_int(max_stack_trace_source_length, 300,"maximum length of function source code printed in a stack trace.") DEFINE_bool(always_inline_smi_code, false,"always inline smi code in non-opt code") DEFINE_int(max_new_space_size, 0,"max size of the new generation (in kBytes)") DEFINE_int(max_old_space_size, 0,"max size of the old generation (in Mbytes)") DEFINE_int(max_executable_size, 0,"max size of executable memory (in Mbytes)") DEFINE_bool(gc_global, false,"always perform global GCs") 
DEFINE_int(gc_interval,-1,"garbage collect after <n> allocations") DEFINE_bool(trace_gc, false,"print one trace line following each garbage collection") DEFINE_bool(trace_gc_nvp, false,"print one detailed trace line in name=value format ""after each garbage collection") DEFINE_bool(print_cumulative_gc_stat, false,"print cumulative GC statistics in name=value format on exit") DEFINE_bool(trace_gc_verbose, false,"print more details following each garbage collection") DEFINE_bool(trace_fragmentation, false,"report fragmentation for old pointer and data pages") DEFINE_bool(collect_maps, true,"garbage collect maps from which no objects can be reached") DEFINE_bool(flush_code, true,"flush code that we expect not to use again before full gc") DEFINE_bool(incremental_marking, true,"use incremental marking") DEFINE_bool(incremental_marking_steps, true,"do incremental marking steps") DEFINE_bool(trace_incremental_marking, false,"trace progress of the incremental marking") DEFINE_bool(use_idle_notification, true,"Use idle notification to reduce memory footprint.") DEFINE_bool(send_idle_notification, false,"Send idle notifcation between stress runs.") DEFINE_bool(use_ic, true,"use inline caching") DEFINE_bool(native_code_counters, false,"generate extra code for manipulating stats counters") DEFINE_bool(always_compact, false,"Perform compaction on every full GC") DEFINE_bool(lazy_sweeping, true,"Use lazy sweeping for old pointer and data spaces") DEFINE_bool(never_compact, false,"Never perform compaction on full GC - testing only") DEFINE_bool(compact_code_space, true,"Compact code space on full non-incremental collections") DEFINE_bool(cleanup_code_caches_at_gc, true,"Flush inline caches prior to mark compact collection and ""flush code caches in maps during mark compact cycle.") DEFINE_int(random_seed, 0,"Default seed for initializing random generator ""(0, the default, means to use system random).") DEFINE_bool(use_verbose_printer, true,"allows verbose printing") 
DEFINE_bool(allow_natives_syntax, false,"allow natives syntax") DEFINE_bool(trace_sim, false,"Trace simulator execution") DEFINE_bool(check_icache, false,"Check icache flushes in ARM and MIPS simulator") DEFINE_int(stop_sim_at, 0,"Simulator stop after x number of instructions") DEFINE_int(sim_stack_alignment, 8,"Stack alingment in bytes in simulator (4 or 8, 8 is default)") DEFINE_bool(trace_exception, false,"print stack trace when throwing exceptions") DEFINE_bool(preallocate_message_memory, false,"preallocate some memory to build stack traces.") DEFINE_bool(randomize_hashes, true,"randomize hashes to avoid predictable hash collisions ""(with snapshots this option cannot override the baked-in seed)") DEFINE_int(hash_seed, 0,"Fixed seed to use to hash property keys (0 means random)""(with snapshots this option cannot override the baked-in seed)") DEFINE_bool(preemption, false,"activate a 100ms timer that switches between V8 threads") DEFINE_bool(regexp_optimization, true,"generate optimized regexp code") DEFINE_bool(testing_bool_flag, true,"testing_bool_flag") DEFINE_int(testing_int_flag, 13,"testing_int_flag") DEFINE_float(testing_float_flag, 2.5,"float-flag") DEFINE_string(testing_string_flag,"Hello, world!","string-flag") DEFINE_int(testing_prng_seed, 42,"Seed used for threading test randomness") DEFINE_string(testing_serialization_file,"/tmp/serdes","file in which to serialize heap") DEFINE_bool(help, false,"Print usage message, including flags, on console") DEFINE_bool(dump_counters, false,"Dump counters on exit") DEFINE_string(map_counters,"","Map counters to a file") DEFINE_args(js_arguments, JSARGUMENTS_INIT,"Pass all remaining arguments to the script. 
Alias for \"--\".") DEFINE_bool(debug_compile_events, true,"Enable debugger compile events") DEFINE_bool(debug_script_collected_events, true,"Enable debugger script collected events") DEFINE_bool(gdbjit, false,"enable GDBJIT interface (disables compacting GC)") DEFINE_bool(gdbjit_full, false,"enable GDBJIT interface for all code objects") DEFINE_bool(gdbjit_dump, false,"dump elf objects with debug info to disk") DEFINE_string(gdbjit_dump_filter,"","dump only objects containing this substring") DEFINE_bool(force_marking_deque_overflows, false,"force overflows of marking deque by reducing it's size ""to 64 words") DEFINE_bool(stress_compaction, false,"stress the GC compactor to flush out bugs (implies ""--force_marking_deque_overflows)")#define FLAG DEFINE_bool(enable_slow_asserts, false,"enable asserts that are slow to execute") DEFINE_bool(trace_codegen, false,"print name of functions for which code is generated") DEFINE_bool(print_source, false,"pretty print source code") DEFINE_bool(print_builtin_source, false,"pretty print source code for builtins") DEFINE_bool(print_ast, false,"print source AST") DEFINE_bool(print_builtin_ast, false,"print source AST for builtins") DEFINE_string(stop_at,"","function name where to insert a breakpoint") DEFINE_bool(print_builtin_scopes, false,"print scopes for builtins") DEFINE_bool(print_scopes, false,"print scopes") DEFINE_bool(trace_contexts, false,"trace contexts operations") DEFINE_bool(gc_greedy, false,"perform GC prior to some allocations") DEFINE_bool(gc_verbose, false,"print stuff during garbage collection") DEFINE_bool(heap_stats, false,"report heap statistics before and after GC") DEFINE_bool(code_stats, false,"report code statistics after GC") DEFINE_bool(verify_heap, false,"verify heap pointers before and after GC") DEFINE_bool(print_handles, false,"report handles after GC") DEFINE_bool(print_global_handles, false,"report global handles after GC") DEFINE_bool(trace_ic, false,"trace inline cache state transitions") 
DEFINE_bool(print_interfaces, false,"print interfaces") DEFINE_bool(print_interface_details, false,"print interface inference details") DEFINE_int(print_interface_depth, 5,"depth for printing interfaces") DEFINE_bool(trace_normalization, false,"prints when objects are turned into dictionaries.") DEFINE_bool(trace_lazy, false,"trace lazy compilation") DEFINE_bool(collect_heap_spill_statistics, false,"report heap spill statistics along with heap_stats ""(requires heap_stats)") DEFINE_bool(trace_isolates, false,"trace isolate state changes") DEFINE_bool(log_state_changes, false,"Log state changes.") DEFINE_bool(regexp_possessive_quantifier, false,"enable possessive quantifier syntax for testing") DEFINE_bool(trace_regexp_bytecodes, false,"trace regexp bytecode execution") DEFINE_bool(trace_regexp_assembler, false,"trace regexp macro assembler calls.")#define FLAG DEFINE_bool(log, false,"Minimal logging (no API, code, GC, suspect, or handles samples).") DEFINE_bool(log_all, false,"Log all events to the log file.") DEFINE_bool(log_runtime, false,"Activate runtime system %Log call.") DEFINE_bool(log_api, false,"Log API events to the log file.") DEFINE_bool(log_code, false,"Log code events to the log file without profiling.") DEFINE_bool(log_gc, false,"Log heap samples on garbage collection for the hp2ps tool.") DEFINE_bool(log_handles, false,"Log global handle events.") DEFINE_bool(log_snapshot_positions, false,"log positions of (de)serialized objects in the snapshot.") DEFINE_bool(log_suspect, false,"Log suspect operations.") DEFINE_bool(prof, false,"Log statistical profiling information (implies --log-code).") DEFINE_bool(prof_auto, true,"Used with --prof, starts profiling automatically") DEFINE_bool(prof_lazy, false,"Used with --prof, only does sampling and logging"" when profiler is active (implies --noprof_auto).") DEFINE_bool(prof_browser_mode, true,"Used with --prof, turns on browser-compatible mode for profiling.") DEFINE_bool(log_regexp, false,"Log regular 
expression execution.") DEFINE_bool(sliding_state_window, false,"Update sliding state window counters.") DEFINE_string(logfile,"v8.log","Specify the name of the log file.") DEFINE_bool(ll_prof, false,"Enable low-level linux profiler.")#define FLAG DEFINE_bool(trace_elements_transitions, false,"trace elements transitions") DEFINE_bool(print_code_stubs, false,"print code stubs") DEFINE_bool(test_secondary_stub_cache, false,"test secondary stub cache by disabling the primary one") DEFINE_bool(test_primary_stub_cache, false,"test primary stub cache by disabling the secondary one") DEFINE_bool(print_code, false,"print generated code") DEFINE_bool(print_opt_code, false,"print optimized code") DEFINE_bool(print_unopt_code, false,"print unoptimized code before ""printing optimized code based on it") DEFINE_bool(print_code_verbose, false,"print more information for code") DEFINE_bool(print_builtin_code, false,"print generated code for builtins")#47"/Users/thlorenz/dev/dx/v8-perf/build/v8/src/flags.cc"2 namespace{struct Flag{enum FlagType{TYPE_BOOL, TYPE_INT, TYPE_FLOAT, TYPE_STRING, TYPE_ARGS} name
Definition: flags.cc:1349
static void * GetRandomMmapAddr()
double ceiling(double x)
static void * ReserveRegion(size_t size)
T Max(T a, T b)
Definition: utils.h:222
static bool IsOutsideAllocatedSpace(void *pointer)
static const char * LocalTimezone(double time)
Vector< char > MutableCStrVector(char *data)
Definition: utils.h:529
static const int kStackWalkError
Definition: platform.h:223
void Sleep(SleepInterval full_or_half)
static const int kStackWalkMaxTextLen
Definition: platform.h:225
static void DoRuntimeProfile(Sampler *sampler, void *ignored)
TickSample * sample
PosixMemoryMappedFile(FILE *file, void *memory, int size)
#define ASSERT(condition)
Definition: checks.h:270
int interval() const
Definition: platform.h:712
#define CHECK(condition)
Definition: checks.h:56
int isnan(double x)
static MemoryMappedFile * open(const char *name)
static void RemoveActiveSampler(Sampler *sampler)
Definition: log.cc:1783
unsigned int seed
Definition: test-strings.cc:17
#define timeradd(a, b, result)
static void Abort()
static void StopRuntimeProfilerThreadBeforeShutdown(Thread *thread)
static SignalSender * instance_
static void ReleaseStore(volatile AtomicWord *ptr, AtomicWord value)
RuntimeProfiler * runtime_profiler()
Definition: isolate.h:811
static TickSample * TickSampleEvent(Isolate *isolate)
bool IsProfiling() const
Definition: platform.h:729
void POSIXPostSetUp()
static LocalStorageKey CreateThreadLocalKey()
bool IsAligned(T value, U alignment)
Definition: utils.h:206
static MemoryMappedFile * create(const char *name, int size, void *initial)
bool Commit(void *address, size_t size, bool is_executable)
#define DISALLOW_COPY_AND_ASSIGN(TypeName)
Definition: globals.h:321
intptr_t AtomicWord
Definition: atomicops.h:72
static void Guard(void *address, const size_t size)
T RoundUp(T x, intptr_t m)
Definition: utils.h:150
#define ASSERT_LE(v1, v2)
Definition: checks.h:275
static Mutex * CreateMutex()
static bool IsActive()
Definition: v8threads.cc:97
static void DebugBreak()
int backtrace(void **, int) __attribute__((weak_import))
static void DeleteThreadLocalKey(LocalStorageKey key)
static void Sleep(const int milliseconds)
static void TearDown()
static int SNPrintF(Vector< char > str, const char *format,...)
static Semaphore * CreateSemaphore(int count)
#define ISOLATE
Definition: isolate.h:1410
static bool ReleaseRegion(void *base, size_t size)
static bool CommitRegion(void *base, size_t size, bool is_executable)
static uint32_t RandomPrivate(Isolate *isolate)
Definition: v8.cc:178
static void SetThreadLocal(LocalStorageKey key, void *value)
static void * Allocate(const size_t requested, size_t *allocated, bool is_executable)
static int StackWalk(Vector< StackFrame > frames)
static void PostSetUp()
static State GetState()
Definition: log.cc:1762
static bool UncommitRegion(void *base, size_t size)
static void LogSharedLibraryAddresses()
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination trace on stack replacement optimize closures functions with arguments object optimize functions containing for in loops profiler considers IC stability primitive functions trigger their own optimization re try self optimization if it failed insert an interrupt check at function exit execution budget before interrupt is triggered call count before self optimization self_optimization count_based_interrupts weighted_back_edges trace_opt emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 enable use of ARMv7 instructions if enable use of MIPS FPU instructions if NULL
Definition: flags.cc:274
static void SetUp()
void USE(T)
Definition: globals.h:303
static int ActivationFrameAlignment()
static size_t AllocateAlignment()
static void AddActiveSampler(Sampler *sampler)
static void AddActiveSampler(Sampler *sampler)
Definition: log.cc:1771
Sampler(Isolate *isolate, int interval)
static uint64_t CpuFeaturesImpliedByPlatform()
static bool IterateActiveSamplers(VisitSampler func, void *param)
Definition: log.cc:1745
const Register fp
static void RemoveActiveSampler(Sampler *sampler)
static double LocalTimeOffset()
T Min(T a, T b)
Definition: utils.h:229
static intptr_t CommitPageSize()
bool Uncommit(void *address, size_t size)
static void DoCpuProfile(Sampler *sampler, void *raw_sender)