v8 3.11.10 (node 0.8.26)
V8 is Google's open source JavaScript engine
platform-freebsd.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 // Platform-specific code for FreeBSD goes here. For the POSIX-compatible parts
29 // the implementation is in platform-posix.cc.
30 
31 #include <pthread.h>
32 #include <semaphore.h>
33 #include <signal.h>
34 #include <sys/time.h>
35 #include <sys/resource.h>
36 #include <sys/types.h>
37 #include <sys/ucontext.h>
38 #include <stdlib.h>
39 
40 #include <sys/types.h> // mmap & munmap
41 #include <sys/mman.h> // mmap & munmap
42 #include <sys/stat.h> // open
43 #include <sys/fcntl.h> // open
44 #include <unistd.h> // getpagesize
45 // If you don't have execinfo.h then you need devel/libexecinfo from ports.
46 #include <execinfo.h> // backtrace, backtrace_symbols
47 #include <strings.h> // index
48 #include <errno.h>
49 #include <stdarg.h>
50 #include <limits.h>
51 
52 #undef MAP_TYPE
53 
54 #include "v8.h"
55 #include "v8threads.h"
56 
57 #include "platform-posix.h"
58 #include "platform.h"
59 #include "vm-state-inl.h"
60 
61 
62 namespace v8 {
63 namespace internal {
64 
65 // 0 is never a valid thread id on FreeBSD since tids and pids share a
66 // name space and pid 0 is used to kill the group (see man 2 kill).
67 static const pthread_t kNoThread = (pthread_t) 0;
68 
69 
70 double ceiling(double x) {
71  // Correct as on OS X
72  if (-1.0 < x && x < 0.0) {
73  return -0.0;
74  } else {
75  return ceil(x);
76  }
77 }
78 
79 
80 static Mutex* limit_mutex = NULL;
81 
82 
83 void OS::PostSetUp() {
84  POSIXPostSetUp();
85 }
86 
87 
88 void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
89  __asm__ __volatile__("" : : : "memory");
90  *ptr = value;
91 }
92 
93 
94 uint64_t OS::CpuFeaturesImpliedByPlatform() {
95  return 0; // FreeBSD runs on anything.
96 }
97 
98 
99 int OS::ActivationFrameAlignment() {
100  // 16 byte alignment on FreeBSD
101  return 16;
102 }
103 
104 
105 const char* OS::LocalTimezone(double time) {
106  if (isnan(time)) return "";
107  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
108  struct tm* t = localtime(&tv);
109  if (NULL == t) return "";
110  return t->tm_zone;
111 }
112 
113 
114 double OS::LocalTimeOffset() {
115  time_t tv = time(NULL);
116  struct tm* t = localtime(&tv);
117  // tm_gmtoff includes any daylight savings offset, so subtract it.
118  return static_cast<double>(t->tm_gmtoff * msPerSecond -
119  (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
120 }
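The offset computation above folds daylight saving time back out of tm_gmtoff. A standalone sketch (hypothetical, not part of this file) of the same arithmetic, using the BSD/glibc tm_gmtoff extension:

#include <stdio.h>
#include <time.h>

int main() {
  const int msPerSecond = 1000;  // mirrors OS::msPerSecond
  time_t tv = time(NULL);
  struct tm* t = localtime(&tv);
  if (t == NULL) return 1;
  // tm_gmtoff already contains the DST shift, so subtract it to get the
  // standard-time offset from UTC in milliseconds.
  double offset = static_cast<double>(t->tm_gmtoff) * msPerSecond -
                  (t->tm_isdst > 0 ? 3600 * msPerSecond : 0);
  printf("standard-time UTC offset: %.0f ms\n", offset);
  return 0;
}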
121 
122 
123 // We keep the lowest and highest addresses mapped as a quick way of
124 // determining that pointers are outside the heap (used mostly in assertions
125 // and verification). The estimate is conservative, i.e., not all addresses in
126 // 'allocated' space are actually allocated to our heap. The range is
127 // [lowest, highest), inclusive on the low end and exclusive on the high end.
128 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
129 static void* highest_ever_allocated = reinterpret_cast<void*>(0);
130 
131 
132 static void UpdateAllocatedSpaceLimits(void* address, int size) {
133  ASSERT(limit_mutex != NULL);
134  ScopedLock lock(limit_mutex);
135 
136  lowest_ever_allocated = Min(lowest_ever_allocated, address);
137  highest_ever_allocated =
138  Max(highest_ever_allocated,
139  reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
140 }
141 
142 
143 bool OS::IsOutsideAllocatedSpace(void* address) {
144  return address < lowest_ever_allocated || address >= highest_ever_allocated;
145 }
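The two cursor values above give a cheap, conservative containment test: pointers outside [lowest, highest) are definitely not in the heap. A minimal standalone sketch (names are hypothetical, not V8 code) of the same idea:

#include <algorithm>
#include <cstddef>
#include <stdio.h>

static void* lowest_seen = reinterpret_cast<void*>(-1);
static void* highest_seen = reinterpret_cast<void*>(0);

static void OnRegionMapped(void* address, size_t size) {
  // Bounds only grow, so the range over-approximates the mapped regions.
  lowest_seen = std::min(lowest_seen, address);
  highest_seen = std::max(
      highest_seen, static_cast<void*>(static_cast<char*>(address) + size));
}

static bool DefinitelyOutsideHeap(void* p) {
  return p < lowest_seen || p >= highest_seen;
}

int main() {
  char block[64];
  OnRegionMapped(block, sizeof(block));
  printf("inside:  %d\n", !DefinitelyOutsideHeap(block + 10));
  printf("outside: %d\n", DefinitelyOutsideHeap(reinterpret_cast<void*>(0)));
  return 0;
}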
146 
147 
148 size_t OS::AllocateAlignment() {
149  return getpagesize();
150 }
151 
152 
153 void* OS::Allocate(const size_t requested,
154  size_t* allocated,
155  bool executable) {
156  const size_t msize = RoundUp(requested, getpagesize());
157  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
158  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
159 
160  if (mbase == MAP_FAILED) {
161  LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
162  return NULL;
163  }
164  *allocated = msize;
165  UpdateAllocatedSpaceLimits(mbase, msize);
166  return mbase;
167 }
168 
169 
170 void OS::Free(void* buf, const size_t length) {
171  // TODO(1240712): munmap has a return value which is ignored here.
172  int result = munmap(buf, length);
173  USE(result);
174  ASSERT(result == 0);
175 }
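Allocate() and Free() above are thin wrappers over anonymous mmap/munmap with page-size rounding. A hedged standalone sketch of that pattern (no V8 types, error handling kept minimal):

#include <sys/mman.h>
#include <unistd.h>
#include <stdio.h>

int main() {
  size_t page = static_cast<size_t>(getpagesize());
  size_t requested = 5000;
  size_t msize = (requested + page - 1) & ~(page - 1);  // round up to a page
  void* mbase = mmap(NULL, msize, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANON, -1, 0);
  if (mbase == MAP_FAILED) { perror("mmap"); return 1; }
  printf("allocated %zu bytes at %p\n", msize, mbase);
  munmap(mbase, msize);  // free with the same rounded length
  return 0;
}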
176 
177 
178 void OS::Sleep(int milliseconds) {
179  unsigned int ms = static_cast<unsigned int>(milliseconds);
180  usleep(1000 * ms);
181 }
182 
183 
184 void OS::Abort() {
185  // Redirect to std abort to signal abnormal program termination.
186  abort();
187 }
188 
189 
190 void OS::DebugBreak() {
191 #if (defined(__arm__) || defined(__thumb__))
192 # if defined(CAN_USE_ARMV5_INSTRUCTIONS)
193  asm("bkpt 0");
194 # endif
195 #else
196  asm("int $3");
197 #endif
198 }
199 
200 
201 class PosixMemoryMappedFile : public OS::MemoryMappedFile {
202  public:
203  PosixMemoryMappedFile(FILE* file, void* memory, int size)
204  : file_(file), memory_(memory), size_(size) { }
205  virtual ~PosixMemoryMappedFile();
206  virtual void* memory() { return memory_; }
207  virtual int size() { return size_; }
208  private:
209  FILE* file_;
210  void* memory_;
211  int size_;
212 };
213 
214 
215 OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
216  FILE* file = fopen(name, "r+");
217  if (file == NULL) return NULL;
218 
219  fseek(file, 0, SEEK_END);
220  int size = ftell(file);
221 
222  void* memory =
223  mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
224  return new PosixMemoryMappedFile(file, memory, size);
225 }
226 
227 
228 OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
229  void* initial) {
230  FILE* file = fopen(name, "w+");
231  if (file == NULL) return NULL;
232  int result = fwrite(initial, size, 1, file);
233  if (result < 1) {
234  fclose(file);
235  return NULL;
236  }
237  void* memory =
238  mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
239  return new PosixMemoryMappedFile(file, memory, size);
240 }
241 
242 
243 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
244  if (memory_) munmap(memory_, size_);
245  fclose(file_);
246 }
247 
248 
249 static unsigned StringToLong(char* buffer) {
250  return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT
251 }
252 
253 
254 void OS::LogSharedLibraryAddresses() {
255  static const int MAP_LENGTH = 1024;
256  int fd = open("/proc/self/maps", O_RDONLY);
257  if (fd < 0) return;
258  while (true) {
259  char addr_buffer[11];
260  addr_buffer[0] = '0';
261  addr_buffer[1] = 'x';
262  addr_buffer[10] = 0;
263  int result = read(fd, addr_buffer + 2, 8);
264  if (result < 8) break;
265  unsigned start = StringToLong(addr_buffer);
266  result = read(fd, addr_buffer + 2, 1);
267  if (result < 1) break;
268  if (addr_buffer[2] != '-') break;
269  result = read(fd, addr_buffer + 2, 8);
270  if (result < 8) break;
271  unsigned end = StringToLong(addr_buffer);
272  char buffer[MAP_LENGTH];
273  int bytes_read = -1;
274  do {
275  bytes_read++;
276  if (bytes_read >= MAP_LENGTH - 1)
277  break;
278  result = read(fd, buffer + bytes_read, 1);
279  if (result < 1) break;
280  } while (buffer[bytes_read] != '\n');
281  buffer[bytes_read] = 0;
282  // Ignore mappings that are not executable.
283  if (buffer[3] != 'x') continue;
284  char* start_of_path = index(buffer, '/');
285  // There may be no filename in this line. Skip to next.
286  if (start_of_path == NULL) continue;
287  buffer[bytes_read] = 0;
288  LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end));
289  }
290  close(fd);
291 }
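A hedged standalone sketch of the same /proc/self/maps walk, using stdio line parsing instead of the raw read() loop above; it assumes a Linux-style maps format and that procfs is mounted:

#include <stdio.h>
#include <string.h>

int main() {
  FILE* fp = fopen("/proc/self/maps", "r");
  if (fp == NULL) return 1;  // procfs may not be mounted on FreeBSD
  char line[1024];
  while (fgets(line, sizeof(line), fp) != NULL) {
    // Each line looks like: "start-end perms offset dev inode  path".
    unsigned long start, end;
    char perms[5] = {0};
    if (sscanf(line, "%lx-%lx %4s", &start, &end, perms) != 3) continue;
    if (strchr(perms, 'x') == NULL) continue;  // skip non-executable mappings
    char* path = strchr(line, '/');
    if (path == NULL) continue;                // skip anonymous mappings
    path[strcspn(path, "\n")] = '\0';
    printf("%lx-%lx %s\n", start, end, path);
  }
  fclose(fp);
  return 0;
}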
292 
293 
294 void OS::SignalCodeMovingGC() {
295 }
296 
297 
298 int OS::StackWalk(Vector<OS::StackFrame> frames) {
299  int frames_size = frames.length();
300  ScopedVector<void*> addresses(frames_size);
301 
302  int frames_count = backtrace(addresses.start(), frames_size);
303 
304  char** symbols = backtrace_symbols(addresses.start(), frames_count);
305  if (symbols == NULL) {
306  return kStackWalkError;
307  }
308 
309  for (int i = 0; i < frames_count; i++) {
310  frames[i].address = addresses[i];
311  // Format a text representation of the frame based on the information
312  // available.
313  SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
314  "%s",
315  symbols[i]);
316  // Make sure line termination is in place.
317  frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
318  }
319 
320  free(symbols);
321 
322  return frames_count;
323 }
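StackWalk() relies on backtrace()/backtrace_symbols(); a minimal standalone sketch (on FreeBSD this needs devel/libexecinfo and linking with -lexecinfo):

#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>

int main() {
  void* addresses[64];
  int count = backtrace(addresses, 64);            // capture return addresses
  char** symbols = backtrace_symbols(addresses, count);
  if (symbols == NULL) return 1;
  for (int i = 0; i < count; i++) {
    printf("%2d: %p %s\n", i, addresses[i], symbols[i]);
  }
  free(symbols);  // one free() releases the whole symbol array
  return 0;
}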
324 
325 
326 // Constants used for mmap.
327 static const int kMmapFd = -1;
328 static const int kMmapFdOffset = 0;
329 
330 VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
331 
332 VirtualMemory::VirtualMemory(size_t size) {
333  address_ = ReserveRegion(size);
334  size_ = size;
335 }
336 
337 
338 VirtualMemory::VirtualMemory(size_t size, size_t alignment)
339  : address_(NULL), size_(0) {
340  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
341  size_t request_size = RoundUp(size + alignment,
342  static_cast<intptr_t>(OS::AllocateAlignment()));
343  void* reservation = mmap(OS::GetRandomMmapAddr(),
344  request_size,
345  PROT_NONE,
346  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
347  kMmapFd,
348  kMmapFdOffset);
349  if (reservation == MAP_FAILED) return;
350 
351  Address base = static_cast<Address>(reservation);
352  Address aligned_base = RoundUp(base, alignment);
353  ASSERT_LE(base, aligned_base);
354 
355  // Unmap extra memory reserved before and after the desired block.
356  if (aligned_base != base) {
357  size_t prefix_size = static_cast<size_t>(aligned_base - base);
358  OS::Free(base, prefix_size);
359  request_size -= prefix_size;
360  }
361 
362  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
363  ASSERT_LE(aligned_size, request_size);
364 
365  if (aligned_size != request_size) {
366  size_t suffix_size = request_size - aligned_size;
367  OS::Free(aligned_base + aligned_size, suffix_size);
368  request_size -= suffix_size;
369  }
370 
371  ASSERT(aligned_size == request_size);
372 
373  address_ = static_cast<void*>(aligned_base);
374  size_ = aligned_size;
375 }
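The constructor above obtains an aligned base by over-reserving and trimming. A standalone sketch of the same trick (PROT_NONE reservation; MAP_NORESERVE omitted for brevity; names are illustrative):

#include <sys/mman.h>
#include <stdint.h>
#include <stdio.h>

int main() {
  const size_t size = 1 << 20;        // payload we actually want
  const size_t alignment = 1 << 20;   // required base alignment
  size_t request = size + alignment;  // over-reserve so an aligned base exists
  void* raw = mmap(NULL, request, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
  if (raw == MAP_FAILED) { perror("mmap"); return 1; }
  char* base = static_cast<char*>(raw);
  uintptr_t bits = reinterpret_cast<uintptr_t>(base);
  char* aligned = reinterpret_cast<char*>(
      (bits + alignment - 1) & ~(uintptr_t(alignment) - 1));
  size_t prefix = static_cast<size_t>(aligned - base);
  if (prefix != 0) munmap(base, prefix);            // unmap the unused prefix
  size_t suffix = request - prefix - size;
  if (suffix != 0) munmap(aligned + size, suffix);  // unmap the unused suffix
  printf("aligned reservation at %p\n", static_cast<void*>(aligned));
  munmap(aligned, size);
  return 0;
}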
376 
377 
378 VirtualMemory::~VirtualMemory() {
379  if (IsReserved()) {
380  bool result = ReleaseRegion(address(), size());
381  ASSERT(result);
382  USE(result);
383  }
384 }
385 
386 
387 bool VirtualMemory::IsReserved() {
388  return address_ != NULL;
389 }
390 
391 
392 void VirtualMemory::Reset() {
393  address_ = NULL;
394  size_ = 0;
395 }
396 
397 
398 bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
399  return CommitRegion(address, size, is_executable);
400 }
401 
402 
403 bool VirtualMemory::Uncommit(void* address, size_t size) {
404  return UncommitRegion(address, size);
405 }
406 
407 
408 bool VirtualMemory::Guard(void* address) {
409  OS::Guard(address, OS::CommitPageSize());
410  return true;
411 }
412 
413 
414 void* VirtualMemory::ReserveRegion(size_t size) {
415  void* result = mmap(OS::GetRandomMmapAddr(),
416  size,
417  PROT_NONE,
418  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
419  kMmapFd,
420  kMmapFdOffset);
421 
422  if (result == MAP_FAILED) return NULL;
423 
424  return result;
425 }
426 
427 
428 bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
429  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
430  if (MAP_FAILED == mmap(base,
431  size,
432  prot,
433  MAP_PRIVATE | MAP_ANON | MAP_FIXED,
434  kMmapFd,
435  kMmapFdOffset)) {
436  return false;
437  }
438 
439  UpdateAllocatedSpaceLimits(base, size);
440  return true;
441 }
442 
443 
444 bool VirtualMemory::UncommitRegion(void* base, size_t size) {
445  return mmap(base,
446  size,
447  PROT_NONE,
448  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
449  kMmapFd,
450  kMmapFdOffset) != MAP_FAILED;
451 }
452 
453 
454 bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
455  return munmap(base, size) == 0;
456 }
457 
458 
459 class Thread::PlatformData : public Malloced {
460  public:
461  pthread_t thread_; // Thread handle for pthread.
462 };
463 
464 
465 Thread::Thread(const Options& options)
466  : data_(new PlatformData),
467  stack_size_(options.stack_size()) {
468  set_name(options.name());
469 }
470 
471 
472 Thread::~Thread() {
473  delete data_;
474 }
475 
476 
477 static void* ThreadEntry(void* arg) {
478  Thread* thread = reinterpret_cast<Thread*>(arg);
479  // This is also initialized by the first argument to pthread_create() but we
480  // don't know which thread will run first (the original thread or the new
481  // one) so we initialize it here too.
482  thread->data()->thread_ = pthread_self();
483  ASSERT(thread->data()->thread_ != kNoThread);
484  thread->Run();
485  return NULL;
486 }
487 
488 
489 void Thread::set_name(const char* name) {
490  strncpy(name_, name, sizeof(name_));
491  name_[sizeof(name_) - 1] = '\0';
492 }
493 
494 
495 void Thread::Start() {
496  pthread_attr_t* attr_ptr = NULL;
497  pthread_attr_t attr;
498  if (stack_size_ > 0) {
499  pthread_attr_init(&attr);
500  pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
501  attr_ptr = &attr;
502  }
503  pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
504  ASSERT(data_->thread_ != kNoThread);
505 }
506 
507 
508 void Thread::Join() {
509  pthread_join(data_->thread_, NULL);
510 }
511 
512 
513 Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
514  pthread_key_t key;
515  int result = pthread_key_create(&key, NULL);
516  USE(result);
517  ASSERT(result == 0);
518  return static_cast<LocalStorageKey>(key);
519 }
520 
521 
522 void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
523  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
524  int result = pthread_key_delete(pthread_key);
525  USE(result);
526  ASSERT(result == 0);
527 }
528 
529 
530 void* Thread::GetThreadLocal(LocalStorageKey key) {
531  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
532  return pthread_getspecific(pthread_key);
533 }
534 
535 
536 void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
537  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
538  pthread_setspecific(pthread_key, value);
539 }
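The four thread-local-storage wrappers above map directly onto the pthread TLS API; a minimal standalone sketch of that key/value lifecycle:

#include <pthread.h>
#include <stdio.h>

int main() {
  pthread_key_t key;
  if (pthread_key_create(&key, NULL) != 0) return 1;           // CreateThreadLocalKey
  int value = 42;
  pthread_setspecific(key, &value);                            // SetThreadLocal
  int* stored = static_cast<int*>(pthread_getspecific(key));   // GetThreadLocal
  printf("thread-local value: %d\n", *stored);
  pthread_key_delete(key);                                     // DeleteThreadLocalKey
  return 0;
}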
540 
541 
542 void Thread::YieldCPU() {
543  sched_yield();
544 }
545 
546 
547 class FreeBSDMutex : public Mutex {
548  public:
549  FreeBSDMutex() {
550  pthread_mutexattr_t attrs;
551  int result = pthread_mutexattr_init(&attrs);
552  ASSERT(result == 0);
553  result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
554  ASSERT(result == 0);
555  result = pthread_mutex_init(&mutex_, &attrs);
556  ASSERT(result == 0);
557  USE(result);
558  }
559 
560  virtual ~FreeBSDMutex() { pthread_mutex_destroy(&mutex_); }
561 
562  virtual int Lock() {
563  int result = pthread_mutex_lock(&mutex_);
564  return result;
565  }
566 
567  virtual int Unlock() {
568  int result = pthread_mutex_unlock(&mutex_);
569  return result;
570  }
571 
572  virtual bool TryLock() {
573  int result = pthread_mutex_trylock(&mutex_);
574  // Return false if the lock is busy and locking failed.
575  if (result == EBUSY) {
576  return false;
577  }
578  ASSERT(result == 0); // Verify no other errors.
579  return true;
580  }
581 
582  private:
583  pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
584 };
585 
586 
587 Mutex* OS::CreateMutex() {
588  return new FreeBSDMutex();
589 }
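FreeBSDMutex configures a recursive pthread mutex; a standalone sketch showing why PTHREAD_MUTEX_RECURSIVE matters (the second lock on the same thread succeeds instead of deadlocking):

#include <pthread.h>
#include <stdio.h>

int main() {
  pthread_mutexattr_t attrs;
  pthread_mutexattr_init(&attrs);
  pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_t mutex;
  pthread_mutex_init(&mutex, &attrs);

  pthread_mutex_lock(&mutex);
  int second = pthread_mutex_lock(&mutex);  // re-entrant: returns 0, no deadlock
  printf("second lock on the same thread returned %d\n", second);
  pthread_mutex_unlock(&mutex);
  pthread_mutex_unlock(&mutex);

  pthread_mutex_destroy(&mutex);
  pthread_mutexattr_destroy(&attrs);
  return 0;
}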
590 
591 
592 class FreeBSDSemaphore : public Semaphore {
593  public:
594  explicit FreeBSDSemaphore(int count) { sem_init(&sem_, 0, count); }
595  virtual ~FreeBSDSemaphore() { sem_destroy(&sem_); }
596 
597  virtual void Wait();
598  virtual bool Wait(int timeout);
599  virtual void Signal() { sem_post(&sem_); }
600  private:
601  sem_t sem_;
602 };
603 
604 
605 void FreeBSDSemaphore::Wait() {
606  while (true) {
607  int result = sem_wait(&sem_);
608  if (result == 0) return; // Successfully got semaphore.
609  CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
610  }
611 }
612 
613 
614 bool FreeBSDSemaphore::Wait(int timeout) {
615  const long kOneSecondMicros = 1000000; // NOLINT
616 
617  // Split timeout into second and nanosecond parts.
618  struct timeval delta;
619  delta.tv_usec = timeout % kOneSecondMicros;
620  delta.tv_sec = timeout / kOneSecondMicros;
621 
622  struct timeval current_time;
623  // Get the current time.
624  if (gettimeofday(&current_time, NULL) == -1) {
625  return false;
626  }
627 
628  // Calculate time for end of timeout.
629  struct timeval end_time;
630  timeradd(&current_time, &delta, &end_time);
631 
632  struct timespec ts;
633  TIMEVAL_TO_TIMESPEC(&end_time, &ts);
634  while (true) {
635  int result = sem_timedwait(&sem_, &ts);
636  if (result == 0) return true; // Successfully got semaphore.
637  if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
638  CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
639  }
640 }
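The timed wait above converts a relative microsecond timeout into the absolute deadline that sem_timedwait() expects, retrying on EINTR. A standalone sketch of that conversion (it uses the BSD timeradd/TIMEVAL_TO_TIMESPEC macros, as the code above does):

#include <errno.h>
#include <semaphore.h>
#include <stdio.h>
#include <sys/time.h>

static bool TimedWait(sem_t* sem, long timeout_us) {
  // Split the relative timeout into seconds and microseconds.
  struct timeval delta;
  delta.tv_sec = timeout_us / 1000000;
  delta.tv_usec = timeout_us % 1000000;
  // Add it to "now" to form the absolute deadline sem_timedwait() wants.
  struct timeval now;
  if (gettimeofday(&now, NULL) == -1) return false;
  struct timeval end_time;
  timeradd(&now, &delta, &end_time);
  struct timespec ts;
  TIMEVAL_TO_TIMESPEC(&end_time, &ts);
  while (true) {
    int result = sem_timedwait(sem, &ts);
    if (result == 0) return true;                          // acquired
    if (result == -1 && errno == ETIMEDOUT) return false;  // deadline passed
    if (result == -1 && errno == EINTR) continue;          // interrupted; retry
    return false;
  }
}

int main() {
  sem_t sem;
  sem_init(&sem, 0, 0);  // starts empty, so the wait must time out
  printf("TimedWait -> %d\n", TimedWait(&sem, 200000));  // 200 ms
  sem_destroy(&sem);
  return 0;
}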
641 
642 
643 Semaphore* OS::CreateSemaphore(int count) {
644  return new FreeBSDSemaphore(count);
645 }
646 
647 
648 static pthread_t GetThreadID() {
649  pthread_t thread_id = pthread_self();
650  return thread_id;
651 }
652 
653 
654 class Sampler::PlatformData : public Malloced {
655  public:
656  PlatformData() : vm_tid_(GetThreadID()) {}
657 
658  pthread_t vm_tid() const { return vm_tid_; }
659 
660  private:
661  pthread_t vm_tid_;
662 };
663 
664 
665 static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
666  USE(info);
667  if (signal != SIGPROF) return;
668  Isolate* isolate = Isolate::UncheckedCurrent();
669  if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
670  // We require a fully initialized and entered isolate.
671  return;
672  }
673  if (v8::Locker::IsActive() &&
674  !isolate->thread_manager()->IsLockedByCurrentThread()) {
675  return;
676  }
677 
678  Sampler* sampler = isolate->logger()->sampler();
679  if (sampler == NULL || !sampler->IsActive()) return;
680 
681  TickSample sample_obj;
682  TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
683  if (sample == NULL) sample = &sample_obj;
684 
685  // Extracting the sample from the context is extremely machine dependent.
686  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
687  mcontext_t& mcontext = ucontext->uc_mcontext;
688  sample->state = isolate->current_vm_state();
689 #if V8_HOST_ARCH_IA32
690  sample->pc = reinterpret_cast<Address>(mcontext.mc_eip);
691  sample->sp = reinterpret_cast<Address>(mcontext.mc_esp);
692  sample->fp = reinterpret_cast<Address>(mcontext.mc_ebp);
693 #elif V8_HOST_ARCH_X64
694  sample->pc = reinterpret_cast<Address>(mcontext.mc_rip);
695  sample->sp = reinterpret_cast<Address>(mcontext.mc_rsp);
696  sample->fp = reinterpret_cast<Address>(mcontext.mc_rbp);
697 #elif V8_HOST_ARCH_ARM
698  sample->pc = reinterpret_cast<Address>(mcontext.mc_r15);
699  sample->sp = reinterpret_cast<Address>(mcontext.mc_r13);
700  sample->fp = reinterpret_cast<Address>(mcontext.mc_r11);
701 #endif
702  sampler->SampleStack(sample);
703  sampler->Tick(sample);
704 }
705 
706 
707 class SignalSender : public Thread {
708  public:
709  enum SleepInterval {
710  FULL_INTERVAL,
711  HALF_INTERVAL
712  };
713 
714  static const int kSignalSenderStackSize = 64 * KB;
715 
716  explicit SignalSender(int interval)
717  : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
718  interval_(interval) {}
719 
720  static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
721  static void TearDown() { delete mutex_; }
722 
723  static void AddActiveSampler(Sampler* sampler) {
724  ScopedLock lock(mutex_);
725  SamplerRegistry::AddActiveSampler(sampler);
726  if (instance_ == NULL) {
727  // Install a signal handler.
728  struct sigaction sa;
729  sa.sa_sigaction = ProfilerSignalHandler;
730  sigemptyset(&sa.sa_mask);
731  sa.sa_flags = SA_RESTART | SA_SIGINFO;
732  signal_handler_installed_ =
733  (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
734 
735  // Start a thread that sends SIGPROF signal to VM threads.
736  instance_ = new SignalSender(sampler->interval());
737  instance_->Start();
738  } else {
739  ASSERT(instance_->interval_ == sampler->interval());
740  }
741  }
742 
743  static void RemoveActiveSampler(Sampler* sampler) {
744  ScopedLock lock(mutex_);
745  SamplerRegistry::RemoveActiveSampler(sampler);
746  if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
747  RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
748  delete instance_;
749  instance_ = NULL;
750 
751  // Restore the old signal handler.
752  if (signal_handler_installed_) {
753  sigaction(SIGPROF, &old_signal_handler_, 0);
754  signal_handler_installed_ = false;
755  }
756  }
757  }
758 
759  // Implement Thread::Run().
760  virtual void Run() {
761  SamplerRegistry::State state;
762  while ((state = SamplerRegistry::GetState()) !=
763  SamplerRegistry::HAS_NO_SAMPLERS) {
764  bool cpu_profiling_enabled =
765  (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
766  bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
767  // When CPU profiling is enabled both JavaScript and C++ code is
768  // profiled. We must not suspend.
769  if (!cpu_profiling_enabled) {
770  if (rate_limiter_.SuspendIfNecessary()) continue;
771  }
772  if (cpu_profiling_enabled && runtime_profiler_enabled) {
773  if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
774  return;
775  }
776  Sleep(HALF_INTERVAL);
777  if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
778  return;
779  }
780  Sleep(HALF_INTERVAL);
781  } else {
782  if (cpu_profiling_enabled) {
783  if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
784  this)) {
785  return;
786  }
787  }
788  if (runtime_profiler_enabled) {
789  if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
790  NULL)) {
791  return;
792  }
793  }
794  Sleep(FULL_INTERVAL);
795  }
796  }
797  }
798 
799  static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
800  if (!sampler->IsProfiling()) return;
801  SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
802  sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
803  }
804 
805  static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
806  if (!sampler->isolate()->IsInitialized()) return;
807  sampler->isolate()->runtime_profiler()->NotifyTick();
808  }
809 
810  void SendProfilingSignal(pthread_t tid) {
811  if (!signal_handler_installed_) return;
812  pthread_kill(tid, SIGPROF);
813  }
814 
815  void Sleep(SleepInterval full_or_half) {
816  // Convert ms to us and subtract 100 us to compensate for delays
817  // occurring during signal delivery.
818  useconds_t interval = interval_ * 1000 - 100;
819  if (full_or_half == HALF_INTERVAL) interval /= 2;
820  int result = usleep(interval);
821 #ifdef DEBUG
822  if (result != 0 && errno != EINTR) {
823  fprintf(stderr,
824  "SignalSender usleep error; interval = %u, errno = %d\n",
825  interval,
826  errno);
827  ASSERT(result == 0 || errno == EINTR);
828  }
829 #endif
830  USE(result);
831  }
832 
833  const int interval_;
834  RuntimeProfilerRateLimiter rate_limiter_;
835 
836  // Protects the process wide state below.
837  static Mutex* mutex_;
838  static SignalSender* instance_;
839  static bool signal_handler_installed_;
840  static struct sigaction old_signal_handler_;
841 
842  private:
843  DISALLOW_COPY_AND_ASSIGN(SignalSender);
844 };
845 
846 Mutex* SignalSender::mutex_ = NULL;
847 SignalSender* SignalSender::instance_ = NULL;
848 struct sigaction SignalSender::old_signal_handler_;
849 bool SignalSender::signal_handler_installed_ = false;
850 
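The sampler machinery above boils down to a SIGPROF handler plus a sender thread calling pthread_kill() on a schedule. A much-simplified standalone sketch (a single thread signals itself; a real profiler would capture PC/SP/FP in the handler, as ProfilerSignalHandler does):

#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t ticks = 0;

static void Handler(int signal_number, siginfo_t* info, void* context) {
  (void) info;
  (void) context;
  if (signal_number == SIGPROF) ticks = ticks + 1;
}

int main() {
  struct sigaction sa;
  sa.sa_sigaction = Handler;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_RESTART | SA_SIGINFO;
  if (sigaction(SIGPROF, &sa, NULL) != 0) return 1;

  pthread_t target = pthread_self();
  for (int i = 0; i < 5; i++) {
    pthread_kill(target, SIGPROF);  // what SendProfilingSignal() does
    usleep(1000);                   // one (shortened) sampling interval
  }
  printf("profiling ticks received: %d\n", static_cast<int>(ticks));
  return 0;
}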
851 
852 void OS::SetUp() {
853  // Seed the random number generator.
854  // Convert the current time to a 64-bit integer first, before converting it
855  // to an unsigned. Going directly can cause an overflow and the seed to be
856  // set to all ones. The seed will be identical for different instances that
857  // call this setup code within the same millisecond.
858  uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
859  srandom(static_cast<unsigned int>(seed));
860  limit_mutex = CreateMutex();
861  SignalSender::SetUp();
862 }
863 
864 
865 void OS::TearDown() {
866  SignalSender::TearDown();
867  delete limit_mutex;
868 }
869 
870 
871 Sampler::Sampler(Isolate* isolate, int interval)
872  : isolate_(isolate),
873  interval_(interval),
874  profiling_(false),
875  active_(false),
876  samples_taken_(0) {
877  data_ = new PlatformData;
878 }
879 
880 
881 Sampler::~Sampler() {
882  ASSERT(!IsActive());
883  delete data_;
884 }
885 
886 
887 void Sampler::Start() {
888  ASSERT(!IsActive());
889  SetActive(true);
890  SignalSender::AddActiveSampler(this);
891 }
892 
893 
894 void Sampler::Stop() {
895  ASSERT(IsActive());
896  SignalSender::RemoveActiveSampler(this);
897  SetActive(false);
898 }
899 
900 
901 } } // namespace v8::internal