v8 3.11.10 (node 0.8.26)
V8 is Google's open source JavaScript engine
platform-cygwin.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 // Platform specific code for Cygwin goes here. For the POSIX compatible parts
29 // the implementation is in platform-posix.cc.
30 
31 #include <errno.h>
32 #include <pthread.h>
33 #include <semaphore.h>
34 #include <stdarg.h>
35 #include <strings.h> // index
36 #include <sys/time.h>
37 #include <sys/mman.h> // mmap & munmap
38 #include <unistd.h> // sysconf
39 
40 #undef MAP_TYPE
41 
42 #include "v8.h"
43 
44 #include "platform-posix.h"
45 #include "platform.h"
46 #include "v8threads.h"
47 #include "vm-state-inl.h"
48 #include "win32-headers.h"
49 
50 namespace v8 {
51 namespace internal {
52 
53 // 0 is never a valid thread id
54 static const pthread_t kNoThread = (pthread_t) 0;
55 
56 
57 double ceiling(double x) {
58  return ceil(x);
59 }
60 
61 
62 static Mutex* limit_mutex = NULL;
63 
64 
65 void OS::PostSetUp() {
66  POSIXPostSetUp();
67 }
68 
69 uint64_t OS::CpuFeaturesImpliedByPlatform() {
70  return 0; // Nothing special about Cygwin.
71 }
72 
73 
74 int OS::ActivationFrameAlignment() {
75  // With gcc 4.4 the tree vectorization optimizer can generate code
76  // that requires 16 byte alignment such as movdqa on x86.
77  return 16;
78 }
79 
80 
81 void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
82  __asm__ __volatile__("" : : : "memory");
83  // An x86 store acts as a release barrier.
84  *ptr = value;
85 }
86 
87 const char* OS::LocalTimezone(double time) {
88  if (isnan(time)) return "";
89  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
90  struct tm* t = localtime(&tv);
91  if (NULL == t) return "";
92  return tzname[0]; // The location of the timezone string on Cygwin.
93 }
94 
95 
96 double OS::LocalTimeOffset() {
97  // On Cygwin, struct tm does not contain a tm_gmtoff field.
98  time_t utc = time(NULL);
99  ASSERT(utc != -1);
100  struct tm* loc = localtime(&utc);
101  ASSERT(loc != NULL);
102  // time - localtime includes any daylight savings offset, so subtract it.
103  return static_cast<double>((mktime(loc) - utc) * msPerSecond -
104  (loc->tm_isdst > 0 ? 3600 * msPerSecond : 0));
105 }
106 
107 
108 // We keep the lowest and highest addresses mapped as a quick way of
109 // determining that pointers are outside the heap (used mostly in assertions
110 // and verification). The estimate is conservative, i.e., not all addresses in
111 // 'allocated' space are actually allocated to our heap. The range is
112 // [lowest, highest), inclusive on the low and and exclusive on the high end.
113 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
114 static void* highest_ever_allocated = reinterpret_cast<void*>(0);
115 
116 
117 static void UpdateAllocatedSpaceLimits(void* address, int size) {
118  ASSERT(limit_mutex != NULL);
119  ScopedLock lock(limit_mutex);
120 
121  lowest_ever_allocated = Min(lowest_ever_allocated, address);
122  highest_ever_allocated =
123  Max(highest_ever_allocated,
124  reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
125 }
126 
127 
128 bool OS::IsOutsideAllocatedSpace(void* address) {
129  return address < lowest_ever_allocated || address >= highest_ever_allocated;
130 }
131 
132 
133 size_t OS::AllocateAlignment() {
134  return sysconf(_SC_PAGESIZE);
135 }
136 
137 
138 void* OS::Allocate(const size_t requested,
139  size_t* allocated,
140  bool is_executable) {
141  const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
142  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
143  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
144  if (mbase == MAP_FAILED) {
145  LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
146  return NULL;
147  }
148  *allocated = msize;
149  UpdateAllocatedSpaceLimits(mbase, msize);
150  return mbase;
151 }
152 
153 
154 void OS::Free(void* address, const size_t size) {
155  // TODO(1240712): munmap has a return value which is ignored here.
156  int result = munmap(address, size);
157  USE(result);
158  ASSERT(result == 0);
159 }
160 
161 
162 void OS::ProtectCode(void* address, const size_t size) {
163  DWORD old_protect;
164  VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
165 }
166 
167 
168 void OS::Guard(void* address, const size_t size) {
169  DWORD oldprotect;
170  VirtualProtect(address, size, PAGE_READONLY | PAGE_GUARD, &oldprotect);
171 }
172 
173 
174 void OS::Sleep(int milliseconds) {
175  unsigned int ms = static_cast<unsigned int>(milliseconds);
176  usleep(1000 * ms);
177 }
178 
179 
180 void OS::Abort() {
181  // Redirect to std abort to signal abnormal program termination.
182  abort();
183 }
184 
185 
186 void OS::DebugBreak() {
187  asm("int $3");
188 }
189 
190 
191 class PosixMemoryMappedFile : public OS::MemoryMappedFile {
192  public:
193  PosixMemoryMappedFile(FILE* file, void* memory, int size)
194  : file_(file), memory_(memory), size_(size) { }
195  virtual ~PosixMemoryMappedFile();
196  virtual void* memory() { return memory_; }
197  virtual int size() { return size_; }
198  private:
199  FILE* file_;
200  void* memory_;
201  int size_;
202 };
203 
204 
205 OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
206  FILE* file = fopen(name, "r+");
207  if (file == NULL) return NULL;
208 
209  fseek(file, 0, SEEK_END);
210  int size = ftell(file);
211 
212  void* memory =
213  mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
214  return new PosixMemoryMappedFile(file, memory, size);
215 }
216 
217 
218 OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
219  void* initial) {
220  FILE* file = fopen(name, "w+");
221  if (file == NULL) return NULL;
222  int result = fwrite(initial, size, 1, file);
223  if (result < 1) {
224  fclose(file);
225  return NULL;
226  }
227  void* memory =
228  mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
229  return new PosixMemoryMappedFile(file, memory, size);
230 }
231 
232 
233 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
234  if (memory_) munmap(memory_, size_);
235  fclose(file_);
236 }
237 
238 
239 void OS::LogSharedLibraryAddresses() {
240  // This function assumes that the layout of the file is as follows:
241  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
242  // If we encounter an unexpected situation we abort scanning further entries.
243  FILE* fp = fopen("/proc/self/maps", "r");
244  if (fp == NULL) return;
245 
246  // Allocate enough room to be able to store a full file name.
247  const int kLibNameLen = FILENAME_MAX + 1;
248  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
249 
250  i::Isolate* isolate = ISOLATE;
251  // This loop will terminate once the scanning hits an EOF.
252  while (true) {
253  uintptr_t start, end;
254  char attr_r, attr_w, attr_x, attr_p;
255  // Parse the addresses and permission bits at the beginning of the line.
256  if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
257  if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
258 
259  int c;
260  if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
261  // Found a read-only executable entry. Skip characters until we reach
262  // the beginning of the filename or the end of the line.
263  do {
264  c = getc(fp);
265  } while ((c != EOF) && (c != '\n') && (c != '/'));
266  if (c == EOF) break; // EOF: Was unexpected, just exit.
267 
268  // Process the filename if found.
269  if (c == '/') {
270  ungetc(c, fp); // Push the '/' back into the stream to be read below.
271 
272  // Read to the end of the line. Exit if the read fails.
273  if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
274 
275  // Drop the newline character read by fgets. We do not need to check
276  // for a zero-length string because we know that we at least read the
277  // '/' character.
278  lib_name[strlen(lib_name) - 1] = '\0';
279  } else {
280  // No library name found, just record the raw address range.
281  snprintf(lib_name, kLibNameLen,
282  "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
283  }
284  LOG(isolate, SharedLibraryEvent(lib_name, start, end));
285  } else {
286  // Entry not describing executable data. Skip to end of line to set up
287  // reading the next entry.
288  do {
289  c = getc(fp);
290  } while ((c != EOF) && (c != '\n'));
291  if (c == EOF) break;
292  }
293  }
294  free(lib_name);
295  fclose(fp);
296 }
297 
298 
299 void OS::SignalCodeMovingGC() {
300  // Nothing to do on Cygwin.
301 }
302 
303 
304 int OS::StackWalk(Vector<OS::StackFrame> frames) {
305  // Not supported on Cygwin.
306  return 0;
307 }
308 
309 
310 // The VirtualMemory implementation is taken from platform-win32.cc.
311 // The mmap-based virtual memory implementation as it is used on most posix
312 // platforms does not work well because Cygwin does not support MAP_FIXED.
313 // This causes VirtualMemory::Commit to not always commit the memory region
314 // specified.
315 
316 bool VirtualMemory::IsReserved() {
317  return address_ != NULL;
318 }
319 
320 
321 VirtualMemory::VirtualMemory(size_t size) {
322  address_ = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
323  size_ = size;
324 }
325 
326 
327 VirtualMemory::~VirtualMemory() {
328  if (IsReserved()) {
329  if (0 == VirtualFree(address(), 0, MEM_RELEASE)) address_ = NULL;
330  }
331 }
332 
333 
334 bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
335  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
336  if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
337  return false;
338  }
339 
340  UpdateAllocatedSpaceLimits(address, static_cast<int>(size));
341  return true;
342 }
343 
344 
345 bool VirtualMemory::Uncommit(void* address, size_t size) {
346  ASSERT(IsReserved());
347  return VirtualFree(address, size, MEM_DECOMMIT) != false;
348 }
349 
350 
351 bool VirtualMemory::Guard(void* address) {
352  if (NULL == VirtualAlloc(address,
353  OS::CommitPageSize(),
354  MEM_COMMIT,
355  PAGE_READONLY | PAGE_GUARD)) {
356  return false;
357  }
358  return true;
359 }
360 
361 
362 class Thread::PlatformData : public Malloced {
363  public:
364  PlatformData() : thread_(kNoThread) {}
365  pthread_t thread_; // Thread handle for pthread.
366 };
367 
368 
369 
370 
371 Thread::Thread(const Options& options)
372  : data_(new PlatformData()),
373  stack_size_(options.stack_size()) {
374  set_name(options.name());
375 }
376 
377 
378 Thread::~Thread() {
379  delete data_;
380 }
381 
382 
383 static void* ThreadEntry(void* arg) {
384  Thread* thread = reinterpret_cast<Thread*>(arg);
385  // This is also initialized by the first argument to pthread_create() but we
386  // don't know which thread will run first (the original thread or the new
387  // one) so we initialize it here too.
388  thread->data()->thread_ = pthread_self();
389  ASSERT(thread->data()->thread_ != kNoThread);
390  thread->Run();
391  return NULL;
392 }
393 
394 
395 void Thread::set_name(const char* name) {
396  strncpy(name_, name, sizeof(name_));
397  name_[sizeof(name_) - 1] = '\0';
398 }
399 
400 
401 void Thread::Start() {
402  pthread_attr_t* attr_ptr = NULL;
403  pthread_attr_t attr;
404  if (stack_size_ > 0) {
405  pthread_attr_init(&attr);
406  pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
407  attr_ptr = &attr;
408  }
409  pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
410  ASSERT(data_->thread_ != kNoThread);
411 }
412 
413 
414 void Thread::Join() {
415  pthread_join(data_->thread_, NULL);
416 }
417 
418 
419 static inline Thread::LocalStorageKey PthreadKeyToLocalKey(
420  pthread_key_t pthread_key) {
421  // We need to cast pthread_key_t to Thread::LocalStorageKey in two steps
422  // because pthread_key_t is a pointer type on Cygwin. This will probably not
423  // work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway.
424  STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
425  intptr_t ptr_key = reinterpret_cast<intptr_t>(pthread_key);
426  return static_cast<Thread::LocalStorageKey>(ptr_key);
427 }
428 
429 
430 static inline pthread_key_t LocalKeyToPthreadKey(
431  Thread::LocalStorageKey local_key) {
432  STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
433  intptr_t ptr_key = static_cast<intptr_t>(local_key);
434  return reinterpret_cast<pthread_key_t>(ptr_key);
435 }
436 
437 
438 Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
439  pthread_key_t key;
440  int result = pthread_key_create(&key, NULL);
441  USE(result);
442  ASSERT(result == 0);
443  return PthreadKeyToLocalKey(key);
444 }
445 
446 
447 void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
448  pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
449  int result = pthread_key_delete(pthread_key);
450  USE(result);
451  ASSERT(result == 0);
452 }
453 
454 
455 void* Thread::GetThreadLocal(LocalStorageKey key) {
456  pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
457  return pthread_getspecific(pthread_key);
458 }
459 
460 
461 void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
462  pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
463  pthread_setspecific(pthread_key, value);
464 }
465 
466 
467 void Thread::YieldCPU() {
468  sched_yield();
469 }
470 
471 
472 class CygwinMutex : public Mutex {
473  public:
474  CygwinMutex() {
475  pthread_mutexattr_t attrs;
476  memset(&attrs, 0, sizeof(attrs));
477 
478  int result = pthread_mutexattr_init(&attrs);
479  ASSERT(result == 0);
480  result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
481  ASSERT(result == 0);
482  result = pthread_mutex_init(&mutex_, &attrs);
483  ASSERT(result == 0);
484  }
485 
486  virtual ~CygwinMutex() { pthread_mutex_destroy(&mutex_); }
487 
488  virtual int Lock() {
489  int result = pthread_mutex_lock(&mutex_);
490  return result;
491  }
492 
493  virtual int Unlock() {
494  int result = pthread_mutex_unlock(&mutex_);
495  return result;
496  }
497 
498  virtual bool TryLock() {
499  int result = pthread_mutex_trylock(&mutex_);
500  // Return false if the lock is busy and locking failed.
501  if (result == EBUSY) {
502  return false;
503  }
504  ASSERT(result == 0); // Verify no other errors.
505  return true;
506  }
507 
508  private:
509  pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
510 };
511 
512 
513 Mutex* OS::CreateMutex() {
514  return new CygwinMutex();
515 }
516 
517 
518 class CygwinSemaphore : public Semaphore {
519  public:
520  explicit CygwinSemaphore(int count) { sem_init(&sem_, 0, count); }
521  virtual ~CygwinSemaphore() { sem_destroy(&sem_); }
522 
523  virtual void Wait();
524  virtual bool Wait(int timeout);
525  virtual void Signal() { sem_post(&sem_); }
526  private:
527  sem_t sem_;
528 };
529 
530 
531 void CygwinSemaphore::Wait() {
532  while (true) {
533  int result = sem_wait(&sem_);
534  if (result == 0) return; // Successfully got semaphore.
535  CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
536  }
537 }
538 
539 
540 #ifndef TIMEVAL_TO_TIMESPEC
541 #define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
542  (ts)->tv_sec = (tv)->tv_sec; \
543  (ts)->tv_nsec = (tv)->tv_usec * 1000; \
544 } while (false)
545 #endif
546 
547 
548 bool CygwinSemaphore::Wait(int timeout) {
549  const long kOneSecondMicros = 1000000; // NOLINT
550 
551  // Split timeout into second and nanosecond parts.
552  struct timeval delta;
553  delta.tv_usec = timeout % kOneSecondMicros;
554  delta.tv_sec = timeout / kOneSecondMicros;
555 
556  struct timeval current_time;
557  // Get the current time.
558  if (gettimeofday(&current_time, NULL) == -1) {
559  return false;
560  }
561 
562  // Calculate time for end of timeout.
563  struct timeval end_time;
564  timeradd(&current_time, &delta, &end_time);
565 
566  struct timespec ts;
567  TIMEVAL_TO_TIMESPEC(&end_time, &ts);
568  // Wait for semaphore signalled or timeout.
569  while (true) {
570  int result = sem_timedwait(&sem_, &ts);
571  if (result == 0) return true; // Successfully got semaphore.
572  if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
573  CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
574  }
575 }
576 
577 
578 Semaphore* OS::CreateSemaphore(int count) {
579  return new CygwinSemaphore(count);
580 }
581 
582 
583 // ----------------------------------------------------------------------------
584 // Cygwin profiler support.
585 //
586 // On Cygwin we use the same sampler implementation as on win32.
587 
588 class Sampler::PlatformData : public Malloced {
589  public:
590  // Get a handle to the calling thread. This is the thread that we are
591  // going to profile. We need to make a copy of the handle because we are
592  // going to use it in the sampler thread. Using GetThreadHandle() will
593  // not work in this case. We're using OpenThread because DuplicateHandle
594  // for some reason doesn't work in Chrome's sandbox.
595  PlatformData() : profiled_thread_(OpenThread(THREAD_GET_CONTEXT |
596  THREAD_SUSPEND_RESUME |
597  THREAD_QUERY_INFORMATION,
598  false,
599  GetCurrentThreadId())) {}
600 
601  ~PlatformData() {
602  if (profiled_thread_ != NULL) {
603  CloseHandle(profiled_thread_);
604  profiled_thread_ = NULL;
605  }
606  }
607 
608  HANDLE profiled_thread() { return profiled_thread_; }
609 
610  private:
611  HANDLE profiled_thread_;
612 };
613 
614 
615 class SamplerThread : public Thread {
616  public:
617  static const int kSamplerThreadStackSize = 64 * KB;
618 
619  explicit SamplerThread(int interval)
620  : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
621  interval_(interval) {}
622 
623  static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
624  static void TearDown() { delete mutex_; }
625 
626  static void AddActiveSampler(Sampler* sampler) {
627  ScopedLock lock(mutex_);
628  SamplerRegistry::AddActiveSampler(sampler);
629  if (instance_ == NULL) {
630  instance_ = new SamplerThread(sampler->interval());
631  instance_->Start();
632  } else {
633  ASSERT(instance_->interval_ == sampler->interval());
634  }
635  }
636 
637  static void RemoveActiveSampler(Sampler* sampler) {
638  ScopedLock lock(mutex_);
639  SamplerRegistry::RemoveActiveSampler(sampler);
640  if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
641  RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
642  delete instance_;
643  instance_ = NULL;
644  }
645  }
646 
647  // Implement Thread::Run().
648  virtual void Run() {
649  SamplerRegistry::State state;
650  while ((state = SamplerRegistry::GetState()) !=
651  SamplerRegistry::HAS_NO_SAMPLERS) {
652  bool cpu_profiling_enabled =
653  (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
654  bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
655  // When CPU profiling is enabled both JavaScript and C++ code is
656  // profiled. We must not suspend.
657  if (!cpu_profiling_enabled) {
658  if (rate_limiter_.SuspendIfNecessary()) continue;
659  }
660  if (cpu_profiling_enabled) {
661  if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
662  return;
663  }
664  }
665  if (runtime_profiler_enabled) {
666  if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
667  return;
668  }
669  }
670  OS::Sleep(interval_);
671  }
672  }
673 
674  static void DoCpuProfile(Sampler* sampler, void* raw_sampler_thread) {
675  if (!sampler->isolate()->IsInitialized()) return;
676  if (!sampler->IsProfiling()) return;
677  SamplerThread* sampler_thread =
678  reinterpret_cast<SamplerThread*>(raw_sampler_thread);
679  sampler_thread->SampleContext(sampler);
680  }
681 
682  static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
683  if (!sampler->isolate()->IsInitialized()) return;
684  sampler->isolate()->runtime_profiler()->NotifyTick();
685  }
686 
687  void SampleContext(Sampler* sampler) {
688  HANDLE profiled_thread = sampler->platform_data()->profiled_thread();
689  if (profiled_thread == NULL) return;
690 
691  // Context used for sampling the register state of the profiled thread.
692  CONTEXT context;
693  memset(&context, 0, sizeof(context));
694 
695  TickSample sample_obj;
696  TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
697  if (sample == NULL) sample = &sample_obj;
698 
699  static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
700  if (SuspendThread(profiled_thread) == kSuspendFailed) return;
701  sample->state = sampler->isolate()->current_vm_state();
702 
703  context.ContextFlags = CONTEXT_FULL;
704  if (GetThreadContext(profiled_thread, &context) != 0) {
705 #if V8_HOST_ARCH_X64
706  sample->pc = reinterpret_cast<Address>(context.Rip);
707  sample->sp = reinterpret_cast<Address>(context.Rsp);
708  sample->fp = reinterpret_cast<Address>(context.Rbp);
709 #else
710  sample->pc = reinterpret_cast<Address>(context.Eip);
711  sample->sp = reinterpret_cast<Address>(context.Esp);
712  sample->fp = reinterpret_cast<Address>(context.Ebp);
713 #endif
714  sampler->SampleStack(sample);
715  sampler->Tick(sample);
716  }
717  ResumeThread(profiled_thread);
718  }
719 
720  const int interval_;
721  RuntimeProfilerRateLimiter rate_limiter_;
722 
723  // Protects the process wide state below.
724  static Mutex* mutex_;
725  static SamplerThread* instance_;
726 
727  private:
728  DISALLOW_COPY_AND_ASSIGN(SamplerThread);
729 };
730 
731 
732 Mutex* SamplerThread::mutex_ = NULL;
733 SamplerThread* SamplerThread::instance_ = NULL;
734 
735 
736 void OS::SetUp() {
737  // Seed the random number generator.
738  // Convert the current time to a 64-bit integer first, before converting it
739  // to an unsigned. Going directly can cause an overflow and the seed to be
740  // set to all ones. The seed will be identical for different instances that
741  // call this setup code within the same millisecond.
742  uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
743  srandom(static_cast<unsigned int>(seed));
744  limit_mutex = CreateMutex();
745  SamplerThread::SetUp();
746 }
747 
748 
749 void OS::TearDown() {
750  SamplerThread::TearDown();
751  delete limit_mutex;
752 }
753 
754 
755 Sampler::Sampler(Isolate* isolate, int interval)
756  : isolate_(isolate),
757  interval_(interval),
758  profiling_(false),
759  active_(false),
760  samples_taken_(0) {
761  data_ = new PlatformData;
762 }
763 
764 
765 Sampler::~Sampler() {
766  ASSERT(!IsActive());
767  delete data_;
768 }
769 
770 
771 void Sampler::Start() {
772  ASSERT(!IsActive());
773  SetActive(true);
774  SamplerThread::AddActiveSampler(this);
775 }
776 
777 
778 void Sampler::Stop() {
779  ASSERT(IsActive());
780  SamplerThread::RemoveActiveSampler(this);
781  SetActive(false);
782 }
783 
784 
785 } } // namespace v8::internal