v8  3.11.10(node0.8.26)
V8 is Google's open source JavaScript engine
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
platform-linux.cc
Go to the documentation of this file.
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 // Platform specific code for Linux goes here. For the POSIX compatible parts
29 // the implementation is in platform-posix.cc.
30 
#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>  // strlen, strstr
#include <sys/prctl.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <time.h>  // localtime_r
40 
41 // Ubuntu Dapper requires memory pages to be marked as
42 // executable. Otherwise, OS raises an exception when executing code
43 // in that page.
44 #include <sys/types.h> // mmap & munmap
45 #include <sys/mman.h> // mmap & munmap
46 #include <sys/stat.h> // open
47 #include <fcntl.h> // open
48 #include <unistd.h> // sysconf
49 #if defined(__GLIBC__) && !defined(__UCLIBC__)
50 #include <execinfo.h> // backtrace, backtrace_symbols
51 #endif // defined(__GLIBC__) && !defined(__UCLIBC__)
52 #include <strings.h> // index
53 #include <errno.h>
54 #include <stdarg.h>
55 
56 #undef MAP_TYPE
57 
58 #include "v8.h"
59 
60 #include "platform-posix.h"
61 #include "platform.h"
62 #include "v8threads.h"
63 #include "vm-state-inl.h"
64 
65 
66 namespace v8 {
67 namespace internal {
68 
69 // 0 is never a valid thread id on Linux since tids and pids share a
70 // name space and pid 0 is reserved (see man 2 kill).
71 static const pthread_t kNoThread = (pthread_t) 0;
72 
73 
// Rounds |x| up to the nearest integral value. Thin portability wrapper
// around the C library's ceil().
double ceiling(double x) {
  double rounded_up = ceil(x);
  return rounded_up;
}
77 
78 
79 static Mutex* limit_mutex = NULL;
80 
81 
82 void OS::PostSetUp() {
84 }
85 
86 
88  return 0; // Linux runs on anything.
89 }
90 
91 
92 #ifdef __arm__
// Returns whether /proc/cpuinfo contains |search_string|.
//
// The previous one-pass matcher never re-examined characters consumed by a
// failed partial match, so a pattern overlapping itself (e.g. "aab" inside
// "aaab") could be missed. Reading the file into a buffer and using strstr
// eliminates that class of bug. /proc/cpuinfo is a (non-mmap-able)
// character special device with no fstat'able size, so we grow the buffer
// with fread until EOF.
static bool CPUInfoContainsString(const char* search_string) {
  FILE* f = fopen("/proc/cpuinfo", "r");
  if (f == NULL) return false;  // Treat unreadable cpuinfo as "absent".

  bool found = false;
  size_t capacity = 1024;
  size_t length = 0;
  char* buffer = reinterpret_cast<char*>(malloc(capacity));
  if (buffer != NULL) {
    for (;;) {
      if (length + 1 >= capacity) {
        size_t new_capacity = capacity * 2;
        char* grown = reinterpret_cast<char*>(realloc(buffer, new_capacity));
        if (grown == NULL) break;  // Keep what we have; length < capacity.
        buffer = grown;
        capacity = new_capacity;
      }
      // Leave room for the terminating NUL.
      size_t read = fread(buffer + length, 1, capacity - length - 1, f);
      if (read == 0) break;  // EOF (or error): stop reading.
      length += read;
    }
    // Invariant: length <= capacity - 1, so this write is in bounds.
    buffer[length] = '\0';
    found = strstr(buffer, search_string) != NULL;
    free(buffer);
  }
  fclose(f);
  return found;
}
125 
126 
127 bool OS::ArmCpuHasFeature(CpuFeature feature) {
128  const char* search_string = NULL;
129  // Simple detection of VFP at runtime for Linux.
130  // It is based on /proc/cpuinfo, which reveals hardware configuration
131  // to user-space applications. According to ARM (mid 2009), no similar
132  // facility is universally available on the ARM architectures,
133  // so it's up to individual OSes to provide such.
134  switch (feature) {
135  case VFP3:
136  search_string = "vfpv3";
137  break;
138  case ARMv7:
139  search_string = "ARMv7";
140  break;
141  default:
142  UNREACHABLE();
143  }
144 
145  if (CPUInfoContainsString(search_string)) {
146  return true;
147  }
148 
149  if (feature == VFP3) {
150  // Some old kernels will report vfp not vfpv3. Here we make a last attempt
151  // to detect vfpv3 by checking for vfp *and* neon, since neon is only
152  // available on architectures with vfpv3.
153  // Checking neon on its own is not enough as it is possible to have neon
154  // without vfp.
155  if (CPUInfoContainsString("vfp") && CPUInfoContainsString("neon")) {
156  return true;
157  }
158  }
159 
160  return false;
161 }
162 
163 
// Simple helper function to detect whether the C code is compiled with
// option -mfloat-abi=hard. The register d0 is loaded with 1.0 and the register
// pair r0, r1 is loaded with 0.0. If -mfloat-abi=hard is passed to GCC then
// calling this will return 1.0 and otherwise 0.0. (The caller reinterprets
// this as a double-returning function; see OS::ArmUsingHardFloat below.)
static void ArmUsingHardFloatHelper() {
  // Zero the soft-float result register pair (r1 is zeroed at the end).
  asm("mov r0, #0":::"r0");
#if defined(__VFP_FP__) && !defined(__SOFTFP__)
  // Load 0x3ff00000 into r1 using instructions available in both ARM
  // and Thumb mode. 0x3ff00000:00000000 is the IEEE-754 bit pattern of 1.0.
  asm("mov r1, #3":::"r1");
  asm("mov r2, #255":::"r2");
  asm("lsl r1, r1, #8":::"r1");
  asm("orr r1, r1, r2":::"r1");
  asm("lsl r1, r1, #20":::"r1");
  // For vmov d0, r0, r1 use ARM mode.
#ifdef __thumb__
  // vmov is not available in Thumb-1 mode: temporarily switch to ARM mode,
  // execute the vmov, then switch back (2f+1 sets the Thumb bit).
  asm volatile(
    "@ Enter ARM Mode \n\t"
    " adr r3, 1f \n\t"
    " bx r3 \n\t"
    " .ALIGN 4 \n\t"
    " .ARM \n"
    "1: vmov d0, r0, r1 \n\t"
    "@ Enter THUMB Mode\n\t"
    " adr r3, 2f+1 \n\t"
    " bx r3 \n\t"
    " .THUMB \n"
    "2: \n\t":::"r3");
#else
  asm("vmov d0, r0, r1");
#endif  // __thumb__
#endif  // defined(__VFP_FP__) && !defined(__SOFTFP__)
  asm("mov r1, #0":::"r1");
}
198 
199 
// Returns whether this code was compiled with the ARM hard-float ABI.
bool OS::ArmUsingHardFloat() {
  // Cast helper function from returning void to returning double.
  // With -mfloat-abi=hard a double result is read from d0 (which the helper
  // set to 1.0); with soft-float it is read from r0/r1 (set to 0.0).
  typedef double (*F)();
  F f = FUNCTION_CAST<F>(FUNCTION_ADDR(ArmUsingHardFloatHelper));
  return f() == 1.0;
}
206 #endif // def __arm__
207 
208 
209 #ifdef __mips__
// Returns whether the MIPS CPU reports |feature| in /proc/cpuinfo.
bool OS::MipsCpuHasFeature(CpuFeature feature) {
  const char* search_string = NULL;
  const char* file_name = "/proc/cpuinfo";
  // Simple detection of FPU at runtime for Linux.
  // It is based on /proc/cpuinfo, which reveals hardware configuration
  // to user-space applications. According to MIPS (early 2010), no similar
  // facility is universally available on the MIPS architectures,
  // so it's up to individual OSes to provide such.
  //
  // This is written as a straight shot one pass parser
  // and not using STL string and ifstream because,
  // on Linux, it's reading from a (non-mmap-able)
  // character special device.

  switch (feature) {
    case FPU:
      search_string = "FPU";
      break;
    default:
      UNREACHABLE();
  }

  FILE* f = NULL;
  const char* what = search_string;

  // An unreadable cpuinfo is treated as "feature absent".
  if (NULL == (f = fopen(file_name, "r")))
    return false;

  int k;
  while (EOF != (k = fgetc(f))) {
    if (k == *what) {
      // First character matched; try to match the rest in place.
      ++what;
      while ((*what != '\0') && (*what == fgetc(f))) {
        ++what;
      }
      if (*what == '\0') {
        fclose(f);
        return true;
      } else {
        // Mismatch: restart from the beginning of the search string.
        // NOTE(review): characters consumed during the failed partial match
        // are not re-examined, so self-overlapping patterns could be missed;
        // harmless for the single "FPU" token used here.
        what = search_string;
      }
    }
  }
  fclose(f);

  // Did not find string in the proc file.
  return false;
}
258 #endif // def __mips__
259 
260 
262 #ifdef V8_TARGET_ARCH_ARM
263  // On EABI ARM targets this is required for fp correctness in the
264  // runtime system.
265  return 8;
266 #elif V8_TARGET_ARCH_MIPS
267  return 8;
268 #endif
269  // With gcc 4.4 the tree vectorization optimizer can generate code
270  // that requires 16 byte alignment such as movdqa on x86.
271  return 16;
272 }
273 
274 
// Stores |value| to |*ptr| with release semantics: all preceding memory
// writes are made visible before the store itself.
void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
#if (defined(V8_TARGET_ARCH_ARM) && defined(__arm__)) || \
    (defined(V8_TARGET_ARCH_MIPS) && defined(__mips__))
  // Only use on ARM or MIPS hardware.
  MemoryBarrier();
#else
  // Compiler-only barrier: stops the compiler from reordering around the
  // store. No hardware fence is needed here because...
  __asm__ __volatile__("" : : : "memory");
  // An x86 store acts as a release barrier.
#endif
  *ptr = value;
}
286 
287 
288 const char* OS::LocalTimezone(double time) {
289  if (isnan(time)) return "";
290  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
291  struct tm* t = localtime(&tv);
292  if (NULL == t) return "";
293  return t->tm_zone;
294 }
295 
296 
297 double OS::LocalTimeOffset() {
298  time_t tv = time(NULL);
299  struct tm* t = localtime(&tv);
300  // tm_gmtoff includes any daylight savings offset, so subtract it.
301  return static_cast<double>(t->tm_gmtoff * msPerSecond -
302  (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
303 }
304 
305 
306 // We keep the lowest and highest addresses mapped as a quick way of
307 // determining that pointers are outside the heap (used mostly in assertions
308 // and verification). The estimate is conservative, i.e., not all addresses in
309 // 'allocated' space are actually allocated to our heap. The range is
310 // [lowest, highest), inclusive on the low and and exclusive on the high end.
311 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
312 static void* highest_ever_allocated = reinterpret_cast<void*>(0);
313 
314 
// Widens the conservatively tracked [lowest, highest) allocated range to
// cover the block of |size| bytes at |address|. Serialized on limit_mutex,
// which must already have been created.
static void UpdateAllocatedSpaceLimits(void* address, int size) {
  ASSERT(limit_mutex != NULL);
  ScopedLock lock(limit_mutex);

  lowest_ever_allocated = Min(lowest_ever_allocated, address);
  highest_ever_allocated =
      Max(highest_ever_allocated,
          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
}
324 
325 
326 bool OS::IsOutsideAllocatedSpace(void* address) {
327  return address < lowest_ever_allocated || address >= highest_ever_allocated;
328 }
329 
330 
331 size_t OS::AllocateAlignment() {
332  return sysconf(_SC_PAGESIZE);
333 }
334 
335 
// Allocates |requested| bytes (rounded up to page granularity) of
// read/write — and optionally executable — memory via anonymous mmap.
// On success stores the actual size in |*allocated| and returns the base
// address; returns NULL on failure.
void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool is_executable) {
  const size_t msize = RoundUp(requested, AllocateAlignment());
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  // Randomized placement hint.
  void* addr = OS::GetRandomMmapAddr();
  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mbase == MAP_FAILED) {
    LOG(i::Isolate::Current(),
        StringEvent("OS::Allocate", "mmap failed"));
    return NULL;
  }
  *allocated = msize;
  UpdateAllocatedSpaceLimits(mbase, msize);
  return mbase;
}
352 
353 
// Releases a block previously returned by OS::Allocate.
void OS::Free(void* address, const size_t size) {
  // TODO(1240712): munmap has a return value which is ignored here.
  int result = munmap(address, size);
  USE(result);
  ASSERT(result == 0);
}
360 
361 
362 void OS::Sleep(int milliseconds) {
363  unsigned int ms = static_cast<unsigned int>(milliseconds);
364  usleep(1000 * ms);
365 }
366 
367 
// Terminates the process abnormally; with --break-on-abort, traps into the
// debugger first.
void OS::Abort() {
  // Redirect to std abort to signal abnormal program termination.
  if (FLAG_break_on_abort) {
    DebugBreak();
  }
  abort();
}
375 
376 
// Emits the host architecture's software breakpoint instruction.
void OS::DebugBreak() {
// TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x,
// which is the architecture of generated code).
#if (defined(__arm__) || defined(__thumb__))
# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
  // bkpt exists from ARMv5 on; on older cores this is a silent no-op.
  asm("bkpt 0");
# endif
#elif defined(__mips__)
  asm("break");
#else
  asm("int $3");
#endif
}
390 
391 
// Memory-mapped view of a stdio FILE. Owns both the FILE handle and the
// mapping; both are released by the destructor.
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
    : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }  // Base address of the mapping.
  virtual int size() { return size_; }        // Mapped length in bytes.
 private:
  FILE* file_;
  void* memory_;
  int size_;
};
404 
405 
406 OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
407  FILE* file = fopen(name, "r+");
408  if (file == NULL) return NULL;
409 
410  fseek(file, 0, SEEK_END);
411  int size = ftell(file);
412 
413  void* memory =
414  mmap(OS::GetRandomMmapAddr(),
415  size,
416  PROT_READ | PROT_WRITE,
417  MAP_SHARED,
418  fileno(file),
419  0);
420  return new PosixMemoryMappedFile(file, memory, size);
421 }
422 
423 
424 OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
425  void* initial) {
426  FILE* file = fopen(name, "w+");
427  if (file == NULL) return NULL;
428  int result = fwrite(initial, size, 1, file);
429  if (result < 1) {
430  fclose(file);
431  return NULL;
432  }
433  void* memory =
434  mmap(OS::GetRandomMmapAddr(),
435  size,
436  PROT_READ | PROT_WRITE,
437  MAP_SHARED,
438  fileno(file),
439  0);
440  return new PosixMemoryMappedFile(file, memory, size);
441 }
442 
443 
445  if (memory_) OS::Free(memory_, size_);
446  fclose(file_);
447 }
448 
449 
451  // This function assumes that the layout of the file is as follows:
452  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
453  // If we encounter an unexpected situation we abort scanning further entries.
454  FILE* fp = fopen("/proc/self/maps", "r");
455  if (fp == NULL) return;
456 
457  // Allocate enough room to be able to store a full file name.
458  const int kLibNameLen = FILENAME_MAX + 1;
459  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
460 
461  i::Isolate* isolate = ISOLATE;
462  // This loop will terminate once the scanning hits an EOF.
463  while (true) {
464  uintptr_t start, end;
465  char attr_r, attr_w, attr_x, attr_p;
466  // Parse the addresses and permission bits at the beginning of the line.
467  if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
468  if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
469 
470  int c;
471  if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
472  // Found a read-only executable entry. Skip characters until we reach
473  // the beginning of the filename or the end of the line.
474  do {
475  c = getc(fp);
476  } while ((c != EOF) && (c != '\n') && (c != '/'));
477  if (c == EOF) break; // EOF: Was unexpected, just exit.
478 
479  // Process the filename if found.
480  if (c == '/') {
481  ungetc(c, fp); // Push the '/' back into the stream to be read below.
482 
483  // Read to the end of the line. Exit if the read fails.
484  if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
485 
486  // Drop the newline character read by fgets. We do not need to check
487  // for a zero-length string because we know that we at least read the
488  // '/' character.
489  lib_name[strlen(lib_name) - 1] = '\0';
490  } else {
491  // No library name found, just record the raw address range.
492  snprintf(lib_name, kLibNameLen,
493  "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
494  }
495  LOG(isolate, SharedLibraryEvent(lib_name, start, end));
496  } else {
497  // Entry not describing executable data. Skip to end of line to set up
498  // reading the next entry.
499  do {
500  c = getc(fp);
501  } while ((c != EOF) && (c != '\n'));
502  if (c == EOF) break;
503  }
504  }
505  free(lib_name);
506  fclose(fp);
507 }
508 
509 
510 static const char kGCFakeMmap[] = "/tmp/__v8_gc__";
511 
512 
513 void OS::SignalCodeMovingGC() {
514  // Support for ll_prof.py.
515  //
516  // The Linux profiler built into the kernel logs all mmap's with
517  // PROT_EXEC so that analysis tools can properly attribute ticks. We
518  // do a mmap with a name known by ll_prof.py and immediately munmap
519  // it. This injects a GC marker into the stream of events generated
520  // by the kernel and allows us to synchronize V8 code log and the
521  // kernel log.
522  int size = sysconf(_SC_PAGESIZE);
523  FILE* f = fopen(kGCFakeMmap, "w+");
524  void* addr = mmap(OS::GetRandomMmapAddr(),
525  size,
526  PROT_READ | PROT_EXEC,
527  MAP_PRIVATE,
528  fileno(f),
529  0);
530  ASSERT(addr != MAP_FAILED);
531  OS::Free(addr, size);
532  fclose(f);
533 }
534 
535 
536 int OS::StackWalk(Vector<OS::StackFrame> frames) {
537  // backtrace is a glibc extension.
538 #if defined(__GLIBC__) && !defined(__UCLIBC__)
539  int frames_size = frames.length();
540  ScopedVector<void*> addresses(frames_size);
541 
542  int frames_count = backtrace(addresses.start(), frames_size);
543 
544  char** symbols = backtrace_symbols(addresses.start(), frames_count);
545  if (symbols == NULL) {
546  return kStackWalkError;
547  }
548 
549  for (int i = 0; i < frames_count; i++) {
550  frames[i].address = addresses[i];
551  // Format a text representation of the frame based on the information
552  // available.
554  "%s",
555  symbols[i]);
556  // Make sure line termination is in place.
557  frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
558  }
559 
560  free(symbols);
561 
562  return frames_count;
563 #else // defined(__GLIBC__) && !defined(__UCLIBC__)
564  return 0;
565 #endif // defined(__GLIBC__) && !defined(__UCLIBC__)
566 }
567 
568 
569 // Constants used for mmap.
570 static const int kMmapFd = -1;
571 static const int kMmapFdOffset = 0;
572 
573 VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
574 
// Reserves (without committing) a region of |size| bytes at an
// OS-chosen, randomized address.
VirtualMemory::VirtualMemory(size_t size) {
  address_ = ReserveRegion(size);
  size_ = size;
}
579 
580 
// Reserves |size| bytes aligned to |alignment| by over-reserving
// size + alignment, then unmapping the misaligned prefix and the unused
// suffix. On failure the object is left unreserved (address_ == NULL).
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
    : address_(NULL), size_(0) {
  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  void* reservation = mmap(OS::GetRandomMmapAddr(),
                           request_size,
                           PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                           kMmapFd,
                           kMmapFdOffset);
  if (reservation == MAP_FAILED) return;

  Address base = static_cast<Address>(reservation);
  Address aligned_base = RoundUp(base, alignment);
  ASSERT_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  ASSERT_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  // After trimming, exactly the aligned block remains.
  ASSERT(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
}
619 
620 
622  if (IsReserved()) {
623  bool result = ReleaseRegion(address(), size());
624  ASSERT(result);
625  USE(result);
626  }
627 }
628 
629 
631  return address_ != NULL;
632 }
633 
634 
// Drops the bookkeeping for the region without unmapping it, so the
// destructor will not release it.
void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}
639 
640 
// Commits |size| bytes at |address| inside this reservation as
// read/write (and executable if requested). Returns false on failure.
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}
644 
645 
// Returns previously committed pages to the reserved (inaccessible) state.
bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}
649 
650 
// Turns one commit-page at |address| into an inaccessible guard page.
bool VirtualMemory::Guard(void* address) {
  OS::Guard(address, OS::CommitPageSize());
  return true;
}
655 
656 
657 void* VirtualMemory::ReserveRegion(size_t size) {
658  void* result = mmap(OS::GetRandomMmapAddr(),
659  size,
660  PROT_NONE,
661  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
662  kMmapFd,
663  kMmapFdOffset);
664 
665  if (result == MAP_FAILED) return NULL;
666 
667  return result;
668 }
669 
670 
// Maps the already-reserved range [base, base+size) as read/write
// (optionally executable) memory; MAP_FIXED replaces the PROT_NONE
// reservation in place. Returns false if the mmap fails.
bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(base,
                         size,
                         prot,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
                         kMmapFd,
                         kMmapFdOffset)) {
    return false;
  }

  UpdateAllocatedSpaceLimits(base, size);
  return true;
}
685 
686 
687 bool VirtualMemory::UncommitRegion(void* base, size_t size) {
688  return mmap(base,
689  size,
690  PROT_NONE,
691  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
692  kMmapFd,
693  kMmapFdOffset) != MAP_FAILED;
694 }
695 
696 
// Unmaps an entire reserved region; returns true on success.
bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
  return munmap(base, size) == 0;
}
700 
701 
// Per-thread platform state; just the pthread handle, initialized to
// kNoThread until the thread is started.
class Thread::PlatformData : public Malloced {
 public:
  PlatformData() : thread_(kNoThread) {}

  pthread_t thread_;  // Thread handle for pthread.
};
708 
// Constructs a not-yet-started thread with the given name and stack size.
Thread::Thread(const Options& options)
    : data_(new PlatformData()),
      stack_size_(options.stack_size()) {
  set_name(options.name());
}
714 
715 
Thread::~Thread() {
  // Frees only the platform data; does not join or stop a running thread.
  delete data_;
}
719 
720 
// pthread entry trampoline: records the thread's name and handle, then
// dispatches to Thread::Run().
static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  // This is also initialized by the first argument to pthread_create() but we
  // don't know which thread will run first (the original thread or the new
  // one) so we initialize it here too.
#ifdef PR_SET_NAME
  // Make the name visible to debuggers and /proc.
  prctl(PR_SET_NAME,
        reinterpret_cast<unsigned long>(thread->name()),  // NOLINT
        0, 0, 0);
#endif
  thread->data()->thread_ = pthread_self();
  ASSERT(thread->data()->thread_ != kNoThread);
  thread->Run();
  return NULL;
}
736 
737 
// Copies |name| into the fixed-size name_ buffer, truncating if necessary
// and always NUL-terminating.
void Thread::set_name(const char* name) {
  strncpy(name_, name, sizeof(name_));
  name_[sizeof(name_) - 1] = '\0';
}
742 
743 
744 void Thread::Start() {
745  pthread_attr_t* attr_ptr = NULL;
746  pthread_attr_t attr;
747  if (stack_size_ > 0) {
748  pthread_attr_init(&attr);
749  pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
750  attr_ptr = &attr;
751  }
752  int result = pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
753  CHECK_EQ(0, result);
754  ASSERT(data_->thread_ != kNoThread);
755 }
756 
757 
// Blocks until the thread's Run() has completed.
void Thread::Join() {
  pthread_join(data_->thread_, NULL);
}
761 
762 
764  pthread_key_t key;
765  int result = pthread_key_create(&key, NULL);
766  USE(result);
767  ASSERT(result == 0);
768  return static_cast<LocalStorageKey>(key);
769 }
770 
771 
773  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
774  int result = pthread_key_delete(pthread_key);
775  USE(result);
776  ASSERT(result == 0);
777 }
778 
779 
781  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
782  return pthread_getspecific(pthread_key);
783 }
784 
785 
// Stores |value| in the calling thread's slot for |key|.
void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  pthread_setspecific(pthread_key, value);
}
790 
791 
// Relinquishes the remainder of the calling thread's time slice.
void Thread::YieldCPU() {
  sched_yield();
}
795 
796 
797 class LinuxMutex : public Mutex {
798  public:
800  pthread_mutexattr_t attrs;
801  int result = pthread_mutexattr_init(&attrs);
802  ASSERT(result == 0);
803  result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
804  ASSERT(result == 0);
805  result = pthread_mutex_init(&mutex_, &attrs);
806  ASSERT(result == 0);
807  USE(result);
808  }
809 
810  virtual ~LinuxMutex() { pthread_mutex_destroy(&mutex_); }
811 
812  virtual int Lock() {
813  int result = pthread_mutex_lock(&mutex_);
814  return result;
815  }
816 
817  virtual int Unlock() {
818  int result = pthread_mutex_unlock(&mutex_);
819  return result;
820  }
821 
822  virtual bool TryLock() {
823  int result = pthread_mutex_trylock(&mutex_);
824  // Return false if the lock is busy and locking failed.
825  if (result == EBUSY) {
826  return false;
827  }
828  ASSERT(result == 0); // Verify no other errors.
829  return true;
830  }
831 
832  private:
833  pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
834 };
835 
836 
// Factory for the platform mutex (recursive pthread mutex, see LinuxMutex).
// Caller owns the returned object.
Mutex* OS::CreateMutex() {
  return new LinuxMutex();
}
840 
841 
// Counting semaphore backed by a process-local POSIX sem_t.
class LinuxSemaphore : public Semaphore {
 public:
  explicit LinuxSemaphore(int count) { sem_init(&sem_, 0, count); }
  virtual ~LinuxSemaphore() { sem_destroy(&sem_); }

  virtual void Wait();             // Blocks until the count is positive.
  virtual bool Wait(int timeout);  // Timeout in microseconds; false on expiry.
  virtual void Signal() { sem_post(&sem_); }
 private:
  sem_t sem_;
};
853 
854 
856  while (true) {
857  int result = sem_wait(&sem_);
858  if (result == 0) return; // Successfully got semaphore.
859  CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
860  }
861 }
862 
863 
864 #ifndef TIMEVAL_TO_TIMESPEC
865 #define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
866  (ts)->tv_sec = (tv)->tv_sec; \
867  (ts)->tv_nsec = (tv)->tv_usec * 1000; \
868 } while (false)
869 #endif
870 
871 
// Waits on the semaphore for at most |timeout| microseconds. Returns true
// if the semaphore was acquired; false on timeout or if the current time
// cannot be read.
bool LinuxSemaphore::Wait(int timeout) {
  const long kOneSecondMicros = 1000000;  // NOLINT

  // Split timeout into second and nanosecond parts.
  struct timeval delta;
  delta.tv_usec = timeout % kOneSecondMicros;
  delta.tv_sec = timeout / kOneSecondMicros;

  struct timeval current_time;
  // Get the current time.
  if (gettimeofday(&current_time, NULL) == -1) {
    return false;
  }

  // Calculate time for end of timeout (sem_timedwait takes an absolute
  // deadline, not a relative duration).
  struct timeval end_time;
  timeradd(&current_time, &delta, &end_time);

  struct timespec ts;
  TIMEVAL_TO_TIMESPEC(&end_time, &ts);
  // Wait for semaphore signalled or timeout.
  while (true) {
    int result = sem_timedwait(&sem_, &ts);
    if (result == 0) return true;  // Successfully got semaphore.
    if (result > 0) {
      // For glibc prior to 2.3.4 sem_timedwait returns the error instead of -1.
      errno = result;
      result = -1;
    }
    if (result == -1 && errno == ETIMEDOUT) return false;  // Timeout.
    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
  }
}
905 
906 
// Factory for the platform semaphore with the given initial count.
// Caller owns the returned object.
Semaphore* OS::CreateSemaphore(int count) {
  return new LinuxSemaphore(count);
}
910 
911 
912 #if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__))
913 // Android runs a fairly new Linux kernel, so signal info is there,
914 // but the C library doesn't have the structs defined.
915 
916 struct sigcontext {
917  uint32_t trap_no;
918  uint32_t error_code;
919  uint32_t oldmask;
920  uint32_t gregs[16];
921  uint32_t arm_cpsr;
922  uint32_t fault_address;
923 };
924 typedef uint32_t __sigset_t;
925 typedef struct sigcontext mcontext_t;
926 typedef struct ucontext {
927  uint32_t uc_flags;
928  struct ucontext* uc_link;
929  stack_t uc_stack;
930  mcontext_t uc_mcontext;
931  __sigset_t uc_sigmask;
932 } ucontext_t;
933 enum ArmRegisters {R15 = 15, R13 = 13, R11 = 11};
934 
935 #elif !defined(__GLIBC__) && defined(__mips__)
936 // MIPS version of sigcontext, for Android bionic.
937 struct sigcontext {
938  uint32_t regmask;
939  uint32_t status;
940  uint64_t pc;
941  uint64_t gregs[32];
942  uint64_t fpregs[32];
943  uint32_t acx;
944  uint32_t fpc_csr;
945  uint32_t fpc_eir;
946  uint32_t used_math;
947  uint32_t dsp;
948  uint64_t mdhi;
949  uint64_t mdlo;
950  uint32_t hi1;
951  uint32_t lo1;
952  uint32_t hi2;
953  uint32_t lo2;
954  uint32_t hi3;
955  uint32_t lo3;
956 };
957 typedef uint32_t __sigset_t;
958 typedef struct sigcontext mcontext_t;
959 typedef struct ucontext {
960  uint32_t uc_flags;
961  struct ucontext* uc_link;
962  stack_t uc_stack;
963  mcontext_t uc_mcontext;
964  __sigset_t uc_sigmask;
965 } ucontext_t;
966 
967 #elif !defined(__GLIBC__) && defined(__i386__)
968 // x86 version for Android.
969 struct sigcontext {
970  uint32_t gregs[19];
971  void* fpregs;
972  uint32_t oldmask;
973  uint32_t cr2;
974 };
975 
976 typedef uint32_t __sigset_t;
977 typedef struct sigcontext mcontext_t;
978 typedef struct ucontext {
979  uint32_t uc_flags;
980  struct ucontext* uc_link;
981  stack_t uc_stack;
982  mcontext_t uc_mcontext;
983  __sigset_t uc_sigmask;
984 } ucontext_t;
985 enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };
986 #endif
987 
988 
// Returns the kernel thread id of the calling thread.
static int GetThreadID() {
  // Glibc doesn't provide a wrapper for gettid(2), so invoke the raw
  // system call directly.
#if defined(ANDROID)
  const int tid = syscall(__NR_gettid);
#else
  const int tid = syscall(SYS_gettid);
#endif
  return tid;
}
997 
998 
// SIGPROF handler: extracts pc/sp/fp and the current VM state from the
// interrupted thread's signal context and forwards the tick to the
// active sampler.
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
  USE(info);
  if (signal != SIGPROF) return;
  Isolate* isolate = Isolate::UncheckedCurrent();
  if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
    // We require a fully initialized and entered isolate.
    return;
  }
  if (v8::Locker::IsActive() &&
      !isolate->thread_manager()->IsLockedByCurrentThread()) {
    // The interrupted thread does not hold the isolate lock; skip the tick.
    return;
  }

  Sampler* sampler = isolate->logger()->sampler();
  if (sampler == NULL || !sampler->IsActive()) return;

  // Use a stack-allocated sample if no profiler-owned buffer is available.
  TickSample sample_obj;
  TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
  if (sample == NULL) sample = &sample_obj;

  // Extracting the sample from the context is extremely machine dependent.
  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
  mcontext_t& mcontext = ucontext->uc_mcontext;
  sample->state = isolate->current_vm_state();
#if V8_HOST_ARCH_IA32
  sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
  sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
  sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
#elif V8_HOST_ARCH_X64
  sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
  sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
  sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
#elif V8_HOST_ARCH_ARM
// An undefined macro evaluates to 0, so this applies to Android's Bionic also.
#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
  sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
  sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
  sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
#else
  sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
  sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
  sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
#endif  // (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
#elif V8_HOST_ARCH_MIPS
  sample->pc = reinterpret_cast<Address>(mcontext.pc);
  sample->sp = reinterpret_cast<Address>(mcontext.gregs[29]);
  sample->fp = reinterpret_cast<Address>(mcontext.gregs[30]);
#endif  // V8_HOST_ARCH_*
  sampler->SampleStack(sample);
  sampler->Tick(sample);
}
1050 
1051 
// Per-sampler platform state: the kernel thread id of the sampled VM
// thread, captured at construction time.
class Sampler::PlatformData : public Malloced {
 public:
  PlatformData() : vm_tid_(GetThreadID()) {}

  int vm_tid() const { return vm_tid_; }

 private:
  const int vm_tid_;
};
1061 
1062 
// SignalSender: singleton background thread that periodically delivers
// SIGPROF to the VM threads of active samplers (CPU profiling) and/or
// ticks the runtime profiler.  All static state is guarded by mutex_.
//
// NOTE(review): this listing came through a doc extractor that elided
// some source lines (the embedded line numbers skip); each gap is
// flagged below and must be restored from the upstream file.
1063 class SignalSender : public Thread {
1064  public:
// NOTE(review): elided line 1065 presumably read "enum SleepInterval {",
// opening the enumerator list closed by the "};" below -- confirm.
1066  HALF_INTERVAL,
1067  FULL_INTERVAL
1068  };
1069 
// Stack size for the sender thread (KB comes from globals.h).
1070  static const int kSignalSenderStackSize = 64 * KB;
1071 
// Remembers the process id (thread-group id) so SIGPROF can later be
// routed to one specific thread of this process with tgkill(2).
1072  explicit SignalSender(int interval)
1073  : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
1074  vm_tgid_(getpid()),
1075  interval_(interval) {}
1076 
// Lazily creates / destroys the mutex protecting the static state.
1077  static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
1078  static void TearDown() { delete mutex_; }
1079 
// Installs ProfilerSignalHandler for SIGPROF, saving the previous
// disposition for later restoration.  SA_SIGINFO selects the
// three-argument handler form; SA_RESTART limits spurious EINTRs.
1080  static void InstallSignalHandler() {
1081  struct sigaction sa;
1082  sa.sa_sigaction = ProfilerSignalHandler;
1083  sigemptyset(&sa.sa_mask);
1084  sa.sa_flags = SA_RESTART | SA_SIGINFO;
1085  signal_handler_installed_ =
1086  (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
1087  }
1088 
// Reinstates the SIGPROF disposition saved by InstallSignalHandler().
1089  static void RestoreSignalHandler() {
1090  if (signal_handler_installed_) {
1091  sigaction(SIGPROF, &old_signal_handler_, 0);
1092  signal_handler_installed_ = false;
1093  }
1094  }
1095 
// Registers a sampler; starts the singleton sender thread on first use.
1096  static void AddActiveSampler(Sampler* sampler) {
1097  ScopedLock lock(mutex_);
// NOTE(review): elided line 1098 -- presumably
// "SamplerRegistry::AddActiveSampler(sampler);" -- confirm upstream.
1099  if (instance_ == NULL) {
1100  // Start a thread that will send SIGPROF signal to VM threads,
1101  // when CPU profiling will be enabled.
1102  instance_ = new SignalSender(sampler->interval());
1103  instance_->Start();
1104  } else {
// All samplers must share one interval; the singleton keeps the first.
1105  ASSERT(instance_->interval_ == sampler->interval());
1106  }
1107  }
1108 
// Unregisters a sampler; tears the sender thread down when none remain.
1109  static void RemoveActiveSampler(Sampler* sampler) {
1110  ScopedLock lock(mutex_);
// NOTE(review): elided lines 1111-1113 -- presumably the
// SamplerRegistry::RemoveActiveSampler(sampler) call plus the
// "no samplers left" check / thread shutdown guarding this cleanup.
1114  delete instance_;
1115  instance_ = NULL;
1116  RestoreSignalHandler();
1117  }
1118  }
1119 
1120  // Implement Thread::Run().
// Main loop: keeps the signal handler in sync with the profiling state,
// then ticks the CPU and/or runtime profilers once per interval until
// no samplers remain registered.
1121  virtual void Run() {
1122  SamplerRegistry::State state;
1123  while ((state = SamplerRegistry::GetState()) !=
// NOTE(review): elided line 1124 -- presumably
// "SamplerRegistry::HAS_NO_SAMPLERS) {" completing the loop condition.
1125  bool cpu_profiling_enabled =
// NOTE(review): elided line 1126 -- presumably
// "(state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);".
1127  bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
1128  if (cpu_profiling_enabled && !signal_handler_installed_) {
1129  InstallSignalHandler();
1130  } else if (!cpu_profiling_enabled && signal_handler_installed_) {
1131  RestoreSignalHandler();
1132  }
1133  // When CPU profiling is enabled both JavaScript and C++ code is
1134  // profiled. We must not suspend.
1135  if (!cpu_profiling_enabled) {
1136  if (rate_limiter_.SuspendIfNecessary()) continue;
1137  }
// Both profilers active: interleave the two at half-interval spacing.
1138  if (cpu_profiling_enabled && runtime_profiler_enabled) {
1139  if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
1140  return;
1141  }
1142  Sleep(HALF_INTERVAL);
1143  if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
1144  return;
1145  }
1146  Sleep(HALF_INTERVAL);
1147  } else {
1148  if (cpu_profiling_enabled) {
1149  if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
1150  this)) {
1151  return;
1152  }
1153  }
1154  if (runtime_profiler_enabled) {
1155  if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
1156  NULL)) {
1157  return;
1158  }
1159  }
1160  Sleep(FULL_INTERVAL);
1161  }
1162  }
1163  }
1164 
// Per-sampler callback: sends SIGPROF to the sampled thread, but only
// while that sampler is actively profiling.
1165  static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
1166  if (!sampler->IsProfiling()) return;
1167  SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
1168  sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
1169  }
1170 
// Per-sampler callback: forwards a tick to the isolate's runtime
// profiler; skips isolates that have not finished initializing.
1171  static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
1172  if (!sampler->isolate()->IsInitialized()) return;
1173  sampler->isolate()->runtime_profiler()->NotifyTick();
1174  }
1175 
// Delivers SIGPROF to thread `tid` of this process via the raw tgkill
// syscall; a no-op if our handler is not currently installed.
1176  void SendProfilingSignal(int tid) {
1177  if (!signal_handler_installed_) return;
1178  // Glibc doesn't provide a wrapper for tgkill(2).
1179 #if defined(ANDROID)
1180  syscall(__NR_tgkill, vm_tgid_, tid, SIGPROF);
1181 #else
1182  syscall(SYS_tgkill, vm_tgid_, tid, SIGPROF);
1183 #endif
1184  }
1185 
// Sleeps for a full or half sampling interval (interval_ is in ms).
1186  void Sleep(SleepInterval full_or_half) {
1187  // Convert ms to us and subtract 100 us to compensate delays
1188  // occurring during signal delivery.
1189  useconds_t interval = interval_ * 1000 - 100;
1190  if (full_or_half == HALF_INTERVAL) interval /= 2;
1191 #if defined(ANDROID)
1192  usleep(interval);
1193 #else
1194  int result = usleep(interval);
1195 #ifdef DEBUG
1196  if (result != 0 && errno != EINTR) {
1197  fprintf(stderr,
1198  "SignalSender usleep error; interval = %u, errno = %d\n",
1199  interval,
1200  errno);
1201  ASSERT(result == 0 || errno == EINTR);
1202  }
1203 #endif // DEBUG
1204  USE(result);
1205 #endif // ANDROID
1206  }
1207 
1208  const int vm_tgid_;
1209  const int interval_;
1210  RuntimeProfilerRateLimiter rate_limiter_;
1211 
1212  // Protects the process wide state below.
1213  static Mutex* mutex_;
1214  static SignalSender* instance_;
1215  static bool signal_handler_installed_;
1216  static struct sigaction old_signal_handler_;
1217 
1218  private:
// NOTE(review): elided line 1219 -- presumably
// "DISALLOW_COPY_AND_ASSIGN(SignalSender);" -- confirm upstream.
1220 };
1221 
1222 
// Out-of-class definitions of SignalSender's static state.
// NOTE(review): the doc extractor elided lines 1223-1224 and 1226,
// which presumably defined mutex_, instance_ and
// signal_handler_installed_; only old_signal_handler_ survived here.
1225 struct sigaction SignalSender::old_signal_handler_;
1227 
1228 
// Process-wide platform initialization: seeds the libc PRNG, creates
// the mutex used by the allocation-limit bookkeeping, and on ARM
// verifies the float ABI V8 was built for matches the C runtime's.
1229 void OS::SetUp() {
1230  // Seed the random number generator. We preserve microsecond resolution.
1231  uint64_t seed = Ticks() ^ (getpid() << 16);
1232  srandom(static_cast<unsigned int>(seed));
1233  limit_mutex = CreateMutex();
1234 
1235 #ifdef __arm__
1236  // When running on ARM hardware check that the EABI used by V8 and
1237  // by the C code is the same.
1238  bool hard_float = OS::ArmUsingHardFloat();
1239  if (hard_float) {
1240 #if !USE_EABI_HARDFLOAT
1241  PrintF("ERROR: Binary compiled with -mfloat-abi=hard but without "
1242  "-DUSE_EABI_HARDFLOAT\n");
1243  exit(1);
1244 #endif
1245  } else {
1246 #if USE_EABI_HARDFLOAT
1247  PrintF("ERROR: Binary not compiled with -mfloat-abi=hard but with "
1248  "-DUSE_EABI_HARDFLOAT\n");
1249  exit(1);
1250 #endif
1251  }
1252 #endif
// NOTE(review): elided line 1253 -- presumably "SignalSender::SetUp();"
// -- confirm against the upstream file.
1254 }
1255 
1256 
// Process-wide teardown, reversing OS::SetUp().
1257 void OS::TearDown() {
// NOTE(review): elided line 1258 -- presumably
// "SignalSender::TearDown();" -- confirm against the upstream file.
1259  delete limit_mutex;
1260 }
1261 
1262 
// Creates a sampler for `isolate` ticking every `interval` ms (the
// unit is established by SignalSender::Sleep's ms-to-us conversion).
// Starts inactive and not profiling; allocates the Linux PlatformData,
// which records the constructing (VM) thread's id.
1263 Sampler::Sampler(Isolate* isolate, int interval)
1264  : isolate_(isolate),
1265  interval_(interval),
1266  profiling_(false),
1267  active_(false),
1268  samples_taken_(0) {
1269  data_ = new PlatformData;
1270 }
1271 
1272 
// Destructor body (NOTE(review): the "Sampler::~Sampler() {" signature
// on elided line 1273 was dropped by the doc extractor).  A sampler
// must be stopped before destruction; frees its PlatformData.
1274  ASSERT(!IsActive());
1275  delete data_;
1276 }
1277 
1278 
// Activates the sampler; must not already be active.
1279 void Sampler::Start() {
1280  ASSERT(!IsActive());
1281  SetActive(true);
// NOTE(review): elided line 1282 -- presumably
// "SignalSender::AddActiveSampler(this);" -- confirm upstream.
1283 }
1284 
1285 
// Deactivates the sampler; must currently be active.
1286 void Sampler::Stop() {
1287  ASSERT(IsActive());
// NOTE(review): elided line 1288 -- presumably
// "SignalSender::RemoveActiveSampler(this);" -- confirm upstream.
1289  SetActive(false);
1290 }
1291 
1292 
1293 } } // namespace v8::internal
byte * Address
Definition: globals.h:172
static void * GetThreadLocal(LocalStorageKey key)
#define CHECK_EQ(expected, value)
Definition: checks.h:219
static void Free(void *address, const size_t size)
#define V8PRIxPTR
Definition: globals.h:204
char ** backtrace_symbols(void *const *, int) __attribute__((weak_import))
void PrintF(const char *format,...)
Definition: v8utils.cc:40
Thread(const Options &options)
PlatformData * platform_data()
Definition: platform.h:745
#define LOG(isolate, Call)
Definition: log.h:81
const int KB
Definition: globals.h:221
void SendProfilingSignal(pthread_t tid)
bool IsActive() const
Definition: platform.h:734
Isolate * isolate()
Definition: platform.h:736
static void SignalCodeMovingGC()
value format" "after each garbage collection") DEFINE_bool(print_cumulative_gc_stat, false, "print cumulative GC statistics in name=value format on exit") DEFINE_bool(trace_gc_verbose, false, "print more details following each garbage collection") DEFINE_bool(trace_fragmentation, false, "report fragmentation for old pointer and data pages") DEFINE_bool(collect_maps, true, "garbage collect maps from which no objects can be reached") DEFINE_bool(flush_code, true, "flush code that we expect not to use again before full gc") DEFINE_bool(incremental_marking, true, "use incremental marking") DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps") DEFINE_bool(trace_incremental_marking, false, "trace progress of the incremental marking") DEFINE_bool(use_idle_notification, true, "Use idle notification to reduce memory footprint.") DEFINE_bool(send_idle_notification, false, "Send idle notifcation between stress runs.") DEFINE_bool(use_ic, true, "use inline caching") DEFINE_bool(native_code_counters, false, "generate extra code for manipulating stats counters") DEFINE_bool(always_compact, false, "Perform compaction on every full GC") DEFINE_bool(lazy_sweeping, true, "Use lazy sweeping for old pointer and data spaces") DEFINE_bool(never_compact, false, "Never perform compaction on full GC-testing only") DEFINE_bool(compact_code_space, true, "Compact code space on full non-incremental collections") DEFINE_bool(cleanup_code_caches_at_gc, true, "Flush inline caches prior to mark compact collection and" "flush code caches in maps during mark compact cycle.") DEFINE_int(random_seed, 0, "Default seed for initializing random generator" "(0, the default, means to use system random).") DEFINE_bool(use_verbose_printer, true, "allows verbose printing") DEFINE_bool(allow_natives_syntax, false, "allow natives syntax") DEFINE_bool(trace_sim, false, "Trace simulator execution") DEFINE_bool(check_icache, false, "Check icache flushes in ARM and MIPS simulator") 
DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions") DEFINE_int(sim_stack_alignment, 8, "Stack alingment in bytes in simulator(4 or 8, 8 is default)") DEFINE_bool(trace_exception, false, "print stack trace when throwing exceptions") DEFINE_bool(preallocate_message_memory, false, "preallocate some memory to build stack traces.") DEFINE_bool(randomize_hashes, true, "randomize hashes to avoid predictable hash collisions" "(with snapshots this option cannot override the baked-in seed)") DEFINE_int(hash_seed, 0, "Fixed seed to use to hash property keys(0 means random)" "(with snapshots this option cannot override the baked-in seed)") DEFINE_bool(preemption, false, "activate a 100ms timer that switches between V8 threads") DEFINE_bool(regexp_optimization, true, "generate optimized regexp code") DEFINE_bool(testing_bool_flag, true, "testing_bool_flag") DEFINE_int(testing_int_flag, 13, "testing_int_flag") DEFINE_float(testing_float_flag, 2.5, "float-flag") DEFINE_string(testing_string_flag, "Hello, world!", "string-flag") DEFINE_int(testing_prng_seed, 42, "Seed used for threading test randomness") DEFINE_string(testing_serialization_file, "/tmp/serdes", "file in which to serialize heap") DEFINE_bool(help, false, "Print usage message, including flags, on console") DEFINE_bool(dump_counters, false, "Dump counters on exit") DEFINE_string(map_counters, "", "Map counters to a file") DEFINE_args(js_arguments, JSARGUMENTS_INIT, "Pass all remaining arguments to the script.Alias for\"--\".") DEFINE_bool(debug_compile_events, true,"Enable debugger compile events") DEFINE_bool(debug_script_collected_events, true,"Enable debugger script collected events") DEFINE_bool(gdbjit, false,"enable GDBJIT interface (disables compacting GC)") DEFINE_bool(gdbjit_full, false,"enable GDBJIT interface for all code objects") DEFINE_bool(gdbjit_dump, false,"dump elf objects with debug info to disk") DEFINE_string(gdbjit_dump_filter,"","dump only objects containing this 
substring") DEFINE_bool(force_marking_deque_overflows, false,"force overflows of marking deque by reducing it's size ""to 64 words") DEFINE_bool(stress_compaction, false,"stress the GC compactor to flush out bugs (implies ""--force_marking_deque_overflows)")#define FLAG DEFINE_bool(enable_slow_asserts, false,"enable asserts that are slow to execute") DEFINE_bool(trace_codegen, false,"print name of functions for which code is generated") DEFINE_bool(print_source, false,"pretty print source code") DEFINE_bool(print_builtin_source, false,"pretty print source code for builtins") DEFINE_bool(print_ast, false,"print source AST") DEFINE_bool(print_builtin_ast, false,"print source AST for builtins") DEFINE_string(stop_at,"","function name where to insert a breakpoint") DEFINE_bool(print_builtin_scopes, false,"print scopes for builtins") DEFINE_bool(print_scopes, false,"print scopes") DEFINE_bool(trace_contexts, false,"trace contexts operations") DEFINE_bool(gc_greedy, false,"perform GC prior to some allocations") DEFINE_bool(gc_verbose, false,"print stuff during garbage collection") DEFINE_bool(heap_stats, false,"report heap statistics before and after GC") DEFINE_bool(code_stats, false,"report code statistics after GC") DEFINE_bool(verify_heap, false,"verify heap pointers before and after GC") DEFINE_bool(print_handles, false,"report handles after GC") DEFINE_bool(print_global_handles, false,"report global handles after GC") DEFINE_bool(trace_ic, false,"trace inline cache state transitions") DEFINE_bool(print_interfaces, false,"print interfaces") DEFINE_bool(print_interface_details, false,"print interface inference details") DEFINE_int(print_interface_depth, 5,"depth for printing interfaces") DEFINE_bool(trace_normalization, false,"prints when objects are turned into dictionaries.") DEFINE_bool(trace_lazy, false,"trace lazy compilation") DEFINE_bool(collect_heap_spill_statistics, false,"report heap spill statistics along with heap_stats ""(requires heap_stats)") 
DEFINE_bool(trace_isolates, false,"trace isolate state changes") DEFINE_bool(log_state_changes, false,"Log state changes.") DEFINE_bool(regexp_possessive_quantifier, false,"enable possessive quantifier syntax for testing") DEFINE_bool(trace_regexp_bytecodes, false,"trace regexp bytecode execution") DEFINE_bool(trace_regexp_assembler, false,"trace regexp macro assembler calls.")#define FLAG DEFINE_bool(log, false,"Minimal logging (no API, code, GC, suspect, or handles samples).") DEFINE_bool(log_all, false,"Log all events to the log file.") DEFINE_bool(log_runtime, false,"Activate runtime system %Log call.") DEFINE_bool(log_api, false,"Log API events to the log file.") DEFINE_bool(log_code, false,"Log code events to the log file without profiling.") DEFINE_bool(log_gc, false,"Log heap samples on garbage collection for the hp2ps tool.") DEFINE_bool(log_handles, false,"Log global handle events.") DEFINE_bool(log_snapshot_positions, false,"log positions of (de)serialized objects in the snapshot.") DEFINE_bool(log_suspect, false,"Log suspect operations.") DEFINE_bool(prof, false,"Log statistical profiling information (implies --log-code).") DEFINE_bool(prof_auto, true,"Used with --prof, starts profiling automatically") DEFINE_bool(prof_lazy, false,"Used with --prof, only does sampling and logging"" when profiler is active (implies --noprof_auto).") DEFINE_bool(prof_browser_mode, true,"Used with --prof, turns on browser-compatible mode for profiling.") DEFINE_bool(log_regexp, false,"Log regular expression execution.") DEFINE_bool(sliding_state_window, false,"Update sliding state window counters.") DEFINE_string(logfile,"v8.log","Specify the name of the log file.") DEFINE_bool(ll_prof, false,"Enable low-level linux profiler.")#define FLAG DEFINE_bool(trace_elements_transitions, false,"trace elements transitions") DEFINE_bool(print_code_stubs, false,"print code stubs") DEFINE_bool(test_secondary_stub_cache, false,"test secondary stub cache by disabling the primary one") 
DEFINE_bool(test_primary_stub_cache, false,"test primary stub cache by disabling the secondary one") DEFINE_bool(print_code, false,"print generated code") DEFINE_bool(print_opt_code, false,"print optimized code") DEFINE_bool(print_unopt_code, false,"print unoptimized code before ""printing optimized code based on it") DEFINE_bool(print_code_verbose, false,"print more information for code") DEFINE_bool(print_builtin_code, false,"print generated code for builtins")#43"/Users/thlorenz/dev/dx/v8-perf/build/v8/src/flags.cc"2#define FLAG_MODE_DEFINE_DEFAULTS#1"/Users/thlorenz/dev/dx/v8-perf/build/v8/src/flag-definitions.h"1#define FLAG_FULL(ftype, ctype, nam, def, cmt)#define FLAG_READONLY(ftype, ctype, nam, def, cmt)#define DEFINE_implication(whenflag, thenflag)#define DEFINE_bool(nam, def, cmt)#define DEFINE_int(nam, def, cmt)#define DEFINE_float(nam, def, cmt)#define DEFINE_string(nam, def, cmt)#define DEFINE_args(nam, def, cmt)#define FLAG DEFINE_bool(use_strict, false,"enforce strict mode") DEFINE_bool(es5_readonly, false,"activate correct semantics for inheriting readonliness") DEFINE_bool(es52_globals, false,"activate new semantics for global var declarations") DEFINE_bool(harmony_typeof, false,"enable harmony semantics for typeof") DEFINE_bool(harmony_scoping, false,"enable harmony block scoping") DEFINE_bool(harmony_modules, false,"enable harmony modules (implies block scoping)") DEFINE_bool(harmony_proxies, false,"enable harmony proxies") DEFINE_bool(harmony_collections, false,"enable harmony collections (sets, maps, and weak maps)") DEFINE_bool(harmony, false,"enable all harmony features (except typeof)") DEFINE_implication(harmony, harmony_scoping) DEFINE_implication(harmony, harmony_modules) DEFINE_implication(harmony, harmony_proxies) DEFINE_implication(harmony, harmony_collections) DEFINE_implication(harmony_modules, harmony_scoping) DEFINE_bool(packed_arrays, false,"optimizes arrays that have no holes") DEFINE_bool(smi_only_arrays, true,"tracks arrays 
with only smi values") DEFINE_bool(clever_optimizations, true,"Optimize object size, Array shift, DOM strings and string +") DEFINE_bool(unbox_double_arrays, true,"automatically unbox arrays of doubles") DEFINE_bool(string_slices, true,"use string slices") DEFINE_bool(crankshaft, true,"use crankshaft") DEFINE_string(hydrogen_filter,"","optimization filter") DEFINE_bool(use_range, true,"use hydrogen range analysis") DEFINE_bool(eliminate_dead_phis, true,"eliminate dead phis") DEFINE_bool(use_gvn, true,"use hydrogen global value numbering") DEFINE_bool(use_canonicalizing, true,"use hydrogen instruction canonicalizing") DEFINE_bool(use_inlining, true,"use function inlining") DEFINE_int(max_inlined_source_size, 600,"maximum source size in bytes considered for a single inlining") DEFINE_int(max_inlined_nodes, 196,"maximum number of AST nodes considered for a single inlining") DEFINE_int(max_inlined_nodes_cumulative, 196,"maximum cumulative number of AST nodes considered for inlining") DEFINE_bool(loop_invariant_code_motion, true,"loop invariant code motion") DEFINE_bool(collect_megamorphic_maps_from_stub_cache, true,"crankshaft harvests type feedback from stub cache") DEFINE_bool(hydrogen_stats, false,"print statistics for hydrogen") DEFINE_bool(trace_hydrogen, false,"trace generated hydrogen to file") DEFINE_string(trace_phase,"Z","trace generated IR for specified phases") DEFINE_bool(trace_inlining, false,"trace inlining decisions") DEFINE_bool(trace_alloc, false,"trace register allocator") DEFINE_bool(trace_all_uses, false,"trace all use positions") DEFINE_bool(trace_range, false,"trace range analysis") DEFINE_bool(trace_gvn, false,"trace global value numbering") DEFINE_bool(trace_representation, false,"trace representation types") DEFINE_bool(stress_pointer_maps, false,"pointer map for every instruction") DEFINE_bool(stress_environments, false,"environment for every instruction") DEFINE_int(deopt_every_n_times, 0,"deoptimize every n times a deopt point is passed") 
DEFINE_bool(trap_on_deopt, false,"put a break point before deoptimizing") DEFINE_bool(deoptimize_uncommon_cases, true,"deoptimize uncommon cases") DEFINE_bool(polymorphic_inlining, true,"polymorphic inlining") DEFINE_bool(use_osr, true,"use on-stack replacement") DEFINE_bool(array_bounds_checks_elimination, false,"perform array bounds checks elimination") DEFINE_bool(array_index_dehoisting, false,"perform array index dehoisting") DEFINE_bool(trace_osr, false,"trace on-stack replacement") DEFINE_int(stress_runs, 0,"number of stress runs") DEFINE_bool(optimize_closures, true,"optimize closures") DEFINE_bool(inline_construct, true,"inline constructor calls") DEFINE_bool(inline_arguments, true,"inline functions with arguments object") DEFINE_int(loop_weight, 1,"loop weight for representation inference") DEFINE_bool(optimize_for_in, true,"optimize functions containing for-in loops") DEFINE_bool(experimental_profiler, true,"enable all profiler experiments") DEFINE_bool(watch_ic_patching, false,"profiler considers IC stability") DEFINE_int(frame_count, 1,"number of stack frames inspected by the profiler") DEFINE_bool(self_optimization, false,"primitive functions trigger their own optimization") DEFINE_bool(direct_self_opt, false,"call recompile stub directly when self-optimizing") DEFINE_bool(retry_self_opt, false,"re-try self-optimization if it failed") DEFINE_bool(count_based_interrupts, false,"trigger profiler ticks based on counting instead of timing") DEFINE_bool(interrupt_at_exit, false,"insert an interrupt check at function exit") DEFINE_bool(weighted_back_edges, false,"weight back edges by jump distance for interrupt triggering") DEFINE_int(interrupt_budget, 5900,"execution budget before interrupt is triggered") DEFINE_int(type_info_threshold, 15,"percentage of ICs that must have type info to allow optimization") DEFINE_int(self_opt_count, 130,"call count before self-optimization") DEFINE_implication(experimental_profiler, watch_ic_patching) 
DEFINE_implication(experimental_profiler, self_optimization) DEFINE_implication(experimental_profiler, retry_self_opt) DEFINE_implication(experimental_profiler, count_based_interrupts) DEFINE_implication(experimental_profiler, interrupt_at_exit) DEFINE_implication(experimental_profiler, weighted_back_edges) DEFINE_bool(trace_opt_verbose, false,"extra verbose compilation tracing") DEFINE_implication(trace_opt_verbose, trace_opt) DEFINE_bool(debug_code, false,"generate extra code (assertions) for debugging") DEFINE_bool(code_comments, false,"emit comments in code disassembly") DEFINE_bool(enable_sse2, true,"enable use of SSE2 instructions if available") DEFINE_bool(enable_sse3, true,"enable use of SSE3 instructions if available") DEFINE_bool(enable_sse4_1, true,"enable use of SSE4.1 instructions if available") DEFINE_bool(enable_cmov, true,"enable use of CMOV instruction if available") DEFINE_bool(enable_rdtsc, true,"enable use of RDTSC instruction if available") DEFINE_bool(enable_sahf, true,"enable use of SAHF instruction if available (X64 only)") DEFINE_bool(enable_vfp3, true,"enable use of VFP3 instructions if available - this implies ""enabling ARMv7 instructions (ARM only)") DEFINE_bool(enable_armv7, true,"enable use of ARMv7 instructions if available (ARM only)") DEFINE_bool(enable_fpu, true,"enable use of MIPS FPU instructions if available (MIPS only)") DEFINE_string(expose_natives_as, NULL,"expose natives in global object") DEFINE_string(expose_debug_as, NULL,"expose debug in global object") DEFINE_bool(expose_gc, false,"expose gc extension") DEFINE_bool(expose_externalize_string, false,"expose externalize string extension") DEFINE_int(stack_trace_limit, 10,"number of stack frames to capture") DEFINE_bool(builtins_in_stack_traces, false,"show built-in functions in stack traces") DEFINE_bool(disable_native_files, false,"disable builtin natives files") DEFINE_bool(inline_new, true,"use fast inline allocation") DEFINE_bool(stack_trace_on_abort, true,"print a 
stack trace if an assertion failure occurs") DEFINE_bool(trace, false,"trace function calls") DEFINE_bool(mask_constants_with_cookie, true,"use random jit cookie to mask large constants") DEFINE_bool(lazy, true,"use lazy compilation") DEFINE_bool(trace_opt, false,"trace lazy optimization") DEFINE_bool(trace_opt_stats, false,"trace lazy optimization statistics") DEFINE_bool(opt, true,"use adaptive optimizations") DEFINE_bool(always_opt, false,"always try to optimize functions") DEFINE_bool(prepare_always_opt, false,"prepare for turning on always opt") DEFINE_bool(trace_deopt, false,"trace deoptimization") DEFINE_int(min_preparse_length, 1024,"minimum length for automatic enable preparsing") DEFINE_bool(always_full_compiler, false,"try to use the dedicated run-once backend for all code") DEFINE_bool(trace_bailout, false,"print reasons for falling back to using the classic V8 backend") DEFINE_bool(compilation_cache, true,"enable compilation cache") DEFINE_bool(cache_prototype_transitions, true,"cache prototype transitions") DEFINE_bool(trace_debug_json, false,"trace debugging JSON request/response") DEFINE_bool(debugger_auto_break, true,"automatically set the debug break flag when debugger commands are ""in the queue") DEFINE_bool(enable_liveedit, true,"enable liveedit experimental feature") DEFINE_bool(break_on_abort, true,"always cause a debug break before aborting") DEFINE_int(stack_size, kPointerSize *123,"default size of stack region v8 is allowed to use (in kBytes)") DEFINE_int(max_stack_trace_source_length, 300,"maximum length of function source code printed in a stack trace.") DEFINE_bool(always_inline_smi_code, false,"always inline smi code in non-opt code") DEFINE_int(max_new_space_size, 0,"max size of the new generation (in kBytes)") DEFINE_int(max_old_space_size, 0,"max size of the old generation (in Mbytes)") DEFINE_int(max_executable_size, 0,"max size of executable memory (in Mbytes)") DEFINE_bool(gc_global, false,"always perform global GCs") 
DEFINE_int(gc_interval,-1,"garbage collect after <n> allocations") DEFINE_bool(trace_gc, false,"print one trace line following each garbage collection") DEFINE_bool(trace_gc_nvp, false,"print one detailed trace line in name=value format ""after each garbage collection") DEFINE_bool(print_cumulative_gc_stat, false,"print cumulative GC statistics in name=value format on exit") DEFINE_bool(trace_gc_verbose, false,"print more details following each garbage collection") DEFINE_bool(trace_fragmentation, false,"report fragmentation for old pointer and data pages") DEFINE_bool(collect_maps, true,"garbage collect maps from which no objects can be reached") DEFINE_bool(flush_code, true,"flush code that we expect not to use again before full gc") DEFINE_bool(incremental_marking, true,"use incremental marking") DEFINE_bool(incremental_marking_steps, true,"do incremental marking steps") DEFINE_bool(trace_incremental_marking, false,"trace progress of the incremental marking") DEFINE_bool(use_idle_notification, true,"Use idle notification to reduce memory footprint.") DEFINE_bool(send_idle_notification, false,"Send idle notifcation between stress runs.") DEFINE_bool(use_ic, true,"use inline caching") DEFINE_bool(native_code_counters, false,"generate extra code for manipulating stats counters") DEFINE_bool(always_compact, false,"Perform compaction on every full GC") DEFINE_bool(lazy_sweeping, true,"Use lazy sweeping for old pointer and data spaces") DEFINE_bool(never_compact, false,"Never perform compaction on full GC - testing only") DEFINE_bool(compact_code_space, true,"Compact code space on full non-incremental collections") DEFINE_bool(cleanup_code_caches_at_gc, true,"Flush inline caches prior to mark compact collection and ""flush code caches in maps during mark compact cycle.") DEFINE_int(random_seed, 0,"Default seed for initializing random generator ""(0, the default, means to use system random).") DEFINE_bool(use_verbose_printer, true,"allows verbose printing") 
DEFINE_bool(allow_natives_syntax, false,"allow natives syntax") DEFINE_bool(trace_sim, false,"Trace simulator execution") DEFINE_bool(check_icache, false,"Check icache flushes in ARM and MIPS simulator") DEFINE_int(stop_sim_at, 0,"Simulator stop after x number of instructions") DEFINE_int(sim_stack_alignment, 8,"Stack alingment in bytes in simulator (4 or 8, 8 is default)") DEFINE_bool(trace_exception, false,"print stack trace when throwing exceptions") DEFINE_bool(preallocate_message_memory, false,"preallocate some memory to build stack traces.") DEFINE_bool(randomize_hashes, true,"randomize hashes to avoid predictable hash collisions ""(with snapshots this option cannot override the baked-in seed)") DEFINE_int(hash_seed, 0,"Fixed seed to use to hash property keys (0 means random)""(with snapshots this option cannot override the baked-in seed)") DEFINE_bool(preemption, false,"activate a 100ms timer that switches between V8 threads") DEFINE_bool(regexp_optimization, true,"generate optimized regexp code") DEFINE_bool(testing_bool_flag, true,"testing_bool_flag") DEFINE_int(testing_int_flag, 13,"testing_int_flag") DEFINE_float(testing_float_flag, 2.5,"float-flag") DEFINE_string(testing_string_flag,"Hello, world!","string-flag") DEFINE_int(testing_prng_seed, 42,"Seed used for threading test randomness") DEFINE_string(testing_serialization_file,"/tmp/serdes","file in which to serialize heap") DEFINE_bool(help, false,"Print usage message, including flags, on console") DEFINE_bool(dump_counters, false,"Dump counters on exit") DEFINE_string(map_counters,"","Map counters to a file") DEFINE_args(js_arguments, JSARGUMENTS_INIT,"Pass all remaining arguments to the script. 
Alias for \"--\".") DEFINE_bool(debug_compile_events, true,"Enable debugger compile events") DEFINE_bool(debug_script_collected_events, true,"Enable debugger script collected events") DEFINE_bool(gdbjit, false,"enable GDBJIT interface (disables compacting GC)") DEFINE_bool(gdbjit_full, false,"enable GDBJIT interface for all code objects") DEFINE_bool(gdbjit_dump, false,"dump elf objects with debug info to disk") DEFINE_string(gdbjit_dump_filter,"","dump only objects containing this substring") DEFINE_bool(force_marking_deque_overflows, false,"force overflows of marking deque by reducing it's size ""to 64 words") DEFINE_bool(stress_compaction, false,"stress the GC compactor to flush out bugs (implies ""--force_marking_deque_overflows)")#define FLAG DEFINE_bool(enable_slow_asserts, false,"enable asserts that are slow to execute") DEFINE_bool(trace_codegen, false,"print name of functions for which code is generated") DEFINE_bool(print_source, false,"pretty print source code") DEFINE_bool(print_builtin_source, false,"pretty print source code for builtins") DEFINE_bool(print_ast, false,"print source AST") DEFINE_bool(print_builtin_ast, false,"print source AST for builtins") DEFINE_string(stop_at,"","function name where to insert a breakpoint") DEFINE_bool(print_builtin_scopes, false,"print scopes for builtins") DEFINE_bool(print_scopes, false,"print scopes") DEFINE_bool(trace_contexts, false,"trace contexts operations") DEFINE_bool(gc_greedy, false,"perform GC prior to some allocations") DEFINE_bool(gc_verbose, false,"print stuff during garbage collection") DEFINE_bool(heap_stats, false,"report heap statistics before and after GC") DEFINE_bool(code_stats, false,"report code statistics after GC") DEFINE_bool(verify_heap, false,"verify heap pointers before and after GC") DEFINE_bool(print_handles, false,"report handles after GC") DEFINE_bool(print_global_handles, false,"report global handles after GC") DEFINE_bool(trace_ic, false,"trace inline cache state transitions") 
DEFINE_bool(print_interfaces, false,"print interfaces") DEFINE_bool(print_interface_details, false,"print interface inference details") DEFINE_int(print_interface_depth, 5,"depth for printing interfaces") DEFINE_bool(trace_normalization, false,"prints when objects are turned into dictionaries.") DEFINE_bool(trace_lazy, false,"trace lazy compilation") DEFINE_bool(collect_heap_spill_statistics, false,"report heap spill statistics along with heap_stats ""(requires heap_stats)") DEFINE_bool(trace_isolates, false,"trace isolate state changes") DEFINE_bool(log_state_changes, false,"Log state changes.") DEFINE_bool(regexp_possessive_quantifier, false,"enable possessive quantifier syntax for testing") DEFINE_bool(trace_regexp_bytecodes, false,"trace regexp bytecode execution") DEFINE_bool(trace_regexp_assembler, false,"trace regexp macro assembler calls.")#define FLAG DEFINE_bool(log, false,"Minimal logging (no API, code, GC, suspect, or handles samples).") DEFINE_bool(log_all, false,"Log all events to the log file.") DEFINE_bool(log_runtime, false,"Activate runtime system %Log call.") DEFINE_bool(log_api, false,"Log API events to the log file.") DEFINE_bool(log_code, false,"Log code events to the log file without profiling.") DEFINE_bool(log_gc, false,"Log heap samples on garbage collection for the hp2ps tool.") DEFINE_bool(log_handles, false,"Log global handle events.") DEFINE_bool(log_snapshot_positions, false,"log positions of (de)serialized objects in the snapshot.") DEFINE_bool(log_suspect, false,"Log suspect operations.") DEFINE_bool(prof, false,"Log statistical profiling information (implies --log-code).") DEFINE_bool(prof_auto, true,"Used with --prof, starts profiling automatically") DEFINE_bool(prof_lazy, false,"Used with --prof, only does sampling and logging"" when profiler is active (implies --noprof_auto).") DEFINE_bool(prof_browser_mode, true,"Used with --prof, turns on browser-compatible mode for profiling.") DEFINE_bool(log_regexp, false,"Log regular 
expression execution.") DEFINE_bool(sliding_state_window, false,"Update sliding state window counters.") DEFINE_string(logfile,"v8.log","Specify the name of the log file.") DEFINE_bool(ll_prof, false,"Enable low-level linux profiler.")#define FLAG DEFINE_bool(trace_elements_transitions, false,"trace elements transitions") DEFINE_bool(print_code_stubs, false,"print code stubs") DEFINE_bool(test_secondary_stub_cache, false,"test secondary stub cache by disabling the primary one") DEFINE_bool(test_primary_stub_cache, false,"test primary stub cache by disabling the secondary one") DEFINE_bool(print_code, false,"print generated code") DEFINE_bool(print_opt_code, false,"print optimized code") DEFINE_bool(print_unopt_code, false,"print unoptimized code before ""printing optimized code based on it") DEFINE_bool(print_code_verbose, false,"print more information for code") DEFINE_bool(print_builtin_code, false,"print generated code for builtins")#47"/Users/thlorenz/dev/dx/v8-perf/build/v8/src/flags.cc"2 namespace{struct Flag{enum FlagType{TYPE_BOOL, TYPE_INT, TYPE_FLOAT, TYPE_STRING, TYPE_ARGS} name
Definition: flags.cc:1349
static void * GetRandomMmapAddr()
double ceiling(double x)
static void * ReserveRegion(size_t size)
T Max(T a, T b)
Definition: utils.h:222
static bool IsOutsideAllocatedSpace(void *pointer)
static const char * LocalTimezone(double time)
Vector< char > MutableCStrVector(char *data)
Definition: utils.h:529
static const int kStackWalkError
Definition: platform.h:223
void Sleep(SleepInterval full_or_half)
static const int kStackWalkMaxTextLen
Definition: platform.h:225
static void DoRuntimeProfile(Sampler *sampler, void *ignored)
TickSample * sample
PosixMemoryMappedFile(FILE *file, void *memory, int size)
#define ASSERT(condition)
Definition: checks.h:270
int interval() const
Definition: platform.h:712
#define CHECK(condition)
Definition: checks.h:56
int isnan(double x)
static MemoryMappedFile * open(const char *name)
static void RemoveActiveSampler(Sampler *sampler)
Definition: log.cc:1783
unsigned int seed
Definition: test-strings.cc:17
#define timeradd(a, b, result)
static void Abort()
static void StopRuntimeProfilerThreadBeforeShutdown(Thread *thread)
#define UNREACHABLE()
Definition: checks.h:50
static SignalSender * instance_
static void ReleaseStore(volatile AtomicWord *ptr, AtomicWord value)
RuntimeProfiler * runtime_profiler()
Definition: isolate.h:811
static TickSample * TickSampleEvent(Isolate *isolate)
bool IsProfiling() const
Definition: platform.h:729
void POSIXPostSetUp()
static LocalStorageKey CreateThreadLocalKey()
bool IsAligned(T value, U alignment)
Definition: utils.h:206
static MemoryMappedFile * create(const char *name, int size, void *initial)
bool Commit(void *address, size_t size, bool is_executable)
#define DISALLOW_COPY_AND_ASSIGN(TypeName)
Definition: globals.h:321
intptr_t AtomicWord
Definition: atomicops.h:72
const Register pc
static void Guard(void *address, const size_t size)
static bool ArmCpuHasFeature(CpuFeature feature)
T RoundUp(T x, intptr_t m)
Definition: utils.h:150
#define ASSERT_LE(v1, v2)
Definition: checks.h:275
static Mutex * CreateMutex()
static bool IsActive()
Definition: v8threads.cc:97
static bool MipsCpuHasFeature(CpuFeature feature)
static void DebugBreak()
int backtrace(void **, int) __attribute__((weak_import))
static void DeleteThreadLocalKey(LocalStorageKey key)
static void Sleep(const int milliseconds)
static void TearDown()
static int SNPrintF(Vector< char > str, const char *format,...)
static Semaphore * CreateSemaphore(int count)
#define ISOLATE
Definition: isolate.h:1410
static bool ReleaseRegion(void *base, size_t size)
static bool CommitRegion(void *base, size_t size, bool is_executable)
const CRegister cr2
#define TIMEVAL_TO_TIMESPEC(tv, ts)
static void SetThreadLocal(LocalStorageKey key, void *value)
static void * Allocate(const size_t requested, size_t *allocated, bool is_executable)
static int StackWalk(Vector< StackFrame > frames)
static void PostSetUp()
static State GetState()
Definition: log.cc:1762
static bool UncommitRegion(void *base, size_t size)
static void LogSharedLibraryAddresses()
activate correct semantics for inheriting readonliness enable harmony semantics for typeof enable harmony enable harmony proxies enable all harmony harmony_scoping harmony_proxies harmony_scoping tracks arrays with only smi values automatically unbox arrays of doubles use crankshaft use hydrogen range analysis use hydrogen global value numbering use function inlining maximum number of AST nodes considered for a single inlining loop invariant code motion print statistics for hydrogen trace generated IR for specified phases trace register allocator trace range analysis trace representation types environment for every instruction put a break point before deoptimizing polymorphic inlining perform array bounds checks elimination trace on stack replacement optimize closures functions with arguments object optimize functions containing for in loops profiler considers IC stability primitive functions trigger their own optimization re try self optimization if it failed insert an interrupt check at function exit execution budget before interrupt is triggered call count before self optimization self_optimization count_based_interrupts weighted_back_edges trace_opt emit comments in code disassembly enable use of SSE3 instructions if available enable use of CMOV instruction if available enable use of SAHF instruction if enable use of VFP3 instructions if available this implies enabling ARMv7 enable use of ARMv7 instructions if enable use of MIPS FPU instructions if NULL
Definition: flags.cc:274
static void SetUp()
void USE(T)
Definition: globals.h:303
static int ActivationFrameAlignment()
static size_t AllocateAlignment()
static void AddActiveSampler(Sampler *sampler)
static void AddActiveSampler(Sampler *sampler)
Definition: log.cc:1771
Sampler(Isolate *isolate, int interval)
static uint64_t CpuFeaturesImpliedByPlatform()
static bool IterateActiveSamplers(VisitSampler func, void *param)
Definition: log.cc:1745
const Register fp
static void RemoveActiveSampler(Sampler *sampler)
static double LocalTimeOffset()
T Min(T a, T b)
Definition: utils.h:229
#define FUNCTION_ADDR(f)
Definition: globals.h:307
static bool ArmUsingHardFloat()
static intptr_t CommitPageSize()
bool Uncommit(void *address, size_t size)
static void DoCpuProfile(Sampler *sampler, void *raw_sender)