v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
platform-linux.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 // Platform specific code for Linux goes here. For the POSIX compatible parts
29 // the implementation is in platform-posix.cc.
30 
31 #include <pthread.h>
32 #include <semaphore.h>
33 #include <signal.h>
34 #include <sys/prctl.h>
35 #include <sys/time.h>
36 #include <sys/resource.h>
37 #include <sys/syscall.h>
38 #include <sys/types.h>
39 #include <stdlib.h>
40 
41 // Ubuntu Dapper requires memory pages to be marked as
42 // executable. Otherwise, the OS raises an exception when executing code
43 // in that page.
44 #include <sys/types.h> // mmap & munmap
45 #include <sys/mman.h> // mmap & munmap
46 #include <sys/stat.h> // open
47 #include <fcntl.h> // open
48 #include <unistd.h> // sysconf
49 #if defined(__GLIBC__) && !defined(__UCLIBC__)
50 #include <execinfo.h> // backtrace, backtrace_symbols
51 #endif // defined(__GLIBC__) && !defined(__UCLIBC__)
52 #include <strings.h> // index
53 #include <errno.h>
54 #include <stdarg.h>
55 
56 // GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
57 // Old versions of the C library <signal.h> didn't define the type.
58 #if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
59  defined(__arm__) && !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
60 #include <asm/sigcontext.h>
61 #endif
62 
63 #undef MAP_TYPE
64 
65 #include "v8.h"
66 
67 #include "platform-posix.h"
68 #include "platform.h"
69 #include "v8threads.h"
70 #include "vm-state-inl.h"
71 
72 
73 namespace v8 {
74 namespace internal {
75 
76 // 0 is never a valid thread id on Linux since tids and pids share a
77 // name space and pid 0 is reserved (see man 2 kill).
78 static const pthread_t kNoThread = (pthread_t) 0;
79 
80 
81 double ceiling(double x) {
82  return ceil(x);
83 }
84 
85 
86 static Mutex* limit_mutex = NULL;
87 
88 
89 void OS::PostSetUp() {
90  POSIXPostSetUp();
91 }
92 
93 
94 uint64_t OS::CpuFeaturesImpliedByPlatform() {
95  return 0; // Linux runs on anything.
96 }
97 
98 
99 #ifdef __arm__
100 static bool CPUInfoContainsString(const char * search_string) {
101  const char* file_name = "/proc/cpuinfo";
102  // This is written as a straight shot one pass parser
103  // and not using STL string and ifstream because,
104  // on Linux, it's reading from a (non-mmap-able)
105  // character special device.
106  FILE* f = NULL;
107  const char* what = search_string;
108 
109  if (NULL == (f = fopen(file_name, "r")))
110  return false;
111 
112  int k;
113  while (EOF != (k = fgetc(f))) {
114  if (k == *what) {
115  ++what;
116  while ((*what != '\0') && (*what == fgetc(f))) {
117  ++what;
118  }
119  if (*what == '\0') {
120  fclose(f);
121  return true;
122  } else {
123  what = search_string;
124  }
125  }
126  }
127  fclose(f);
128 
129  // Did not find string in the proc file.
130  return false;
131 }
132 
133 
134 bool OS::ArmCpuHasFeature(CpuFeature feature) {
135  const char* search_string = NULL;
136  // Simple detection of VFP at runtime for Linux.
137  // It is based on /proc/cpuinfo, which reveals hardware configuration
138  // to user-space applications. According to ARM (mid 2009), no similar
139  // facility is universally available on the ARM architectures,
140  // so it's up to individual OSes to provide such.
141  switch (feature) {
142  case VFP2:
143  search_string = "vfp";
144  break;
145  case VFP3:
146  search_string = "vfpv3";
147  break;
148  case ARMv7:
149  search_string = "ARMv7";
150  break;
151  case SUDIV:
152  search_string = "idiva";
153  break;
154  default:
155  UNREACHABLE();
156  }
157 
158  if (CPUInfoContainsString(search_string)) {
159  return true;
160  }
161 
162  if (feature == VFP3) {
163  // Some old kernels will report vfp not vfpv3. Here we make a last attempt
164  // to detect vfpv3 by checking for vfp *and* neon, since neon is only
165  // available on architectures with vfpv3.
166  // Checking neon on its own is not enough as it is possible to have neon
167  // without vfp.
168  if (CPUInfoContainsString("vfp") && CPUInfoContainsString("neon")) {
169  return true;
170  }
171  }
172 
173  return false;
174 }
175 
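// For reference, a typical ARMv7 /proc/cpuinfo excerpt that the scanner above
// matches against looks roughly like this (illustrative only; exact contents
// vary by kernel and SoC):
//
//   Processor       : ARMv7 Processor rev 10 (v7l)
//   Features        : swp half thumb fastmult vfp edsp neon vfpv3 tls
//   CPU implementer : 0x41
//
// "vfpv3" satisfies the VFP3 query directly; some older kernels only report
// "vfp" and "neon", which is why ArmCpuHasFeature() above falls back to
// checking for that combination.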
176 
177 CpuImplementer OS::GetCpuImplementer() {
178  static bool use_cached_value = false;
179  static CpuImplementer cached_value = UNKNOWN_IMPLEMENTER;
180  if (use_cached_value) {
181  return cached_value;
182  }
183  if (CPUInfoContainsString("CPU implementer\t: 0x41")) {
184  cached_value = ARM_IMPLEMENTER;
185  } else if (CPUInfoContainsString("CPU implementer\t: 0x51")) {
186  cached_value = QUALCOMM_IMPLEMENTER;
187  } else {
188  cached_value = UNKNOWN_IMPLEMENTER;
189  }
190  use_cached_value = true;
191  return cached_value;
192 }
193 
194 
195 bool OS::ArmUsingHardFloat() {
196  // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
197  // the Floating Point ABI used (PCS stands for Procedure Call Standard).
198  // We use these as well as a couple of other defines to statically determine
199  // what FP ABI is used.
200  // GCC versions 4.4 and below don't support hard-fp.
201  // GCC versions 4.5 may support hard-fp without defining __ARM_PCS or
202  // __ARM_PCS_VFP.
203 
204 #define GCC_VERSION (__GNUC__ * 10000 \
205  + __GNUC_MINOR__ * 100 \
206  + __GNUC_PATCHLEVEL__)
207 #if GCC_VERSION >= 40600
208 #if defined(__ARM_PCS_VFP)
209  return true;
210 #else
211  return false;
212 #endif
213 
214 #elif GCC_VERSION < 40500
215  return false;
216 
217 #else
218 #if defined(__ARM_PCS_VFP)
219  return true;
220 #elif defined(__ARM_PCS) || defined(__SOFTFP) || !defined(__VFP_FP__)
221  return false;
222 #else
223 #error "Your version of GCC does not report the FP ABI compiled for." \
224  "Please report it on this issue" \
225  "http://code.google.com/p/v8/issues/detail?id=2140"
226 
227 #endif
228 #endif
229 #undef GCC_VERSION
230 }
231 
232 #endif // def __arm__
233 
234 
235 #ifdef __mips__
236 bool OS::MipsCpuHasFeature(CpuFeature feature) {
237  const char* search_string = NULL;
238  const char* file_name = "/proc/cpuinfo";
239  // Simple detection of FPU at runtime for Linux.
240  // It is based on /proc/cpuinfo, which reveals hardware configuration
241  // to user-space applications. According to MIPS (early 2010), no similar
242  // facility is universally available on the MIPS architectures,
243  // so it's up to individual OSes to provide such.
244  //
245  // This is written as a straight shot one pass parser
246  // and not using STL string and ifstream because,
247  // on Linux, it's reading from a (non-mmap-able)
248  // character special device.
249 
250  switch (feature) {
251  case FPU:
252  search_string = "FPU";
253  break;
254  default:
255  UNREACHABLE();
256  }
257 
258  FILE* f = NULL;
259  const char* what = search_string;
260 
261  if (NULL == (f = fopen(file_name, "r")))
262  return false;
263 
264  int k;
265  while (EOF != (k = fgetc(f))) {
266  if (k == *what) {
267  ++what;
268  while ((*what != '\0') && (*what == fgetc(f))) {
269  ++what;
270  }
271  if (*what == '\0') {
272  fclose(f);
273  return true;
274  } else {
275  what = search_string;
276  }
277  }
278  }
279  fclose(f);
280 
281  // Did not find string in the proc file.
282  return false;
283 }
284 #endif // def __mips__
285 
286 
287 int OS::ActivationFrameAlignment() {
288 #ifdef V8_TARGET_ARCH_ARM
289  // On EABI ARM targets this is required for fp correctness in the
290  // runtime system.
291  return 8;
292 #elif V8_TARGET_ARCH_MIPS
293  return 8;
294 #endif
295  // With gcc 4.4 the tree vectorization optimizer can generate code
296  // that requires 16 byte alignment such as movdqa on x86.
297  return 16;
298 }
299 
300 
301 void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
302 #if (defined(V8_TARGET_ARCH_ARM) && defined(__arm__)) || \
303  (defined(V8_TARGET_ARCH_MIPS) && defined(__mips__))
304  // Only use on ARM or MIPS hardware.
305  MemoryBarrier();
306 #else
307  __asm__ __volatile__("" : : : "memory");
308  // An x86 store acts as a release barrier.
309 #endif
310  *ptr = value;
311 }
312 
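// A minimal sketch of the same release-store idea expressed with C++11
// std::atomic; this is not part of the original file and only illustrates the
// semantics OS::ReleaseStore() above provides with a barrier plus plain store.
#include <atomic>

static std::atomic<intptr_t> g_published(0);

static void ExampleReleasePublish(intptr_t value) {
  // All earlier writes become visible before g_published changes.
  g_published.store(value, std::memory_order_release);
}

static intptr_t ExampleAcquireConsume() {
  // Pairs with the release store above.
  return g_published.load(std::memory_order_acquire);
}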
313 
314 const char* OS::LocalTimezone(double time) {
315  if (isnan(time)) return "";
316  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
317  struct tm* t = localtime(&tv);
318  if (NULL == t) return "";
319  return t->tm_zone;
320 }
321 
322 
323 double OS::LocalTimeOffset() {
324  time_t tv = time(NULL);
325  struct tm* t = localtime(&tv);
326  // tm_gmtoff includes any daylight savings offset, so subtract it.
327  return static_cast<double>(t->tm_gmtoff * msPerSecond -
328  (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
329 }
330 
331 
332 // We keep the lowest and highest addresses mapped as a quick way of
333 // determining that pointers are outside the heap (used mostly in assertions
334 // and verification). The estimate is conservative, i.e., not all addresses in
335 // 'allocated' space are actually allocated to our heap. The range is
336 // [lowest, highest), inclusive on the low end and exclusive on the high end.
337 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
338 static void* highest_ever_allocated = reinterpret_cast<void*>(0);
339 
340 
341 static void UpdateAllocatedSpaceLimits(void* address, int size) {
342  ASSERT(limit_mutex != NULL);
343  ScopedLock lock(limit_mutex);
344 
345  lowest_ever_allocated = Min(lowest_ever_allocated, address);
346  highest_ever_allocated =
347  Max(highest_ever_allocated,
348  reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
349 }
350 
351 
352 bool OS::IsOutsideAllocatedSpace(void* address) {
353  return address < lowest_ever_allocated || address >= highest_ever_allocated;
354 }
355 
356 
357 size_t OS::AllocateAlignment() {
358  return sysconf(_SC_PAGESIZE);
359 }
360 
361 
362 void* OS::Allocate(const size_t requested,
363  size_t* allocated,
364  bool is_executable) {
365  const size_t msize = RoundUp(requested, AllocateAlignment());
366  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
367  void* addr = OS::GetRandomMmapAddr();
368  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
369  if (mbase == MAP_FAILED) {
370  LOG(i::Isolate::Current(),
371  StringEvent("OS::Allocate", "mmap failed"));
372  return NULL;
373  }
374  *allocated = msize;
375  UpdateAllocatedSpaceLimits(mbase, msize);
376  return mbase;
377 }
378 
379 
380 void OS::Free(void* address, const size_t size) {
381  // TODO(1240712): munmap has a return value which is ignored here.
382  int result = munmap(address, size);
383  USE(result);
384  ASSERT(result == 0);
385 }
386 
387 
388 void OS::Sleep(int milliseconds) {
389  unsigned int ms = static_cast<unsigned int>(milliseconds);
390  usleep(1000 * ms);
391 }
392 
393 
394 void OS::Abort() {
395  // Redirect to std abort to signal abnormal program termination.
396  if (FLAG_break_on_abort) {
397  DebugBreak();
398  }
399  abort();
400 }
401 
402 
403 void OS::DebugBreak() {
404 // TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x,
405 // which is the architecture of generated code).
406 #if (defined(__arm__) || defined(__thumb__))
407 # if defined(CAN_USE_ARMV5_INSTRUCTIONS)
408  asm("bkpt 0");
409 # endif
410 #elif defined(__mips__)
411  asm("break");
412 #else
413  asm("int $3");
414 #endif
415 }
416 
417 
418 class PosixMemoryMappedFile : public OS::MemoryMappedFile {
419  public:
420  PosixMemoryMappedFile(FILE* file, void* memory, int size)
421  : file_(file), memory_(memory), size_(size) { }
422  virtual ~PosixMemoryMappedFile();
423  virtual void* memory() { return memory_; }
424  virtual int size() { return size_; }
425  private:
426  FILE* file_;
427  void* memory_;
428  int size_;
429 };
430 
431 
432 OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
433  FILE* file = fopen(name, "r+");
434  if (file == NULL) return NULL;
435 
436  fseek(file, 0, SEEK_END);
437  int size = ftell(file);
438 
439  void* memory =
440  mmap(OS::GetRandomMmapAddr(),
441  size,
442  PROT_READ | PROT_WRITE,
443  MAP_SHARED,
444  fileno(file),
445  0);
446  return new PosixMemoryMappedFile(file, memory, size);
447 }
448 
449 
450 OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
451  void* initial) {
452  FILE* file = fopen(name, "w+");
453  if (file == NULL) return NULL;
454  int result = fwrite(initial, size, 1, file);
455  if (result < 1) {
456  fclose(file);
457  return NULL;
458  }
459  void* memory =
460  mmap(OS::GetRandomMmapAddr(),
461  size,
462  PROT_READ | PROT_WRITE,
463  MAP_SHARED,
464  fileno(file),
465  0);
466  return new PosixMemoryMappedFile(file, memory, size);
467 }
468 
469 
470 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
471  if (memory_) OS::Free(memory_, size_);
472  fclose(file_);
473 }
474 
475 
476 void OS::LogSharedLibraryAddresses() {
477  // This function assumes that the layout of the file is as follows:
478  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
479  // If we encounter an unexpected situation we abort scanning further entries.
480  FILE* fp = fopen("/proc/self/maps", "r");
481  if (fp == NULL) return;
482 
483  // Allocate enough room to be able to store a full file name.
484  const int kLibNameLen = FILENAME_MAX + 1;
485  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
486 
487  i::Isolate* isolate = ISOLATE;
488  // This loop will terminate once the scanning hits an EOF.
489  while (true) {
490  uintptr_t start, end;
491  char attr_r, attr_w, attr_x, attr_p;
492  // Parse the addresses and permission bits at the beginning of the line.
493  if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
494  if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
495 
496  int c;
497  if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
498  // Found a read-only executable entry. Skip characters until we reach
499  // the beginning of the filename or the end of the line.
500  do {
501  c = getc(fp);
502  } while ((c != EOF) && (c != '\n') && (c != '/'));
503  if (c == EOF) break; // EOF: Was unexpected, just exit.
504 
505  // Process the filename if found.
506  if (c == '/') {
507  ungetc(c, fp); // Push the '/' back into the stream to be read below.
508 
509  // Read to the end of the line. Exit if the read fails.
510  if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
511 
512  // Drop the newline character read by fgets. We do not need to check
513  // for a zero-length string because we know that we at least read the
514  // '/' character.
515  lib_name[strlen(lib_name) - 1] = '\0';
516  } else {
517  // No library name found, just record the raw address range.
518  snprintf(lib_name, kLibNameLen,
519  "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
520  }
521  LOG(isolate, SharedLibraryEvent(lib_name, start, end));
522  } else {
523  // Entry not describing executable data. Skip to end of line to set up
524  // reading the next entry.
525  do {
526  c = getc(fp);
527  } while ((c != EOF) && (c != '\n'));
528  if (c == EOF) break;
529  }
530  }
531  free(lib_name);
532  fclose(fp);
533 }
534 
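// For reference, the parser above expects /proc/self/maps lines of roughly
// this shape (illustrative values, not taken from a real system):
//
//   7f2c4a000000-7f2c4a1c0000 r-xp 00000000 08:01 393219   /lib/x86_64-linux-gnu/libc-2.15.so
//
// Only readable, non-writable, executable ("r-xp"-style) mappings are logged;
// everything between the permission bits and the leading '/' of the path is
// skipped character by character.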
535 
536 void OS::SignalCodeMovingGC() {
537  // Support for ll_prof.py.
538  //
539  // The Linux profiler built into the kernel logs all mmap's with
540  // PROT_EXEC so that analysis tools can properly attribute ticks. We
541  // do a mmap with a name known by ll_prof.py and immediately munmap
542  // it. This injects a GC marker into the stream of events generated
543  // by the kernel and allows us to synchronize V8 code log and the
544  // kernel log.
545  int size = sysconf(_SC_PAGESIZE);
546  FILE* f = fopen(FLAG_gc_fake_mmap, "w+");
547  void* addr = mmap(OS::GetRandomMmapAddr(),
548  size,
549  PROT_READ | PROT_EXEC,
550  MAP_PRIVATE,
551  fileno(f),
552  0);
553  ASSERT(addr != MAP_FAILED);
554  OS::Free(addr, size);
555  fclose(f);
556 }
557 
558 
559 int OS::StackWalk(Vector<OS::StackFrame> frames) {
560  // backtrace is a glibc extension.
561 #if defined(__GLIBC__) && !defined(__UCLIBC__)
562  int frames_size = frames.length();
563  ScopedVector<void*> addresses(frames_size);
564 
565  int frames_count = backtrace(addresses.start(), frames_size);
566 
567  char** symbols = backtrace_symbols(addresses.start(), frames_count);
568  if (symbols == NULL) {
569  return kStackWalkError;
570  }
571 
572  for (int i = 0; i < frames_count; i++) {
573  frames[i].address = addresses[i];
574  // Format a text representation of the frame based on the information
575  // available.
576  SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
577  "%s",
578  symbols[i]);
579  // Make sure line termination is in place.
580  frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
581  }
582 
583  free(symbols);
584 
585  return frames_count;
586 #else // defined(__GLIBC__) && !defined(__UCLIBC__)
587  return 0;
588 #endif // defined(__GLIBC__) && !defined(__UCLIBC__)
589 }
590 
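// A minimal standalone sketch of the glibc backtrace API used by
// OS::StackWalk() above; not part of the original file, and it assumes a
// glibc target where <execinfo.h> is available.
#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>

static void ExamplePrintNativeStack() {
  void* addresses[64];
  int count = backtrace(addresses, 64);
  char** symbols = backtrace_symbols(addresses, count);
  if (symbols == NULL) return;
  for (int i = 0; i < count; i++) {
    printf("%s\n", symbols[i]);
  }
  // backtrace_symbols() returns a single malloc'ed block owning all strings.
  free(symbols);
}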
591 
592 // Constants used for mmap.
593 static const int kMmapFd = -1;
594 static const int kMmapFdOffset = 0;
595 
596 VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
597 
598 VirtualMemory::VirtualMemory(size_t size) {
599  address_ = ReserveRegion(size);
600  size_ = size;
601 }
602 
603 
604 VirtualMemory::VirtualMemory(size_t size, size_t alignment)
605  : address_(NULL), size_(0) {
606  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
607  size_t request_size = RoundUp(size + alignment,
608  static_cast<intptr_t>(OS::AllocateAlignment()));
609  void* reservation = mmap(OS::GetRandomMmapAddr(),
610  request_size,
611  PROT_NONE,
612  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
613  kMmapFd,
614  kMmapFdOffset);
615  if (reservation == MAP_FAILED) return;
616 
617  Address base = static_cast<Address>(reservation);
618  Address aligned_base = RoundUp(base, alignment);
619  ASSERT_LE(base, aligned_base);
620 
621  // Unmap extra memory reserved before and after the desired block.
622  if (aligned_base != base) {
623  size_t prefix_size = static_cast<size_t>(aligned_base - base);
624  OS::Free(base, prefix_size);
625  request_size -= prefix_size;
626  }
627 
628  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
629  ASSERT_LE(aligned_size, request_size);
630 
631  if (aligned_size != request_size) {
632  size_t suffix_size = request_size - aligned_size;
633  OS::Free(aligned_base + aligned_size, suffix_size);
634  request_size -= suffix_size;
635  }
636 
637  ASSERT(aligned_size == request_size);
638 
639  address_ = static_cast<void*>(aligned_base);
640  size_ = aligned_size;
641 }
642 
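// Worked example for the aligned constructor above (numbers are illustrative
// only): with a 4 KB page size, size = 40 KB and alignment = 64 KB give
// request_size = RoundUp(40 KB + 64 KB, 4 KB) = 104 KB. If mmap returns
// 0x7f0000013000, aligned_base is rounded up to 0x7f0000020000, the 52 KB
// prefix is unmapped, aligned_size = 40 KB, and the remaining 12 KB suffix is
// unmapped too, leaving exactly 40 KB reserved at the 64 KB boundary.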
643 
644 VirtualMemory::~VirtualMemory() {
645  if (IsReserved()) {
646  bool result = ReleaseRegion(address(), size());
647  ASSERT(result);
648  USE(result);
649  }
650 }
651 
652 
653 bool VirtualMemory::IsReserved() {
654  return address_ != NULL;
655 }
656 
657 
658 void VirtualMemory::Reset() {
659  address_ = NULL;
660  size_ = 0;
661 }
662 
663 
664 bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
665  return CommitRegion(address, size, is_executable);
666 }
667 
668 
669 bool VirtualMemory::Uncommit(void* address, size_t size) {
670  return UncommitRegion(address, size);
671 }
672 
673 
674 bool VirtualMemory::Guard(void* address) {
675  OS::Guard(address, OS::CommitPageSize());
676  return true;
677 }
678 
679 
680 void* VirtualMemory::ReserveRegion(size_t size) {
681  void* result = mmap(OS::GetRandomMmapAddr(),
682  size,
683  PROT_NONE,
684  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
685  kMmapFd,
686  kMmapFdOffset);
687 
688  if (result == MAP_FAILED) return NULL;
689 
690  return result;
691 }
692 
693 
694 bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
695  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
696  if (MAP_FAILED == mmap(base,
697  size,
698  prot,
699  MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
700  kMmapFd,
701  kMmapFdOffset)) {
702  return false;
703  }
704 
705  UpdateAllocatedSpaceLimits(base, size);
706  return true;
707 }
708 
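// A minimal standalone sketch of the reserve-then-commit mmap pattern that
// ReserveRegion()/CommitRegion() above implement; not part of the original
// file, and error handling is deliberately minimal.
#include <sys/mman.h>
#include <stddef.h>

static void* ExampleReserve(size_t size) {
  // Reserve address space only; MAP_NORESERVE avoids charging swap up front.
  void* p = mmap(NULL, size, PROT_NONE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  return p == MAP_FAILED ? NULL : p;
}

static bool ExampleCommit(void* base, size_t size) {
  // Re-map the same range in place with real access permissions.
  return mmap(base, size, PROT_READ | PROT_WRITE,
              MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) != MAP_FAILED;
}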
709 
710 bool VirtualMemory::UncommitRegion(void* base, size_t size) {
711  return mmap(base,
712  size,
713  PROT_NONE,
714  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
715  kMmapFd,
716  kMmapFdOffset) != MAP_FAILED;
717 }
718 
719 
720 bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
721  return munmap(base, size) == 0;
722 }
723 
724 
725 class Thread::PlatformData : public Malloced {
726  public:
727  PlatformData() : thread_(kNoThread) {}
728 
729  pthread_t thread_; // Thread handle for pthread.
730 };
731 
732 Thread::Thread(const Options& options)
733  : data_(new PlatformData()),
734  stack_size_(options.stack_size()) {
735  set_name(options.name());
736 }
737 
738 
739 Thread::~Thread() {
740  delete data_;
741 }
742 
743 
744 static void* ThreadEntry(void* arg) {
745  Thread* thread = reinterpret_cast<Thread*>(arg);
746  // This is also initialized by the first argument to pthread_create() but we
747  // don't know which thread will run first (the original thread or the new
748  // one) so we initialize it here too.
749 #ifdef PR_SET_NAME
750  prctl(PR_SET_NAME,
751  reinterpret_cast<unsigned long>(thread->name()), // NOLINT
752  0, 0, 0);
753 #endif
754  thread->data()->thread_ = pthread_self();
755  ASSERT(thread->data()->thread_ != kNoThread);
756  thread->Run();
757  return NULL;
758 }
759 
760 
761 void Thread::set_name(const char* name) {
762  strncpy(name_, name, sizeof(name_));
763  name_[sizeof(name_) - 1] = '\0';
764 }
765 
766 
767 void Thread::Start() {
768  pthread_attr_t* attr_ptr = NULL;
769  pthread_attr_t attr;
770  if (stack_size_ > 0) {
771  pthread_attr_init(&attr);
772  pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
773  attr_ptr = &attr;
774  }
775  int result = pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
776  CHECK_EQ(0, result);
777  ASSERT(data_->thread_ != kNoThread);
778 }
779 
780 
781 void Thread::Join() {
782  pthread_join(data_->thread_, NULL);
783 }
784 
785 
786 Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
787  pthread_key_t key;
788  int result = pthread_key_create(&key, NULL);
789  USE(result);
790  ASSERT(result == 0);
791  return static_cast<LocalStorageKey>(key);
792 }
793 
794 
795 void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
796  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
797  int result = pthread_key_delete(pthread_key);
798  USE(result);
799  ASSERT(result == 0);
800 }
801 
802 
803 void* Thread::GetThreadLocal(LocalStorageKey key) {
804  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
805  return pthread_getspecific(pthread_key);
806 }
807 
808 
809 void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
810  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
811  pthread_setspecific(pthread_key, value);
812 }
813 
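// A minimal standalone sketch of the pthread thread-local-storage calls that
// the Thread thread-local wrappers above delegate to; not part of the
// original file.
#include <pthread.h>

static void ExampleThreadLocalRoundTrip() {
  pthread_key_t key;
  pthread_key_create(&key, NULL);     // NULL: no destructor for stored values.
  int value = 42;
  pthread_setspecific(key, &value);   // Visible only from the calling thread.
  void* stored = pthread_getspecific(key);
  (void) stored;                      // Would be &value on this thread.
  pthread_key_delete(key);
}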
814 
815 void Thread::YieldCPU() {
816  sched_yield();
817 }
818 
819 
820 class LinuxMutex : public Mutex {
821  public:
822  LinuxMutex() {
823  pthread_mutexattr_t attrs;
824  int result = pthread_mutexattr_init(&attrs);
825  ASSERT(result == 0);
826  result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
827  ASSERT(result == 0);
828  result = pthread_mutex_init(&mutex_, &attrs);
829  ASSERT(result == 0);
830  USE(result);
831  }
832 
833  virtual ~LinuxMutex() { pthread_mutex_destroy(&mutex_); }
834 
835  virtual int Lock() {
836  int result = pthread_mutex_lock(&mutex_);
837  return result;
838  }
839 
840  virtual int Unlock() {
841  int result = pthread_mutex_unlock(&mutex_);
842  return result;
843  }
844 
845  virtual bool TryLock() {
846  int result = pthread_mutex_trylock(&mutex_);
847  // Return false if the lock is busy and locking failed.
848  if (result == EBUSY) {
849  return false;
850  }
851  ASSERT(result == 0); // Verify no other errors.
852  return true;
853  }
854 
855  private:
856  pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
857 };
858 
859 
860 Mutex* OS::CreateMutex() {
861  return new LinuxMutex();
862 }
863 
864 
865 class LinuxSemaphore : public Semaphore {
866  public:
867  explicit LinuxSemaphore(int count) { sem_init(&sem_, 0, count); }
868  virtual ~LinuxSemaphore() { sem_destroy(&sem_); }
869 
870  virtual void Wait();
871  virtual bool Wait(int timeout);
872  virtual void Signal() { sem_post(&sem_); }
873  private:
874  sem_t sem_;
875 };
876 
877 
878 void LinuxSemaphore::Wait() {
879  while (true) {
880  int result = sem_wait(&sem_);
881  if (result == 0) return; // Successfully got semaphore.
882  CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
883  }
884 }
885 
886 
887 #ifndef TIMEVAL_TO_TIMESPEC
888 #define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
889  (ts)->tv_sec = (tv)->tv_sec; \
890  (ts)->tv_nsec = (tv)->tv_usec * 1000; \
891 } while (false)
892 #endif
893 
894 
895 bool LinuxSemaphore::Wait(int timeout) {
896  const long kOneSecondMicros = 1000000; // NOLINT
897 
898  // Split timeout into second and microsecond parts.
899  struct timeval delta;
900  delta.tv_usec = timeout % kOneSecondMicros;
901  delta.tv_sec = timeout / kOneSecondMicros;
902 
903  struct timeval current_time;
904  // Get the current time.
905  if (gettimeofday(&current_time, NULL) == -1) {
906  return false;
907  }
908 
909  // Calculate time for end of timeout.
910  struct timeval end_time;
911  timeradd(&current_time, &delta, &end_time);
912 
913  struct timespec ts;
914  TIMEVAL_TO_TIMESPEC(&end_time, &ts);
915  // Wait for the semaphore to be signalled or for the timeout to expire.
916  while (true) {
917  int result = sem_timedwait(&sem_, &ts);
918  if (result == 0) return true; // Successfully got semaphore.
919  if (result > 0) {
920  // For glibc prior to 2.3.4 sem_timedwait returns the error instead of -1.
921  errno = result;
922  result = -1;
923  }
924  if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
925  CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
926  }
927 }
928 
929 
930 Semaphore* OS::CreateSemaphore(int count) {
931  return new LinuxSemaphore(count);
932 }
933 
934 
935 #if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T)
936 
937 // Not all versions of Android's C library provide ucontext_t.
938 // Detect this and provide custom but compatible definitions. Note that these
939 // follow the GLibc naming convention to access register values from
940 // mcontext_t.
941 //
942 // See http://code.google.com/p/android/issues/detail?id=34784
943 
944 #if defined(__arm__)
945 
946 typedef struct sigcontext mcontext_t;
947 
948 typedef struct ucontext {
949  uint32_t uc_flags;
950  struct ucontext* uc_link;
951  stack_t uc_stack;
952  mcontext_t uc_mcontext;
953  // Other fields are not used by V8, don't define them here.
954 } ucontext_t;
955 
956 #elif defined(__mips__)
957 // MIPS version of sigcontext, for Android bionic.
958 typedef struct {
959  uint32_t regmask;
960  uint32_t status;
961  uint64_t pc;
962  uint64_t gregs[32];
963  uint64_t fpregs[32];
964  uint32_t acx;
965  uint32_t fpc_csr;
966  uint32_t fpc_eir;
967  uint32_t used_math;
968  uint32_t dsp;
969  uint64_t mdhi;
970  uint64_t mdlo;
971  uint32_t hi1;
972  uint32_t lo1;
973  uint32_t hi2;
974  uint32_t lo2;
975  uint32_t hi3;
976  uint32_t lo3;
977 } mcontext_t;
978 
979 typedef struct ucontext {
980  uint32_t uc_flags;
981  struct ucontext* uc_link;
982  stack_t uc_stack;
983  mcontext_t uc_mcontext;
984  // Other fields are not used by V8, don't define them here.
985 } ucontext_t;
986 
987 #elif defined(__i386__)
988 // x86 version for Android.
989 typedef struct {
990  uint32_t gregs[19];
991  void* fpregs;
992  uint32_t oldmask;
993  uint32_t cr2;
994 } mcontext_t;
995 
996 typedef uint32_t kernel_sigset_t[2]; // x86 kernel uses 64-bit signal masks
997 typedef struct ucontext {
998  uint32_t uc_flags;
999  struct ucontext* uc_link;
1000  stack_t uc_stack;
1001  mcontext_t uc_mcontext;
1002  // Other fields are not used by V8, don't define them here.
1003 } ucontext_t;
1004 enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };
1005 #endif
1006 
1007 #endif // __ANDROID__ && !defined(__BIONIC_HAVE_UCONTEXT_T)
1008 
1009 static int GetThreadID() {
1010 #if defined(__ANDROID__)
1011  // Android's C library provides gettid(2).
1012  return gettid();
1013 #else
1014  // Glibc doesn't provide a wrapper for gettid(2).
1015  return syscall(SYS_gettid);
1016 #endif
1017 }
1018 
1019 
1020 static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
1021  USE(info);
1022  if (signal != SIGPROF) return;
1023  Isolate* isolate = Isolate::UncheckedCurrent();
1024  if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
1025  // We require a fully initialized and entered isolate.
1026  return;
1027  }
1028  if (v8::Locker::IsActive() &&
1029  !isolate->thread_manager()->IsLockedByCurrentThread()) {
1030  return;
1031  }
1032 
1033  Sampler* sampler = isolate->logger()->sampler();
1034  if (sampler == NULL || !sampler->IsActive()) return;
1035 
1036  TickSample sample_obj;
1037  TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
1038  if (sample == NULL) sample = &sample_obj;
1039 
1040  // Extracting the sample from the context is extremely machine dependent.
1041  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
1042  mcontext_t& mcontext = ucontext->uc_mcontext;
1043  sample->state = isolate->current_vm_state();
1044 #if V8_HOST_ARCH_IA32
1045  sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
1046  sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
1047  sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
1048 #elif V8_HOST_ARCH_X64
1049  sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
1050  sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
1051  sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
1052 #elif V8_HOST_ARCH_ARM
1053 #if defined(__GLIBC__) && !defined(__UCLIBC__) && \
1054  (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
1055  // Old GLibc ARM versions used a gregs[] array to access the register
1056  // values from mcontext_t.
1057  sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
1058  sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
1059  sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
1060 #else
1061  sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
1062  sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
1063  sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
1064 #endif // defined(__GLIBC__) && !defined(__UCLIBC__) &&
1065  // (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
1066 #elif V8_HOST_ARCH_MIPS
1067  sample->pc = reinterpret_cast<Address>(mcontext.pc);
1068  sample->sp = reinterpret_cast<Address>(mcontext.gregs[29]);
1069  sample->fp = reinterpret_cast<Address>(mcontext.gregs[30]);
1070 #endif // V8_HOST_ARCH_*
1071  sampler->SampleStack(sample);
1072  sampler->Tick(sample);
1073 }
1074 
1075 
1076 class Sampler::PlatformData : public Malloced {
1077  public:
1078  PlatformData() : vm_tid_(GetThreadID()) {}
1079 
1080  int vm_tid() const { return vm_tid_; }
1081 
1082  private:
1083  const int vm_tid_;
1084 };
1085 
1086 
1087 class SignalSender : public Thread {
1088  public:
1089  enum SleepInterval {
1090  HALF_INTERVAL,
1091  FULL_INTERVAL
1092  };
1093 
1094  static const int kSignalSenderStackSize = 64 * KB;
1095 
1096  explicit SignalSender(int interval)
1097  : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
1098  vm_tgid_(getpid()),
1099  interval_(interval) {}
1100 
1101  static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
1102  static void TearDown() { delete mutex_; }
1103 
1104  static void InstallSignalHandler() {
1105  struct sigaction sa;
1106  sa.sa_sigaction = ProfilerSignalHandler;
1107  sigemptyset(&sa.sa_mask);
1108  sa.sa_flags = SA_RESTART | SA_SIGINFO;
1109  signal_handler_installed_ =
1110  (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
1111  }
1112 
1113  static void RestoreSignalHandler() {
1114  if (signal_handler_installed_) {
1115  sigaction(SIGPROF, &old_signal_handler_, 0);
1116  signal_handler_installed_ = false;
1117  }
1118  }
1119 
1120  static void AddActiveSampler(Sampler* sampler) {
1121  ScopedLock lock(mutex_);
1122  SamplerRegistry::AddActiveSampler(sampler);
1123  if (instance_ == NULL) {
1124  // Start a thread that will send the SIGPROF signal to VM threads
1125  // when CPU profiling is enabled.
1126  instance_ = new SignalSender(sampler->interval());
1127  instance_->Start();
1128  } else {
1129  ASSERT(instance_->interval_ == sampler->interval());
1130  }
1131  }
1132 
1133  static void RemoveActiveSampler(Sampler* sampler) {
1134  ScopedLock lock(mutex_);
1135  SamplerRegistry::RemoveActiveSampler(sampler);
1136  if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
1137  RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
1138  delete instance_;
1139  instance_ = NULL;
1140  RestoreSignalHandler();
1141  }
1142  }
1143 
1144  // Implement Thread::Run().
1145  virtual void Run() {
1146  SamplerRegistry::State state;
1147  while ((state = SamplerRegistry::GetState()) !=
1148  SamplerRegistry::HAS_NO_SAMPLERS) {
1149  bool cpu_profiling_enabled =
1150  (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
1151  bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
1152  if (cpu_profiling_enabled && !signal_handler_installed_) {
1153  InstallSignalHandler();
1154  } else if (!cpu_profiling_enabled && signal_handler_installed_) {
1155  RestoreSignalHandler();
1156  }
1157  // When CPU profiling is enabled both JavaScript and C++ code are
1158  // profiled. We must not suspend.
1159  if (!cpu_profiling_enabled) {
1160  if (rate_limiter_.SuspendIfNecessary()) continue;
1161  }
1162  if (cpu_profiling_enabled && runtime_profiler_enabled) {
1163  if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
1164  return;
1165  }
1166  Sleep(HALF_INTERVAL);
1167  if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
1168  return;
1169  }
1170  Sleep(HALF_INTERVAL);
1171  } else {
1172  if (cpu_profiling_enabled) {
1173  if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
1174  this)) {
1175  return;
1176  }
1177  }
1178  if (runtime_profiler_enabled) {
1179  if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
1180  NULL)) {
1181  return;
1182  }
1183  }
1184  Sleep(FULL_INTERVAL);
1185  }
1186  }
1187  }
1188 
1189  static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
1190  if (!sampler->IsProfiling()) return;
1191  SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
1192  sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
1193  }
1194 
1195  static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
1196  if (!sampler->isolate()->IsInitialized()) return;
1197  sampler->isolate()->runtime_profiler()->NotifyTick();
1198  }
1199 
1200  void SendProfilingSignal(int tid) {
1201  if (!signal_handler_installed_) return;
1202  // Glibc doesn't provide a wrapper for tgkill(2).
1203 #if defined(ANDROID)
1204  syscall(__NR_tgkill, vm_tgid_, tid, SIGPROF);
1205 #else
1206  syscall(SYS_tgkill, vm_tgid_, tid, SIGPROF);
1207 #endif
1208  }
1209 
1210  void Sleep(SleepInterval full_or_half) {
1211  // Convert ms to us and subtract 100 us to compensate for delays
1212  // occurring during signal delivery.
1213  useconds_t interval = interval_ * 1000 - 100;
1214  if (full_or_half == HALF_INTERVAL) interval /= 2;
1215 #if defined(ANDROID)
1216  usleep(interval);
1217 #else
1218  int result = usleep(interval);
1219 #ifdef DEBUG
1220  if (result != 0 && errno != EINTR) {
1221  fprintf(stderr,
1222  "SignalSender usleep error; interval = %u, errno = %d\n",
1223  interval,
1224  errno);
1225  ASSERT(result == 0 || errno == EINTR);
1226  }
1227 #endif // DEBUG
1228  USE(result);
1229 #endif // ANDROID
1230  }
1231 
1232  const int vm_tgid_;
1233  const int interval_;
1234  RuntimeProfilerRateLimiter rate_limiter_;
1235 
1236  // Protects the process wide state below.
1237  static Mutex* mutex_;
1238  static SignalSender* instance_;
1239  static bool signal_handler_installed_;
1240  static struct sigaction old_signal_handler_;
1241 
1242  private:
1243  DISALLOW_COPY_AND_ASSIGN(SignalSender);
1244 };
1245 
1246 
1247 Mutex* SignalSender::mutex_ = NULL;
1248 SignalSender* SignalSender::instance_ = NULL;
1249 struct sigaction SignalSender::old_signal_handler_;
1250 bool SignalSender::signal_handler_installed_ = false;
1251 
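// A minimal standalone sketch of the SIGPROF-plus-tgkill mechanism that
// SignalSender above is built on; not part of the original file, error
// handling is omitted, and the handler body is only a placeholder (the real
// work happens in ProfilerSignalHandler() earlier in this file).
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static void ExampleProfileHandler(int sig, siginfo_t* info, void* context) {
  (void) sig; (void) info; (void) context;
}

static void ExampleInstallAndPing(int tid) {
  struct sigaction sa;
  sa.sa_sigaction = ExampleProfileHandler;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_RESTART | SA_SIGINFO;
  sigaction(SIGPROF, &sa, NULL);
  // Deliver SIGPROF to one specific thread of this process.
  syscall(SYS_tgkill, getpid(), tid, SIGPROF);
}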
1252 
1253 void OS::SetUp() {
1254  // Seed the random number generator. We preserve microsecond resolution.
1255  uint64_t seed = Ticks() ^ (getpid() << 16);
1256  srandom(static_cast<unsigned int>(seed));
1257  limit_mutex = CreateMutex();
1258 
1259 #ifdef __arm__
1260  // When running on ARM hardware check that the EABI used by V8 and
1261  // by the C code is the same.
1262  bool hard_float = OS::ArmUsingHardFloat();
1263  if (hard_float) {
1264 #if !USE_EABI_HARDFLOAT
1265  PrintF("ERROR: Binary compiled with -mfloat-abi=hard but without "
1266  "-DUSE_EABI_HARDFLOAT\n");
1267  exit(1);
1268 #endif
1269  } else {
1270 #if USE_EABI_HARDFLOAT
1271  PrintF("ERROR: Binary not compiled with -mfloat-abi=hard but with "
1272  "-DUSE_EABI_HARDFLOAT\n");
1273  exit(1);
1274 #endif
1275  }
1276 #endif
1277  SignalSender::SetUp();
1278 }
1279 
1280 
1281 void OS::TearDown() {
1282  SignalSender::TearDown();
1283  delete limit_mutex;
1284 }
1285 
1286 
1287 Sampler::Sampler(Isolate* isolate, int interval)
1288  : isolate_(isolate),
1289  interval_(interval),
1290  profiling_(false),
1291  active_(false),
1292  samples_taken_(0) {
1293  data_ = new PlatformData;
1294 }
1295 
1296 
1297 Sampler::~Sampler() {
1298  ASSERT(!IsActive());
1299  delete data_;
1300 }
1301 
1302 
1303 void Sampler::Start() {
1304  ASSERT(!IsActive());
1305  SetActive(true);
1306  SignalSender::AddActiveSampler(this);
1307 }
1308 
1309 
1310 void Sampler::Stop() {
1311  ASSERT(IsActive());
1312  SignalSender::RemoveActiveSampler(this);
1313  SetActive(false);
1314 }
1315 
1316 
1317 } } // namespace v8::internal