v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
platform-linux.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 // Platform-specific code for Linux goes here. For the POSIX-compatible
29 // parts, the implementation is in platform-posix.cc.
30 
31 #include <pthread.h>
32 #include <semaphore.h>
33 #include <signal.h>
34 #include <sys/prctl.h>
35 #include <sys/time.h>
36 #include <sys/resource.h>
37 #include <sys/syscall.h>
38 #include <sys/types.h>
39 #include <stdlib.h>
40 
41 // Ubuntu Dapper requires memory pages to be marked as
42 // executable. Otherwise, the OS raises an exception when executing code
43 // in that page.
44 #include <sys/types.h> // mmap & munmap
45 #include <sys/mman.h> // mmap & munmap
46 #include <sys/stat.h> // open
47 #include <fcntl.h> // open
48 #include <unistd.h> // sysconf
49 #include <strings.h> // index
50 #include <errno.h>
51 #include <stdarg.h>
52 
53 // GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
54 // Old versions of the C library <signal.h> didn't define the type.
55 #if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
56  (defined(__arm__) || defined(__aarch64__)) && \
57  !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
58 #include <asm/sigcontext.h>
59 #endif
60 
61 #if defined(LEAK_SANITIZER)
62 #include <sanitizer/lsan_interface.h>
63 #endif
64 
65 #undef MAP_TYPE
66 
67 #include "v8.h"
68 
69 #include "platform.h"
70 #include "v8threads.h"
71 #include "vm-state-inl.h"
72 
73 
74 namespace v8 {
75 namespace internal {
76 
77 
78 #ifdef __arm__
79 
80 bool OS::ArmUsingHardFloat() {
81  // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
82  // the Floating Point ABI used (PCS stands for Procedure Call Standard).
83  // We use these as well as a couple of other defines to statically determine
84  // what FP ABI is used.
85  // GCC versions 4.4 and below don't support hard-fp.
86  // GCC version 4.5 may support hard-fp without defining __ARM_PCS or
87  // __ARM_PCS_VFP.
88 
89 #define GCC_VERSION (__GNUC__ * 10000 \
90  + __GNUC_MINOR__ * 100 \
91  + __GNUC_PATCHLEVEL__)
92 #if GCC_VERSION >= 40600
93 #if defined(__ARM_PCS_VFP)
94  return true;
95 #else
96  return false;
97 #endif
98 
99 #elif GCC_VERSION < 40500
100  return false;
101 
102 #else
103 #if defined(__ARM_PCS_VFP)
104  return true;
105 #elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \
106  !defined(__VFP_FP__)
107  return false;
108 #else
109 #error "Your version of GCC does not report the FP ABI compiled for. " \
110  "Please report it on this issue: " \
111  "http://code.google.com/p/v8/issues/detail?id=2140"
112 
113 #endif
114 #endif
115 #undef GCC_VERSION
116 }
117 
118 #endif // def __arm__
119 
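The version check above reduces to simple arithmetic. A minimal standalone sketch (hypothetical GCC 4.6.3 values, not taken from this file) of how the GCC_VERSION encoding classifies a toolchain:

// Sketch: the same major*10000 + minor*100 + patch encoding used above.
// For GCC 4.6.3 the value is 40603 >= 40600, so only __ARM_PCS_VFP matters.
#include <cstdio>

int main() {
  const int kMajor = 4, kMinor = 6, kPatch = 3;  // hypothetical GCC 4.6.3
  const int gcc_version = kMajor * 10000 + kMinor * 100 + kPatch;
  std::printf("GCC_VERSION = %d, hard-float decided by __ARM_PCS_VFP: %s\n",
              gcc_version, gcc_version >= 40600 ? "yes" : "no");
  return 0;
}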
120 
121 const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
122  if (std::isnan(time)) return "";
123  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
124  struct tm* t = localtime(&tv);
125  if (NULL == t) return "";
126  return t->tm_zone;
127 }
128 
129 
130 double OS::LocalTimeOffset(TimezoneCache* cache) {
131  time_t tv = time(NULL);
132  struct tm* t = localtime(&tv);
133  // tm_gmtoff includes any daylight savings offset, so subtract it.
134  return static_cast<double>(t->tm_gmtoff * msPerSecond -
135  (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
136 }
137 
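As a worked example of the subtraction above (assumed values, not from this file): for Central European Summer Time, tm_gmtoff is 7200 seconds and tm_isdst is positive, so the DST hour is removed and the standard UTC+1 offset of 3600000 ms is returned. A minimal sketch of the same computation:

// Sketch (assumed values): LocalTimeOffset() behaviour for CEST, where
// tm_gmtoff == 7200 s and tm_isdst > 0. The DST hour is subtracted, leaving
// the standard-time offset of 3600000 ms (UTC+1).
#include <cstdio>

int main() {
  const long tm_gmtoff = 7200;   // seconds east of UTC, DST included
  const int tm_isdst = 1;        // daylight saving in effect
  const double msPerSecond = 1000.0;
  double offset = tm_gmtoff * msPerSecond -
                  (tm_isdst > 0 ? 3600 * msPerSecond : 0);
  std::printf("local time offset = %.0f ms\n", offset);  // 3600000
  return 0;
}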
138 
139 void* OS::Allocate(const size_t requested,
140  size_t* allocated,
141  bool is_executable) {
142  const size_t msize = RoundUp(requested, AllocateAlignment());
143  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
144  void* addr = OS::GetRandomMmapAddr();
145  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
146  if (mbase == MAP_FAILED) {
147  LOG(i::Isolate::Current(),
148  StringEvent("OS::Allocate", "mmap failed"));
149  return NULL;
150  }
151  *allocated = msize;
152  return mbase;
153 }
154 
155 
156 class PosixMemoryMappedFile : public OS::MemoryMappedFile {
157  public:
158  PosixMemoryMappedFile(FILE* file, void* memory, int size)
159  : file_(file), memory_(memory), size_(size) { }
160  virtual ~PosixMemoryMappedFile();
161  virtual void* memory() { return memory_; }
162  virtual int size() { return size_; }
163  private:
164  FILE* file_;
165  void* memory_;
166  int size_;
167 };
168 
169 
170 OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
171  FILE* file = fopen(name, "r+");
172  if (file == NULL) return NULL;
173 
174  fseek(file, 0, SEEK_END);
175  int size = ftell(file);
176 
177  void* memory =
178  mmap(OS::GetRandomMmapAddr(),
179  size,
180  PROT_READ | PROT_WRITE,
181  MAP_SHARED,
182  fileno(file),
183  0);
184  return new PosixMemoryMappedFile(file, memory, size);
185 }
186 
187 
188 OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
189  void* initial) {
190  FILE* file = fopen(name, "w+");
191  if (file == NULL) return NULL;
192  int result = fwrite(initial, size, 1, file);
193  if (result < 1) {
194  fclose(file);
195  return NULL;
196  }
197  void* memory =
198  mmap(OS::GetRandomMmapAddr(),
199  size,
200  PROT_READ | PROT_WRITE,
201  MAP_SHARED,
202  fileno(file),
203  0);
204  return new PosixMemoryMappedFile(file, memory, size);
205 }
206 
207 
208 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
209  if (memory_) OS::Free(memory_, size_);
210  fclose(file_);
211 }
212 
213 
214 void OS::LogSharedLibraryAddresses(Isolate* isolate) {
215  // This function assumes that the layout of the file is as follows:
216  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
217  // If we encounter an unexpected situation we abort scanning further entries.
218  FILE* fp = fopen("/proc/self/maps", "r");
219  if (fp == NULL) return;
220 
221  // Allocate enough room to be able to store a full file name.
222  const int kLibNameLen = FILENAME_MAX + 1;
223  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
224 
225  // This loop will terminate once the scanning hits an EOF.
226  while (true) {
227  uintptr_t start, end;
228  char attr_r, attr_w, attr_x, attr_p;
229  // Parse the addresses and permission bits at the beginning of the line.
230  if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
231  if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
232 
233  int c;
234  if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
235  // Found a read-only executable entry. Skip characters until we reach
236  // the beginning of the filename or the end of the line.
237  do {
238  c = getc(fp);
239  } while ((c != EOF) && (c != '\n') && (c != '/') && (c != '['));
240  if (c == EOF) break; // EOF: Was unexpected, just exit.
241 
242  // Process the filename if found.
243  if ((c == '/') || (c == '[')) {
244  // Push the '/' or '[' back into the stream to be read below.
245  ungetc(c, fp);
246 
247  // Read to the end of the line. Exit if the read fails.
248  if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
249 
250  // Drop the newline character read by fgets. We do not need to check
251  // for a zero-length string because we know that we at least read the
252  // '/' or '[' character.
253  lib_name[strlen(lib_name) - 1] = '\0';
254  } else {
255  // No library name found, just record the raw address range.
256  snprintf(lib_name, kLibNameLen,
257  "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
258  }
259  LOG(isolate, SharedLibraryEvent(lib_name, start, end));
260  } else {
261  // Entry not describing executable data. Skip to end of line to set up
262  // reading the next entry.
263  do {
264  c = getc(fp);
265  } while ((c != EOF) && (c != '\n'));
266  if (c == EOF) break;
267  }
268  }
269  free(lib_name);
270  fclose(fp);
271 }
272 
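The comment at the top of LogSharedLibraryAddresses describes the /proc/self/maps line format it expects. A minimal, self-contained sketch of the same scan (illustrative only: it prints executable mappings instead of logging them through the isolate, and parses whole lines with sscanf rather than the incremental fscanf approach above):

#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  FILE* fp = fopen("/proc/self/maps", "r");
  if (fp == NULL) return 1;
  char line[1024];
  while (fgets(line, sizeof(line), fp) != NULL) {
    uintptr_t start, end;
    char perms[5];
    // Same layout as above: hex_start_addr-hex_end_addr rwxp ... [name]
    if (sscanf(line, "%" SCNxPTR "-%" SCNxPTR " %4s", &start, &end, perms) != 3)
      continue;
    if (perms[0] == 'r' && perms[1] != 'w' && perms[2] == 'x') {
      // Read-only executable entry: report it with its file name if present.
      const char* name = strchr(line, '/');
      if (name == NULL) name = strchr(line, '[');
      printf("%" PRIxPTR "-%" PRIxPTR " %s", start, end,
             name != NULL ? name : "\n");
    }
  }
  fclose(fp);
  return 0;
}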
273 
274 void OS::SignalCodeMovingGC() {
275  // Support for ll_prof.py.
276  //
277  // The Linux profiler built into the kernel logs all mmap's with
278  // PROT_EXEC so that analysis tools can properly attribute ticks. We
279  // do a mmap with a name known by ll_prof.py and immediately munmap
280  // it. This injects a GC marker into the stream of events generated
281  // by the kernel and allows us to synchronize V8 code log and the
282  // kernel log.
283  int size = sysconf(_SC_PAGESIZE);
284  FILE* f = fopen(FLAG_gc_fake_mmap, "w+");
285  if (f == NULL) {
286  OS::PrintError("Failed to open %s\n", FLAG_gc_fake_mmap);
287  OS::Abort();
288  }
289  void* addr = mmap(OS::GetRandomMmapAddr(),
290  size,
291 #if defined(__native_client__)
292  // The Native Client port of V8 uses an interpreter,
293  // so code pages don't need PROT_EXEC.
294  PROT_READ,
295 #else
296  PROT_READ | PROT_EXEC,
297 #endif
298  MAP_PRIVATE,
299  fileno(f),
300  0);
301  ASSERT(addr != MAP_FAILED);
302  OS::Free(addr, size);
303  fclose(f);
304 }
305 
306 
307 // Constants used for mmap.
308 static const int kMmapFd = -1;
309 static const int kMmapFdOffset = 0;
310 
311 
312 VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
313 
314 
315 VirtualMemory::VirtualMemory(size_t size)
316  : address_(ReserveRegion(size)), size_(size) { }
317 
318 
319 VirtualMemory::VirtualMemory(size_t size, size_t alignment)
320  : address_(NULL), size_(0) {
321  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
322  size_t request_size = RoundUp(size + alignment,
323  static_cast<intptr_t>(OS::AllocateAlignment()));
324  void* reservation = mmap(OS::GetRandomMmapAddr(),
325  request_size,
326  PROT_NONE,
327  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
328  kMmapFd,
329  kMmapFdOffset);
330  if (reservation == MAP_FAILED) return;
331 
332  Address base = static_cast<Address>(reservation);
333  Address aligned_base = RoundUp(base, alignment);
334  ASSERT_LE(base, aligned_base);
335 
336  // Unmap extra memory reserved before and after the desired block.
337  if (aligned_base != base) {
338  size_t prefix_size = static_cast<size_t>(aligned_base - base);
339  OS::Free(base, prefix_size);
340  request_size -= prefix_size;
341  }
342 
343  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
344  ASSERT_LE(aligned_size, request_size);
345 
346  if (aligned_size != request_size) {
347  size_t suffix_size = request_size - aligned_size;
348  OS::Free(aligned_base + aligned_size, suffix_size);
349  request_size -= suffix_size;
350  }
351 
352  ASSERT(aligned_size == request_size);
353 
354  address_ = static_cast<void*>(aligned_base);
355  size_ = aligned_size;
356 #if defined(LEAK_SANITIZER)
357  __lsan_register_root_region(address_, size_);
358 #endif
359 }
360 
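A worked example of the prefix/suffix trimming above (assumed numbers, not from this file): requesting size = 64 KB with alignment = 1 MB reserves request_size = 64 KB + 1 MB; if mmap returns base = 0x7f0012345000, then aligned_base rounds up to 0x7f0012400000, the 0xbb000-byte prefix is unmapped, and everything past aligned_base + 64 KB is unmapped as well. A small sketch of the arithmetic (assuming a power-of-two alignment):

// Sketch (assumed inputs) of the prefix/suffix trimming performed above.
#include <cstdint>
#include <cstdio>

static uintptr_t RoundUpTo(uintptr_t x, uintptr_t m) {  // m must be a power of two
  return (x + m - 1) & ~(m - 1);
}

int main() {
  const uintptr_t size = 0x10000;        // 64 KB requested (page-aligned)
  const uintptr_t alignment = 0x100000;  // 1 MB alignment
  uintptr_t request_size = size + alignment;
  uintptr_t base = 0x7f0012345000;       // hypothetical mmap result
  uintptr_t aligned_base = RoundUpTo(base, alignment);
  uintptr_t prefix = aligned_base - base;           // unmapped before the block
  uintptr_t suffix = request_size - prefix - size;  // unmapped after the block
  std::printf("prefix=%#lx suffix=%#lx kept=[%#lx, %#lx)\n",
              (unsigned long)prefix, (unsigned long)suffix,
              (unsigned long)aligned_base, (unsigned long)(aligned_base + size));
  return 0;
}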
361 
362 VirtualMemory::~VirtualMemory() {
363  if (IsReserved()) {
364  bool result = ReleaseRegion(address(), size());
365  ASSERT(result);
366  USE(result);
367  }
368 }
369 
370 
371 bool VirtualMemory::IsReserved() {
372  return address_ != NULL;
373 }
374 
375 
376 void VirtualMemory::Reset() {
377  address_ = NULL;
378  size_ = 0;
379 }
380 
381 
382 bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
383  return CommitRegion(address, size, is_executable);
384 }
385 
386 
387 bool VirtualMemory::Uncommit(void* address, size_t size) {
388  return UncommitRegion(address, size);
389 }
390 
391 
392 bool VirtualMemory::Guard(void* address) {
393  OS::Guard(address, OS::CommitPageSize());
394  return true;
395 }
396 
397 
398 void* VirtualMemory::ReserveRegion(size_t size) {
399  void* result = mmap(OS::GetRandomMmapAddr(),
400  size,
401  PROT_NONE,
402  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
403  kMmapFd,
404  kMmapFdOffset);
405 
406  if (result == MAP_FAILED) return NULL;
407 
408 #if defined(LEAK_SANITIZER)
409  __lsan_register_root_region(result, size);
410 #endif
411  return result;
412 }
413 
414 
415 bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
416 #if defined(__native_client__)
417  // The Native Client port of V8 uses an interpreter,
418  // so code pages don't need PROT_EXEC.
419  int prot = PROT_READ | PROT_WRITE;
420 #else
421  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
422 #endif
423  if (MAP_FAILED == mmap(base,
424  size,
425  prot,
426  MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
427  kMmapFd,
428  kMmapFdOffset)) {
429  return false;
430  }
431 
432  return true;
433 }
434 
435 
436 bool VirtualMemory::UncommitRegion(void* base, size_t size) {
437  return mmap(base,
438  size,
439  PROT_NONE,
440  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
441  kMmapFd,
442  kMmapFdOffset) != MAP_FAILED;
443 }
444 
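Taken together, ReserveRegion, CommitRegion and UncommitRegion implement a reserve-then-commit scheme: address space is claimed with PROT_NONE and MAP_NORESERVE, and pages are made usable (or inaccessible again) by re-mmapping the same range with MAP_FIXED. A minimal standalone sketch of that lifecycle with raw mmap calls (illustrative only, outside the VirtualMemory class):

#include <sys/mman.h>
#include <cstring>

int main() {
  const size_t size = 1 << 20;  // 1 MB region
  // Reserve: no access, no swap accounting, just address space.
  void* base = mmap(NULL, size, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (base == MAP_FAILED) return 1;
  // Commit: re-map the same range in place as readable/writable.
  if (mmap(base, size, PROT_READ | PROT_WRITE,
           MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED)
    return 1;
  memset(base, 0xAB, size);  // the memory is now usable
  // Uncommit: drop the pages and return to an inaccessible reservation.
  if (mmap(base, size, PROT_NONE,
           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
           -1, 0) == MAP_FAILED)
    return 1;
  // Release: hand the address range back to the kernel.
  return munmap(base, size) == 0 ? 0 : 1;
}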
445 
446 bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
447 #if defined(LEAK_SANITIZER)
448  __lsan_unregister_root_region(base, size);
449 #endif
450  return munmap(base, size) == 0;
451 }
452 
453 
454 bool VirtualMemory::HasLazyCommits() {
455  return true;
456 }
457 
458 } } // namespace v8::internal