v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine.
platform-openbsd.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Platform-specific code for OpenBSD and NetBSD goes here. For the
// POSIX-compatible parts, the implementation is in platform-posix.cc.

#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <stdlib.h>

#include <sys/types.h>  // mmap & munmap
#include <sys/mman.h>   // mmap & munmap
#include <sys/stat.h>   // open
#include <fcntl.h>      // open
#include <unistd.h>     // sysconf
#include <strings.h>    // index
#include <errno.h>
#include <stdarg.h>

#undef MAP_TYPE

#include "v8.h"

#include "platform.h"
#include "v8threads.h"
#include "vm-state-inl.h"


namespace v8 {
namespace internal {


const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
  if (std::isnan(time)) return "";
  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
  struct tm* t = localtime(&tv);
  if (NULL == t) return "";
  return t->tm_zone;
}


double OS::LocalTimeOffset(TimezoneCache* cache) {
  time_t tv = time(NULL);
  struct tm* t = localtime(&tv);
  // tm_gmtoff includes any daylight savings offset, so subtract it.
  return static_cast<double>(t->tm_gmtoff * msPerSecond -
                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}

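// Worked example (illustrative sketch, not part of the original file) of the
// offset arithmetic in OS::LocalTimeOffset above: for a zone at UTC-7 that is
// currently observing DST, localtime() reports tm_gmtoff == -6 * 3600 and
// tm_isdst > 0, so the function returns
//   -6 * 3600 * 1000 - 3600 * 1000 = -25200000 ms,
// i.e. the standard-time offset of UTC-7. The helper below mirrors that
// computation in isolation.
static double StandardTimeOffsetMsSketch(long gmtoff_seconds, int isdst) {
  const double ms_per_second = 1000;
  return static_cast<double>(gmtoff_seconds) * ms_per_second -
         (isdst > 0 ? 3600 * ms_per_second : 0);
}

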
void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool is_executable) {
  const size_t msize = RoundUp(requested, AllocateAlignment());
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  void* addr = OS::GetRandomMmapAddr();
  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
  if (mbase == MAP_FAILED) {
    LOG(i::Isolate::Current(),
        StringEvent("OS::Allocate", "mmap failed"));
    return NULL;
  }
  *allocated = msize;
  return mbase;
}
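
// Usage sketch (illustrative, not part of the original file): allocate one
// page of read-write memory through OS::Allocate above and free it again.
// The 4096-byte request is an assumption; |allocated| receives the request
// rounded up to OS::AllocateAlignment().
static void AllocateUsageSketch() {
  size_t allocated = 0;
  void* buffer = OS::Allocate(4096, &allocated, false);  // not executable
  if (buffer != NULL) {
    OS::Free(buffer, allocated);
  }
}
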


class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
    : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }
 private:
  FILE* file_;
  void* memory_;
  int size_;
};


OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
  FILE* file = fopen(name, "r+");
  if (file == NULL) return NULL;

  fseek(file, 0, SEEK_END);
  int size = ftell(file);

  void* memory =
      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
  return new PosixMemoryMappedFile(file, memory, size);
}


OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
                                                   void* initial) {
  FILE* file = fopen(name, "w+");
  if (file == NULL) return NULL;
  int result = fwrite(initial, size, 1, file);
  if (result < 1) {
    fclose(file);
    return NULL;
  }
  void* memory =
      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
  return new PosixMemoryMappedFile(file, memory, size);
}


PosixMemoryMappedFile::~PosixMemoryMappedFile() {
  if (memory_) OS::Free(memory_, size_);
  fclose(file_);
}
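
// Usage sketch (illustrative, not part of the original file): create a small
// file-backed mapping with the MemoryMappedFile API above and write to it.
// The path and size are invented for the example.
static void MemoryMappedFileUsageSketch() {
  char initial[16] = { 0 };
  OS::MemoryMappedFile* mapped = OS::MemoryMappedFile::create(
      "/tmp/v8-example-map", sizeof(initial), initial);
  if (mapped != NULL) {
    if (mapped->memory() != MAP_FAILED) {
      // The mapping is MAP_SHARED, so the write reaches the backing file.
      static_cast<char*>(mapped->memory())[0] = 'x';
    }
    delete mapped;  // unmaps the memory (if any) and closes the file
  }
}
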


void OS::LogSharedLibraryAddresses(Isolate* isolate) {
  // This function assumes that the layout of the file is as follows:
  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
  // If we encounter an unexpected situation we abort scanning further entries.
  FILE* fp = fopen("/proc/self/maps", "r");
  if (fp == NULL) return;

  // Allocate enough room to be able to store a full file name.
  const int kLibNameLen = FILENAME_MAX + 1;
  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));

  // This loop will terminate once the scanning hits an EOF.
  while (true) {
    uintptr_t start, end;
    char attr_r, attr_w, attr_x, attr_p;
    // Parse the addresses and permission bits at the beginning of the line.
    if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
    if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;

    int c;
    if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
      // Found a read-only executable entry. Skip characters until we reach
      // the beginning of the filename or the end of the line.
      do {
        c = getc(fp);
      } while ((c != EOF) && (c != '\n') && (c != '/'));
      if (c == EOF) break;  // EOF: Was unexpected, just exit.

      // Process the filename if found.
      if (c == '/') {
        ungetc(c, fp);  // Push the '/' back into the stream to be read below.

        // Read to the end of the line. Exit if the read fails.
        if (fgets(lib_name, kLibNameLen, fp) == NULL) break;

        // Drop the newline character read by fgets. We do not need to check
        // for a zero-length string because we know that we at least read the
        // '/' character.
        lib_name[strlen(lib_name) - 1] = '\0';
      } else {
        // No library name found, just record the raw address range.
        snprintf(lib_name, kLibNameLen,
                 "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
      }
      LOG(isolate, SharedLibraryEvent(lib_name, start, end));
    } else {
      // Entry not describing executable data. Skip to end of line to set up
      // reading the next entry.
      do {
        c = getc(fp);
      } while ((c != EOF) && (c != '\n'));
      if (c == EOF) break;
    }
  }
  free(lib_name);
  fclose(fp);
}
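
// Format sketch (illustrative, not from the original file): the loop above
// expects /proc/self/maps lines of the form
//   0805a000-08076000 r-xp 00000000 00:00 1234 /usr/lib/libfoo.so.1.0
// where the sample values are invented. The snippet below applies the same
// leading match to such a string with sscanf, accepting only read-only
// executable entries, mirroring the fscanf calls above on the live file.
static void MapsLineFormatSketch() {
  const char* sample =
      "0805a000-08076000 r-xp 00000000 00:00 1234 /usr/lib/libfoo.so.1.0";
  uintptr_t start, end;
  char r, w, x, p;
  if (sscanf(sample, "%" V8PRIxPTR "-%" V8PRIxPTR " %c%c%c%c",
             &start, &end, &r, &w, &x, &p) == 6 &&
      r == 'r' && w != 'w' && x == 'x') {
    // A read-only executable mapping; the trailing path is what gets logged.
  }
}
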


void OS::SignalCodeMovingGC() {
  // Support for ll_prof.py.
  //
  // The Linux profiler built into the kernel logs all mmap's with
  // PROT_EXEC so that analysis tools can properly attribute ticks. We
  // do a mmap with a name known by ll_prof.py and immediately munmap
  // it. This injects a GC marker into the stream of events generated
  // by the kernel and allows us to synchronize V8 code log and the
  // kernel log.
  int size = sysconf(_SC_PAGESIZE);
  FILE* f = fopen(FLAG_gc_fake_mmap, "w+");
  if (f == NULL) {
    OS::PrintError("Failed to open %s\n", FLAG_gc_fake_mmap);
    OS::Abort();
  }
  void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
                    fileno(f), 0);
  ASSERT(addr != MAP_FAILED);
  OS::Free(addr, size);
  fclose(f);
}



// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;


VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }


VirtualMemory::VirtualMemory(size_t size)
    : address_(ReserveRegion(size)), size_(size) { }


VirtualMemory::VirtualMemory(size_t size, size_t alignment)
    : address_(NULL), size_(0) {
  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  void* reservation = mmap(OS::GetRandomMmapAddr(),
                           request_size,
                           PROT_NONE,
                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                           kMmapFd,
                           kMmapFdOffset);
  if (reservation == MAP_FAILED) return;

  Address base = static_cast<Address>(reservation);
  Address aligned_base = RoundUp(base, alignment);
  ASSERT_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  ASSERT_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  ASSERT(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
}
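
// Worked example (illustrative, with invented numbers) of the trimming done
// by the aligned constructor above. Take size = 64 KB, alignment = 64 KB and
// a 4 KB allocation alignment:
//   request_size = RoundUp(64 KB + 64 KB, 4 KB) = 128 KB (0x20000)
// If mmap returns base = 0x2000f000, then
//   aligned_base = RoundUp(base, 64 KB)         = 0x20010000
//   prefix_size  = 0x20010000 - 0x2000f000      = 0x1000   (freed)
//   request_size = 0x20000 - 0x1000             = 0x1f000
//   aligned_size = RoundUp(64 KB, 4 KB)         = 0x10000
//   suffix_size  = 0x1f000 - 0x10000            = 0xf000   (freed)
// leaving exactly one 64 KB region that starts on a 64 KB boundary.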


VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    bool result = ReleaseRegion(address(), size());
    ASSERT(result);
    USE(result);
  }
}


bool VirtualMemory::IsReserved() {
  return address_ != NULL;
}


void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}


bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}


bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}


bool VirtualMemory::Guard(void* address) {
  OS::Guard(address, OS::CommitPageSize());
  return true;
}


void* VirtualMemory::ReserveRegion(size_t size) {
  void* result = mmap(OS::GetRandomMmapAddr(),
                      size,
                      PROT_NONE,
                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                      kMmapFd,
                      kMmapFdOffset);

  if (result == MAP_FAILED) return NULL;

  return result;
}


bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(base,
                         size,
                         prot,
                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                         kMmapFd,
                         kMmapFdOffset)) {
    return false;
  }
  return true;
}


bool VirtualMemory::UncommitRegion(void* base, size_t size) {
  return mmap(base,
              size,
              PROT_NONE,
              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
              kMmapFd,
              kMmapFdOffset) != MAP_FAILED;
}


bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
  return munmap(base, size) == 0;
}


bool VirtualMemory::HasLazyCommits() {
  // TODO(alph): implement for the platform.
  return false;
}
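
// Usage sketch (illustrative, not part of the original file) of the
// VirtualMemory API implemented above: reserve address space, commit one
// page, use it, then uncommit. The 64 KB reservation size is an assumption.
static void VirtualMemoryUsageSketch() {
  VirtualMemory reservation(64 * 1024);  // reserves PROT_NONE address space
  if (reservation.IsReserved()) {
    void* base = reservation.address();
    size_t page = OS::CommitPageSize();
    if (reservation.Commit(base, page, false)) {  // make one page read-write
      static_cast<char*>(base)[0] = 0;
      reservation.Uncommit(base, page);
    }
  }
  // The destructor releases the remaining reservation.
}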

} }  // namespace v8::internal