#include <mach/mach_init.h>
#include <mach-o/dyld.h>
#include <mach-o/getsect.h>
#include <AvailabilityMacros.h>
#include <semaphore.h>
#include <libkern/OSAtomic.h>
#include <mach/mach.h>
#include <mach/semaphore.h>
#include <mach/task.h>
#include <mach/vm_statistics.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <sys/sysctl.h>
static const pthread_t kNoThread = (pthread_t) 0;
  // Correct 'ceil' behaviour on Mac OS X for arguments in (-1.0, 0.0).
  if (-1.0 < x && x < 0.0) {
static Mutex* limit_mutex = NULL;
void OS::PostSetUp() {
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
static void* highest_ever_allocated = reinterpret_cast<void*>(0);
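// Track the lowest and highest addresses ever handed out by the allocator so
// that IsOutsideAllocatedSpace() can quickly reject pointers that cannot point
// into V8-allocated memory.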
static void UpdateAllocatedSpaceLimits(void* address, int size) {
  ScopedLock lock(limit_mutex);
  lowest_ever_allocated = Min(lowest_ever_allocated, address);
  highest_ever_allocated =
      Max(highest_ever_allocated,
          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
}
bool OS::IsOutsideAllocatedSpace(void* address) {
  return address < lowest_ever_allocated || address >= highest_ever_allocated;
}
size_t OS::AllocateAlignment() {
  return getpagesize();
}
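// Constants used for mmap. With MAP_ANON the fd argument does not name a file,
// so kMmapFd passes a VM_MAKE_TAG tag instead; this makes V8 mappings
// identifiable in memory analysis tools such as vmmap(1).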
static const int kMmapFd = VM_MAKE_TAG(255);
static const off_t kMmapFdOffset = 0;
void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool is_executable) {
  const size_t msize = RoundUp(requested, getpagesize());
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  void* mbase = mmap(OS::GetRandomMmapAddr(),
                     msize,
                     prot,
                     MAP_PRIVATE | MAP_ANON,
                     kMmapFd,
                     kMmapFdOffset);
  if (mbase == MAP_FAILED) {
    LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
    return NULL;
  }
  *allocated = msize;
  UpdateAllocatedSpaceLimits(mbase, msize);
  return mbase;
}
void OS::Free(void* address, const size_t size) {
  int result = munmap(address, size);
  USE(result);
  ASSERT(result == 0);
}


void OS::Sleep(int milliseconds) {
  usleep(1000 * milliseconds);
}
void OS::DebugBreak() {
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
      : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }
 private:
  FILE* file_;
  void* memory_;
  int size_;
};
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
  FILE* file = fopen(name, "r+");
  if (file == NULL) return NULL;

  fseek(file, 0, SEEK_END);
  int size = ftell(file);

  void* memory = mmap(OS::GetRandomMmapAddr(),
                      size,
                      PROT_READ | PROT_WRITE,
                      MAP_SHARED,
                      fileno(file),
                      0);
  return new PosixMemoryMappedFile(file, memory, size);
}
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
    void* initial) {
  FILE* file = fopen(name, "w+");
  if (file == NULL) return NULL;
  int result = fwrite(initial, size, 1, file);
  if (result < 1) {
    fclose(file);
    return NULL;
  }
  void* memory = mmap(OS::GetRandomMmapAddr(),
                      size,
                      PROT_READ | PROT_WRITE,
                      MAP_SHARED,
                      fileno(file),
                      0);
  return new PosixMemoryMappedFile(file, memory, size);
}
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
  if (memory_) OS::Free(memory_, size_);
  fclose(file_);
}
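// Log the address range of the __TEXT section of every loaded dyld image,
// adjusted by the image's slide, so the profiler can attribute sampled
// program counters to shared libraries.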
void OS::LogSharedLibraryAddresses() {
  unsigned int images_count = _dyld_image_count();
  for (unsigned int i = 0; i < images_count; ++i) {
    const mach_header* header = _dyld_get_image_header(i);
    if (header == NULL) continue;
#if V8_HOST_ARCH_X64
    uint64_t size;
    char* code_ptr = getsectdatafromheader_64(
        reinterpret_cast<const mach_header_64*>(header),
        SEG_TEXT,
        SECT_TEXT,
        &size);
#else
    unsigned int size;
    char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
#endif
    if (code_ptr == NULL) continue;
    const uintptr_t slide = _dyld_get_image_vmaddr_slide(i);
    const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
    LOG(Isolate::Current(),
        SharedLibraryEvent(_dyld_get_image_name(i), start, start + size));
  }
}
void OS::SignalCodeMovingGC() {
}
uint64_t OS::CpuFeaturesImpliedByPlatform() {
  const uint64_t one = 1;
int OS::ActivationFrameAlignment() {
const char* OS::LocalTimezone(double time) {
  if (isnan(time)) return "";
  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
  struct tm* t = localtime(&tv);
  if (NULL == t) return "";
  return t->tm_zone;
}
double OS::LocalTimeOffset() {
  time_t tv = time(NULL);
  struct tm* t = localtime(&tv);
  // tm_gmtoff includes any daylight savings offset, so subtract it.
  return static_cast<double>(t->tm_gmtoff * msPerSecond -
                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}
int OS::StackWalk(Vector<StackFrame> frames) {
  int frames_size = frames.length();
  ScopedVector<void*> addresses(frames_size);

  int frames_count = backtrace(addresses.start(), frames_size);

  char** symbols = backtrace_symbols(addresses.start(), frames_count);
  if (symbols == NULL) {
    return kStackWalkError;
  }

  for (int i = 0; i < frames_count; i++) {
    frames[i].address = addresses[i];
    // Format a text representation of the frame based on the information
    // available.
    SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
             "%s", symbols[i]);
    // Make sure line termination is in place.
    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
  }

  free(symbols);

  return frames_count;
}
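// The aligned VirtualMemory constructor below reserves an over-sized,
// inaccessible (PROT_NONE) region and then unmaps the unaligned prefix and
// suffix so that the surviving block starts at the requested alignment.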
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }


VirtualMemory::VirtualMemory(size_t size)
    : address_(ReserveRegion(size)), size_(size) { }
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
    : address_(NULL), size_(0) {
  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  void* reservation = mmap(OS::GetRandomMmapAddr(),
                           request_size,
                           PROT_NONE,
                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                           kMmapFd,
                           kMmapFdOffset);
  if (reservation == MAP_FAILED) return;

  Address base = static_cast<Address>(reservation);
  Address aligned_base = RoundUp(base, alignment);
  ASSERT_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  ASSERT_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  ASSERT(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
}
                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,

  if (result == MAP_FAILED) return NULL;
bool VirtualMemory::IsReserved() {
  return address_ != NULL;
}
bool VirtualMemory::CommitRegion(void* address, size_t size,
                                 bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(address, size, prot,
                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                         kMmapFd, kMmapFdOffset)) {
    return false;
  }
  UpdateAllocatedSpaceLimits(address, size);
  return true;
}
              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
              kMmapFd,
              kMmapFdOffset) != MAP_FAILED;
  return munmap(address, size) == 0;
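// Thread support based on pthreads. Each V8 Thread owns a PlatformData that
// holds the underlying pthread_t handle.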
class Thread::PlatformData : public Malloced {
 public:
  pthread_t thread_;  // Thread handle for pthread.
};

Thread::Thread(const Options& options)
    : data_(new PlatformData),
      stack_size_(options.stack_size()) {
  set_name(options.name());
}
static void SetThreadName(const char* name) {
  // pthread_setname_np is only available in 10.6 or later, so test
  // for it at runtime.
  int (*dynamic_pthread_setname_np)(const char*);
  *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
      dlsym(RTLD_DEFAULT, "pthread_setname_np");
  if (!dynamic_pthread_setname_np)
    return;

  // Mac OS X does not expose the length limit of the name, so hardcode it.
  static const int kMaxNameLength = 63;
  ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength);
  dynamic_pthread_setname_np(name);
}
static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  // This is also initialized by the first argument to pthread_create(), but
  // we don't know which thread will run first, so initialize it here as well.
  thread->data()->thread_ = pthread_self();
  SetThreadName(thread->name());
  ASSERT(thread->data()->thread_ != kNoThread);
  thread->Run();
  return NULL;
}
void Thread::set_name(const char* name) {
  strncpy(name_, name, sizeof(name_));
  name_[sizeof(name_) - 1] = '\0';
}
void Thread::Start() {
  pthread_attr_t* attr_ptr = NULL;
  pthread_attr_t attr;
  if (stack_size_ > 0) {
    pthread_attr_init(&attr);
    pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
    attr_ptr = &attr;
  }
  pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
  ASSERT(data_->thread_ != kNoThread);
}
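// Fast TLS support. kMacTlsBaseOffset is the offset of the pthread key area
// from the thread's TLS base segment register; it depends on the Darwin
// kernel version, so InitializeTlsBaseOffset() probes the kernel release and
// CheckFastTls() verifies that a value stored with pthread_setspecific() can
// be read back through the fast path.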
#ifdef V8_FAST_TLS_SUPPORTED

static Atomic32 tls_base_offset_initialized = 0;
intptr_t kMacTlsBaseOffset = 0;
static void InitializeTlsBaseOffset() {
  const size_t kBufferSize = 128;
  char buffer[kBufferSize];
  size_t buffer_size = kBufferSize;
  int ctl_name[] = { CTL_KERN, KERN_OSRELEASE };
  if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) {
    V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
  }
  // The buffer now contains a string of the form XX.YY.ZZ, where
  // XX is the major kernel version component. Make sure it is 0-terminated.
  buffer[kBufferSize - 1] = '\0';
  char* period_pos = strchr(buffer, '.');
  *period_pos = '\0';
  int kernel_version_major =
      static_cast<int>(strtol(buffer, NULL, 10));
  // The constants below are taken from pthreads.s in the XNU kernel sources.
  if (kernel_version_major < 11) {
    // 8.x.x (Tiger), 9.x.x (Leopard) and 10.x.x (Snow Leopard) share offsets.
#if defined(V8_HOST_ARCH_IA32)
    kMacTlsBaseOffset = 0x48;
#else
    kMacTlsBaseOffset = 0x60;
#endif
  } else {
    // 11.x.x (Lion) changed the offset.
    kMacTlsBaseOffset = 0;
  }

  Release_Store(&tls_base_offset_initialized, 1);
}


static void CheckFastTls(Thread::LocalStorageKey key) {
  void* expected = reinterpret_cast<void*>(0x1234CAFE);
  Thread::SetThreadLocal(key, expected);
  void* actual = Thread::GetExistingThreadLocal(key);
  if (expected != actual) {
    V8_Fatal(__FILE__, __LINE__,
             "V8 failed to initialize fast TLS on current kernel");
  }
  Thread::SetThreadLocal(key, NULL);
}

#endif  // V8_FAST_TLS_SUPPORTED
Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
#ifdef V8_FAST_TLS_SUPPORTED
  bool check_fast_tls = false;
  if (tls_base_offset_initialized == 0) {
    check_fast_tls = true;
    InitializeTlsBaseOffset();
  }
#endif
  pthread_key_t key;
  int result = pthread_key_create(&key, NULL);
  USE(result);
  ASSERT(result == 0);
  LocalStorageKey typed_key = static_cast<LocalStorageKey>(key);
#ifdef V8_FAST_TLS_SUPPORTED
  // If fast TLS support was just initialized, make sure it works.
  if (check_fast_tls) CheckFastTls(typed_key);
#endif
  return typed_key;
}
void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  int result = pthread_key_delete(pthread_key);
  USE(result);
  ASSERT(result == 0);
}


void* Thread::GetThreadLocal(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  return pthread_getspecific(pthread_key);
}


void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  pthread_setspecific(pthread_key, value);
}
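// Mutex implementation based on a recursive pthread mutex, so the same thread
// may lock it multiple times without deadlocking.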
class MacOSMutex : public Mutex {
 public:
  MacOSMutex() {
    pthread_mutexattr_t attr;
    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&mutex_, &attr);
  }

  virtual int Lock() { return pthread_mutex_lock(&mutex_); }
  virtual int Unlock() { return pthread_mutex_unlock(&mutex_); }

  virtual bool TryLock() {
    int result = pthread_mutex_trylock(&mutex_);
    if (result == EBUSY) return false;  // The lock is held by another thread.
    ASSERT(result == 0);
    return true;
  }

 private:
  pthread_mutex_t mutex_;
};
Mutex* OS::CreateMutex() { return new MacOSMutex(); }
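// Semaphore implementation on top of Mach semaphores. semaphore_wait() may
// return KERN_ABORTED if the calling thread is interrupted, so Wait() retries
// until the wait actually succeeds.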
class MacOSSemaphore : public Semaphore {
 public:
  explicit MacOSSemaphore(int count) {
    int r = semaphore_create(mach_task_self(), &semaphore_,
                             SYNC_POLICY_FIFO, count);
    ASSERT(r == KERN_SUCCESS);
  }
  ~MacOSSemaphore() {
    int r = semaphore_destroy(mach_task_self(), semaphore_);
    ASSERT(r == KERN_SUCCESS);
  }
  void Wait() {
    int r;
    do {
      r = semaphore_wait(semaphore_);
      ASSERT(r == KERN_SUCCESS || r == KERN_ABORTED);
    } while (r == KERN_ABORTED);
  }
  bool Wait(int timeout);
  void Signal() { semaphore_signal(semaphore_); }
 private:
  semaphore_t semaphore_;
};

bool MacOSSemaphore::Wait(int timeout) {
  mach_timespec_t ts;
  ts.tv_sec = timeout / 1000000;
  ts.tv_nsec = (timeout % 1000000) * 1000;
  return semaphore_timedwait(semaphore_, ts) != KERN_OPERATION_TIMED_OUT;
}
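// Profiler sampling. A dedicated sampler thread periodically suspends the
// profiled VM thread, reads its register state (pc, sp, fp) with
// thread_get_state(), records a tick sample, and then resumes the thread.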
class Sampler::PlatformData : public Malloced {
 public:
  ~PlatformData() {
    mach_port_deallocate(mach_task_self(), profiled_thread_);
  }
 private:
  thread_act_t profiled_thread_;
};
  static const int kSamplerThreadStackSize = 64 * KB;

  explicit SamplerThread(int interval)
      : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
        interval_(interval) {}
    if (instance_ == NULL) {
      bool cpu_profiling_enabled =
          (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
      bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
      // When CPU profiling is enabled, the sampler thread must not suspend.
      if (!cpu_profiling_enabled) {
        if (rate_limiter_.SuspendIfNecessary()) continue;
      }
      if (cpu_profiling_enabled) {
        if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) return;
      }
      if (runtime_profiler_enabled) {
        if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) return;
      }
    if (sample == NULL) sample = &sample_obj;

    if (KERN_SUCCESS != thread_suspend(profiled_thread)) return;
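    // Pick the thread-state flavor for the host architecture. When
    // __DARWIN_UNIX03 is set, register fields carry a "__" prefix (e.g.
    // __rip), which the REGISTER_FIELD macro below abstracts away.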
#if V8_HOST_ARCH_X64
    thread_state_flavor_t flavor = x86_THREAD_STATE64;
    x86_thread_state64_t state;
    mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
#if __DARWIN_UNIX03
#define REGISTER_FIELD(name) __r ## name
#else
#define REGISTER_FIELD(name) r ## name
#endif  // __DARWIN_UNIX03
#elif V8_HOST_ARCH_IA32
    thread_state_flavor_t flavor = i386_THREAD_STATE;
    i386_thread_state_t state;
    mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
#if __DARWIN_UNIX03
#define REGISTER_FIELD(name) __e ## name
#else
#define REGISTER_FIELD(name) e ## name
#endif  // __DARWIN_UNIX03
#else
#error Unsupported Mac OS X host architecture.
#endif  // V8_HOST_ARCH
    if (thread_get_state(profiled_thread,
                         flavor,
                         reinterpret_cast<natural_t*>(&state),
                         &count) == KERN_SUCCESS) {
      sample->pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
      sample->sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
      sample->fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
      sampler->SampleStack(sample);
      sampler->Tick(sample);
    }
    thread_resume(profiled_thread);
  }
  RuntimeProfilerRateLimiter rate_limiter_;
  const int interval_;
  static Mutex* mutex_;  // Protects the process-wide state.
  static SamplerThread* instance_;
};

#undef REGISTER_FIELD
void OS::SetUp() {
  // Seed the random number generator. We preserve microsecond resolution.
  uint64_t seed = Ticks() ^ (getpid() << 16);
  srandom(static_cast<unsigned int>(seed));
  limit_mutex = CreateMutex();
}
  data_ = new PlatformData;