#ifdef MINGW_HAS_SECURE_API
#undef MINGW_HAS_SECURE_API
#endif  // MINGW_HAS_SECURE_API
int strncasecmp(const char* s1, const char* s2, int n) {
  return _strnicmp(s1, s2, n);
}
#ifndef __MINGW64_VERSION_MAJOR

#define _TRUNCATE 0
#define STRUNCATE 80

inline void MemoryBarrier() {
  int barrier = 0;
  __asm__ __volatile__("xchgl %%eax,%0 ":"=r" (barrier));
}

#endif  // __MINGW64_VERSION_MAJOR
int localtime_s(tm* out_tm, const time_t* time) {
  tm* posix_local_time_struct = localtime(time);
  if (posix_local_time_struct == NULL) return 1;
  *out_tm = *posix_local_time_struct;
  return 0;
}
int fopen_s(FILE** pFile, const char* filename, const char* mode) {
  *pFile = fopen(filename, mode);
  return *pFile != NULL ? 0 : 1;
}
int _vsnprintf_s(char* buffer, size_t sizeOfBuffer, size_t count,
                 const char* format, va_list argptr) {
  ASSERT(count == _TRUNCATE);
  return _vsnprintf(buffer, sizeOfBuffer, format, argptr);
}
int strncpy_s(char* dest, size_t dest_size, const char* source, size_t count) {
  if (count == _TRUNCATE) {
    while (dest_size > 0 && *source != 0) {
      *(dest++) = *(source++);
      --dest_size;
    }
    if (dest_size == 0) {
      *(dest - 1) = 0;
      return STRUNCATE;
    }
  } else {
    while (dest_size > 0 && count > 0 && *source != 0) {
      *(dest++) = *(source++);
      --dest_size;
      --count;
    }
  }
  *dest = 0;
  return 0;
}

#endif  // __MINGW32__
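
// Illustrative usage only (hypothetical helper, not part of the original
// file): with the shims above, code written against the MSVC "secure CRT"
// interface builds unchanged under MinGW. The file name and buffer size are
// arbitrary examples.
static void ExampleSecureCrtUsage() {
  FILE* f = NULL;
  if (fopen_s(&f, "example.txt", "rb") == 0 && f != NULL) {
    char name[16];
    // _TRUNCATE requests silent truncation; the destination stays
    // NUL-terminated and STRUNCATE is returned when truncation happens.
    strncpy_s(name, sizeof(name), "a-longer-than-sixteen-char-string",
              _TRUNCATE);
    fclose(f);
  }
}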
#if V8_TARGET_ARCH_IA32
static void MemMoveWrapper(void* dest, const void* src, size_t size) {
  memmove(dest, src, size);
}


// Initialize to the C library version so it can be called at any time during
// startup; OS::PostSetUp() swaps in the generated version if available.
static OS::MemMoveFunction memmove_function = &MemMoveWrapper;

// Defined in codegen-ia32.cc.
OS::MemMoveFunction CreateMemMoveFunction();


// Copy memory area to disjoint memory area.
void OS::MemMove(void* dest, const void* src, size_t size) {
  if (size == 0) return;
  (*memmove_function)(dest, src, size);
}

#endif  // V8_TARGET_ARCH_IA32
#if V8_TARGET_ARCH_X64
typedef double (*ModuloFunction)(double, double);
static ModuloFunction modulo_function = NULL;

// Defined in codegen-x64.cc.
ModuloFunction CreateModuloFunction();

void init_modulo_function() {
  modulo_function = CreateModuloFunction();
}


double modulo(double x, double y) {
  return (*modulo_function)(x, y);
}
#else  // Win32

double modulo(double x, double y) {
  // Workaround MS fmod bugs: ECMA-262 requires that the dividend is returned
  // unchanged when the divisor is an infinity, or when the dividend is a zero
  // and the divisor is a nonzero finite number.
  if (!(std::isfinite(x) && (!std::isfinite(y) && !std::isnan(y))) &&
      !(x == 0 && (y != 0 && std::isfinite(y)))) {
    x = fmod(x, y);
  }
  return x;
}

#endif  // V8_TARGET_ARCH_X64
#define UNARY_MATH_FUNCTION(name, generator)              \
  static UnaryMathFunction fast_##name##_function = NULL; \
  void init_fast_##name##_function() {                    \
    fast_##name##_function = generator;                   \
  }                                                       \
  double fast_##name(double x) {                          \
    return (*fast_##name##_function)(x);                  \
  }

UNARY_MATH_FUNCTION(exp, CreateExpFunction())
UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction())
#undef UNARY_MATH_FUNCTION

void lazily_initialize_fast_exp() {
  if (fast_exp_function == NULL) {
    init_fast_exp_function();
  }
}

void MathSetup() {
#if V8_TARGET_ARCH_X64
  init_modulo_function();
#endif
  // fast_exp is initialized lazily.
  init_fast_sqrt_function();
}
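
// Illustrative usage only (hypothetical caller, not part of the original
// file): the generated fast math entry points must be initialized before use,
// either eagerly via MathSetup() or, for exp, lazily as shown here.
static double ExampleFastExp(double x) {
  lazily_initialize_fast_exp();  // Idempotent; fills fast_exp_function once.
  return fast_exp(x);            // Dispatches through the generated code.
}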
class TimezoneCache {
 public:
  TimezoneCache() : initialized_(false) { }

  void Clear() {
    initialized_ = false;
  }

  // Initialize timezone information. The information is obtained from the
  // operating system; if that fails we fall back to CET.
  void InitializeIfNeeded() {
    // Just return if timezone information has already been initialized.
    if (initialized_) return;

    // Obtain timezone information from the operating system.
    memset(&tzinfo_, 0, sizeof(tzinfo_));
    if (GetTimeZoneInformation(&tzinfo_) == TIME_ZONE_ID_INVALID) {
      // If we cannot get the timezone information we fall back to CET.
      tzinfo_.Bias = -60;
      tzinfo_.StandardDate.wMonth = 10;
      tzinfo_.StandardDate.wDay = 5;
      tzinfo_.StandardDate.wHour = 3;
      tzinfo_.StandardBias = 0;
      tzinfo_.DaylightDate.wMonth = 3;
      tzinfo_.DaylightDate.wDay = 5;
      tzinfo_.DaylightDate.wHour = 2;
      tzinfo_.DaylightBias = -60;
    }

    // Make standard and DST timezone names.
    WideCharToMultiByte(CP_UTF8, 0, tzinfo_.StandardName, -1,
                        std_tz_name_, kTzNameSize, NULL, NULL);
    std_tz_name_[kTzNameSize - 1] = '\0';
    WideCharToMultiByte(CP_UTF8, 0, tzinfo_.DaylightName, -1,
                        dst_tz_name_, kTzNameSize, NULL, NULL);
    dst_tz_name_[kTzNameSize - 1] = '\0';

    // If the OS returned an empty string or a resource id (like
    // "@tzres.dll,-211"), guess the name from the UTC bias of the timezone.
    if (std_tz_name_[0] == '\0' || std_tz_name_[0] == '@') {
      OS::SNPrintF(Vector<char>(std_tz_name_, kTzNameSize - 1),
                   "%s Standard Time",
                   GuessTimezoneNameFromBias(tzinfo_.Bias));
    }
    if (dst_tz_name_[0] == '\0' || dst_tz_name_[0] == '@') {
      OS::SNPrintF(Vector<char>(dst_tz_name_, kTzNameSize - 1),
                   "%s Daylight Time",
                   GuessTimezoneNameFromBias(tzinfo_.Bias));
    }

    // Timezone information initialized.
    initialized_ = true;
  }

  // Guess the name of the timezone from the bias; the guess is heavily biased
  // towards the northern hemisphere.
  const char* GuessTimezoneNameFromBias(int bias) {
    static const int kHour = 60;
    switch (-bias) {
      case -9*kHour: return "Alaska";
      case -8*kHour: return "Pacific";
      case -7*kHour: return "Mountain";
      case -6*kHour: return "Central";
      case -5*kHour: return "Eastern";
      case -4*kHour: return "Atlantic";
      case  0*kHour: return "GMT";
      case +1*kHour: return "Central Europe";
      case +2*kHour: return "Eastern Europe";
      case +3*kHour: return "Russia";
      case +5*kHour + 30: return "India";
      case +8*kHour: return "China";
      case +9*kHour: return "Japan";
      case +12*kHour: return "New Zealand";
      default: return "Local";
    }
  }

 private:
  static const int kTzNameSize = 128;
  bool initialized_;
  char std_tz_name_[kTzNameSize];
  char dst_tz_name_[kTzNameSize];
  TIME_ZONE_INFORMATION tzinfo_;

  friend class Win32Time;
};
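
// Illustrative only (hypothetical helper, not part of the original file):
// TIME_ZONE_INFORMATION::Bias follows the convention UTC = local time + Bias
// (in minutes), so Pacific Standard Time reports Bias == 480 and the switch
// above, which is taken over -bias, selects the -8*kHour case.
static const char* ExampleGuessTimezoneName(TimezoneCache* cache) {
  return cache->GuessTimezoneNameFromBias(480);  // Returns "Pacific".
}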
class Win32Time {
 public:
  explicit Win32Time(double jstime);
  Win32Time(int year, int mon, int day, int hour, int min, int sec);

  // Convert timestamp to JavaScript representation.
  double ToJSTime();

  // Set timestamp to current time.
  void SetToCurrentTime();

  int64_t LocalOffset(TimezoneCache* cache);
  int64_t DaylightSavingsOffset(TimezoneCache* cache);
  char* LocalTimezone(TimezoneCache* cache);

 private:
  // Constants for time conversion.
  static const int64_t kTimeEpoc = 116444736000000000LL;
  static const int64_t kTimeScaler = 10000;
  static const int64_t kMsPerMinute = 60000;

  // Constants for timezone information.
  static const bool kShortTzNames = false;

  // Return whether or not daylight saving time is in effect at this time.
  bool InDST(TimezoneCache* cache);

  // Accessors for the FILETIME and 64-bit integer views of the timestamp.
  FILETIME& ft() { return time_.ft_; }
  int64_t& t() { return time_.t_; }

  // The TimeStamp union gives access to both representations of the same
  // 64-bit timestamp (100 ns intervals since January 1, 1601 UTC).
  union TimeStamp {
    FILETIME ft_;
    int64_t t_;
  };
  TimeStamp time_;
};


// Initialize timestamp from a JavaScript timestamp (ms since 1970-01-01 UTC).
Win32Time::Win32Time(double jstime) {
  t() = static_cast<int64_t>(jstime) * kTimeScaler + kTimeEpoc;
}


// Initialize timestamp from date/time components; the sub-second part is zero.
Win32Time::Win32Time(int year, int mon, int day, int hour, int min, int sec) {
  SYSTEMTIME st;
  st.wYear = year;   st.wMonth = mon;   st.wDay = day;
  st.wHour = hour;   st.wMinute = min;  st.wSecond = sec;
  st.wMilliseconds = 0;
  SystemTimeToFileTime(&st, &ft());
}


// Convert timestamp to JavaScript timestamp.
double Win32Time::ToJSTime() {
  return static_cast<double>((t() - kTimeEpoc) / kTimeScaler);
}
// Set timestamp to current time. GetSystemTimeAsFileTime is used as the base,
// and timeGetTime() (1 ms granularity, but a 32-bit counter that rolls over
// every ~49 days) extrapolates from it; the clock is resynced whenever the
// tick counter rolls over, time jumps backwards, or too much time has passed.
void Win32Time::SetToCurrentTime() {
  static bool initialized = false;
  static TimeStamp init_time;
  static DWORD init_ticks;
  static const int64_t kHundredNanosecondsPerSecond = 10000000;
  static const int64_t kMaxClockElapsedTime =
      60*kHundredNanosecondsPerSecond;  // 1 minute

  // If we are uninitialized, we need to resync the clock.
  bool needs_resync = !initialized;

  // Get the current time.
  TimeStamp time_now;
  GetSystemTimeAsFileTime(&time_now.ft_);
  DWORD ticks_now = timeGetTime();

  // Check if we need to resync due to clock rollover.
  needs_resync |= ticks_now < init_ticks;

  // Check if we need to resync due to elapsed time.
  needs_resync |= (time_now.t_ - init_time.t_) > kMaxClockElapsedTime;

  // Check if we need to resync due to backwards time change.
  needs_resync |= time_now.t_ < init_time.t_;

  // Resync the clock if necessary.
  if (needs_resync) {
    GetSystemTimeAsFileTime(&init_time.ft_);
    init_ticks = ticks_now = timeGetTime();
    initialized = true;
  }

  // Finally, compute the actual time: base time plus the elapsed ticks
  // converted to 100 ns units.
  DWORD elapsed = ticks_now - init_ticks;
  this->time_.t_ = init_time.t_ + (static_cast<int64_t>(elapsed) * 10000);
}
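
// Illustrative only (hypothetical helper, not part of the original file):
// timeGetTime() counts milliseconds while FILETIME counts 100 ns intervals,
// which is where the "* 10000" above comes from.
static int64_t ExampleMillisecondsToFileTimeTicks(DWORD elapsed_ms) {
  return static_cast<int64_t>(elapsed_ms) * 10000;  // 1 ms == 10,000 * 100 ns.
}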
// Return the local timezone offset in milliseconds east of UTC.
int64_t Win32Time::LocalOffset(TimezoneCache* cache) {
  cache->InitializeIfNeeded();

  Win32Time rounded_to_second(*this);
  rounded_to_second.t() =
      rounded_to_second.t() / 1000 / kTimeScaler * 1000 * kTimeScaler;
  // Convert from JavaScript milliseconds past UTC 01/01/1970 to POSIX
  // seconds past UTC 01/01/1970.
  double unchecked_posix_time = rounded_to_second.ToJSTime() / 1000;
  if (unchecked_posix_time > INT_MAX || unchecked_posix_time < 0) {
    return 0;
  }
  time_t posix_time = static_cast<time_t>(unchecked_posix_time);

  // Convert to local time, as a struct with fields for day, hour, year, etc.
  tm posix_local_time_struct;
  if (localtime_s(&posix_local_time_struct, &posix_time)) return 0;

  if (posix_local_time_struct.tm_isdst > 0) {
    return (cache->tzinfo_.Bias + cache->tzinfo_.DaylightBias) * -kMsPerMinute;
  } else if (posix_local_time_struct.tm_isdst == 0) {
    return (cache->tzinfo_.Bias + cache->tzinfo_.StandardBias) * -kMsPerMinute;
  } else {
    return cache->tzinfo_.Bias * -kMsPerMinute;
  }
}


// Return whether or not daylight saving time is in effect at this time.
bool Win32Time::InDST(TimezoneCache* cache) {
  cache->InitializeIfNeeded();
  bool in_dst = false;
  if (cache->tzinfo_.StandardDate.wMonth != 0 ||
      cache->tzinfo_.DaylightDate.wMonth != 0) {
    // DST is in effect if the local offset equals the timezone bias plus the
    // daylight bias (both given in minutes, converted to milliseconds here).
    int64_t offset = LocalOffset(cache);
    int64_t dstofs =
        -(cache->tzinfo_.Bias + cache->tzinfo_.DaylightBias) * kMsPerMinute;
    in_dst = offset == dstofs;
  }
  return in_dst;
}


// Return the daylight saving time offset for this time.
int64_t Win32Time::DaylightSavingsOffset(TimezoneCache* cache) {
  return InDST(cache) ? 60 * kMsPerMinute : 0;
}


// Return the standard or DST timezone name depending on whether daylight
// saving is in effect at this timestamp.
char* Win32Time::LocalTimezone(TimezoneCache* cache) {
  return InDST(cache) ? cache->dst_tz_name_ : cache->std_tz_name_;
}
void OS::PostSetUp() {
  // Math functions depend on CPU features, so they are initialized after the
  // CPU has been probed.
  MathSetup();
#if V8_TARGET_ARCH_IA32
  OS::MemMoveFunction generated_memmove = CreateMemMoveFunction();
  if (generated_memmove != NULL) {
    memmove_function = generated_memmove;
  }
#endif
}
int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
  FILETIME dummy;
  uint64_t usertime;

  // Get the amount of time that the thread has executed in user mode.
  if (!GetThreadTimes(GetCurrentThread(), &dummy, &dummy, &dummy,
                      reinterpret_cast<FILETIME*>(&usertime))) return -1;

  // Adjust the resolution from 100 ns to microseconds.
  usertime /= 10;

  // Convert to seconds and microseconds.
  *secs = static_cast<uint32_t>(usertime / 1000000);
  *usecs = static_cast<uint32_t>(usertime % 1000000);
  return 0;
}
double OS::TimeCurrentMillis() {
  return Time::Now().ToJsTime();
}


TimezoneCache* OS::CreateTimezoneCache() {
  return new TimezoneCache();
}


const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
  return Win32Time(time).LocalTimezone(cache);
}


double OS::LocalTimeOffset(TimezoneCache* cache) {
  // Use current time, rounded to the millisecond.
  Win32Time t(TimeCurrentMillis());
  // Win32Time::LocalOffset includes any daylight saving offset, so subtract it.
  return static_cast<double>(t.LocalOffset(cache) -
                             t.DaylightSavingsOffset(cache));
}


double OS::DaylightSavingsOffset(double time, TimezoneCache* cache) {
  int64_t offset = Win32Time(time).DaylightSavingsOffset(cache);
  return static_cast<double>(offset);
}


int OS::GetLastError() {
  return ::GetLastError();
}
// Determine whether the process has a console for output. Without one (e.g.
// when V8 is embedded in a GUI application) output is sent to the debugger
// via OutputDebugString instead; see VPrintHelper below.
static bool HasConsole() {
  // The process is treated as console-less if the stdout handle is invalid or
  // of unknown file type.
  if (GetStdHandle(STD_OUTPUT_HANDLE) != INVALID_HANDLE_VALUE &&
      GetFileType(GetStdHandle(STD_OUTPUT_HANDLE)) != FILE_TYPE_UNKNOWN)
    return true;
  else
    return false;
}
static void VPrintHelper(FILE* stream, const char* format, va_list args) {
  if ((stream == stdout || stream == stderr) && !HasConsole()) {
    // Use the safe formatting function to avoid overflowing the buffer; the
    // output may be truncated, but it cannot crash.
    EmbeddedVector<char, 4096> buffer;
    OS::VSNPrintF(buffer, format, args);
    OutputDebugStringA(buffer.start());
  } else {
    vfprintf(stream, format, args);
  }
}
FILE* OS::FOpen(const char* path, const char* mode) {
  FILE* result;
  if (fopen_s(&result, path, mode) == 0) {
    return result;
  } else {
    return NULL;
  }
}


bool OS::Remove(const char* path) {
  return (DeleteFileA(path) != 0);
}


FILE* OS::OpenTemporaryFile() {
  // tmpfile_s tries to use the root directory, so don't use it here.
  char tempPathBuffer[MAX_PATH];
  DWORD path_result = 0;
  path_result = GetTempPathA(MAX_PATH, tempPathBuffer);
  if (path_result > MAX_PATH || path_result == 0) return NULL;
  UINT name_result = 0;
  char tempNameBuffer[MAX_PATH];
  name_result = GetTempFileNameA(tempPathBuffer, "", 0, tempNameBuffer);
  if (name_result == 0) return NULL;
  FILE* result = FOpen(tempNameBuffer, "w+");  // Same mode as tmpfile uses.
  if (result != NULL) {
    Remove(tempNameBuffer);  // Delete on close.
  }
  return result;
}
// Print (debug) message to console.
void OS::Print(const char* format, ...) {
  va_list args;
  va_start(args, format);
  VPrint(format, args);
  va_end(args);
}


void OS::VPrint(const char* format, va_list args) {
  VPrintHelper(stdout, format, args);
}


void OS::FPrint(FILE* out, const char* format, ...) {
  va_list args;
  va_start(args, format);
  VFPrint(out, format, args);
  va_end(args);
}


void OS::VFPrint(FILE* out, const char* format, va_list args) {
  VPrintHelper(out, format, args);
}


// Print error message to console.
void OS::PrintError(const char* format, ...) {
  va_list args;
  va_start(args, format);
  VPrintError(format, args);
  va_end(args);
}


void OS::VPrintError(const char* format, va_list args) {
  VPrintHelper(stderr, format, args);
}
int OS::SNPrintF(Vector<char> str, const char* format, ...) {
  va_list args;
  va_start(args, format);
  int result = VSNPrintF(str, format, args);
  va_end(args);
  return result;
}


int OS::VSNPrintF(Vector<char> str, const char* format, va_list args) {
  int n = _vsnprintf_s(str.start(), str.length(), _TRUNCATE, format, args);
  // Make sure to zero-terminate the string if the output was truncated or if
  // there was an error.
  if (n < 0 || n >= str.length()) {
    if (str.length() > 0)
      str[str.length() - 1] = '\0';
    return -1;
  } else {
    return n;
  }
}
char* OS::StrChr(char* str, int c) {
  return const_cast<char*>(strchr(str, c));
}


void OS::StrNCpy(Vector<char> dest, const char* src, size_t n) {
  // Use _TRUNCATE or strncpy_s crashes (by design) if n >= dest_size.
  size_t buffer_size = static_cast<size_t>(dest.length());
  if (n + 1 > buffer_size)  // Count the trailing '\0'.
    n = _TRUNCATE;
  int result = strncpy_s(dest.start(), dest.length(), src, n);
  USE(result);
  ASSERT(result == 0 || (n == _TRUNCATE && result == STRUNCATE));
}
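
// Illustrative usage only (hypothetical helper, not part of the original
// file): VSNPrintF forwards to _vsnprintf_s with _TRUNCATE, so a formatted
// string that does not fit yields -1 while the buffer stays NUL-terminated.
// The buffer size below is arbitrary.
static void ExampleFormatting() {
  char buf[8];
  Vector<char> dest(buf, static_cast<int>(sizeof(buf)));
  int ok = OS::SNPrintF(dest, "%d+%d", 1, 2);  // ok == 3, buf == "1+2".
  int truncated = OS::SNPrintF(dest, "%s", "longer than eight characters");
  // truncated == -1; buf still ends in '\0' because VSNPrintF re-terminates.
  USE(ok);
  USE(truncated);
}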
static size_t GetPageSize() {
  static size_t page_size = 0;
  if (page_size == 0) {
    SYSTEM_INFO info;
    GetSystemInfo(&info);
    page_size = RoundUpToPowerOf2(info.dwPageSize);
  }
  return page_size;
}


// The allocation alignment is the guaranteed alignment for VirtualAlloc'ed
// blocks of memory.
size_t OS::AllocateAlignment() {
  static size_t allocate_alignment = 0;
  if (allocate_alignment == 0) {
    SYSTEM_INFO info;
    GetSystemInfo(&info);
    allocate_alignment = info.dwAllocationGranularity;
  }
  return allocate_alignment;
}
void* OS::GetRandomMmapAddr() {
  Isolate* isolate = Isolate::UncheckedCurrent();
  // The current isolate may not be set up yet (e.g. while probing CPU
  // features); randomization does not matter then because the code page is
  // immediately freed.
  if (isolate != NULL) {
    // The address range used to randomize RWX allocations in OS::Allocate.
    // Try not to map pages into the default range that Windows loads DLLs
    // into, and use a multiple of 64k to avoid committing unused memory.
#ifdef V8_HOST_ARCH_64_BIT
    static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000;
    static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
#else
    static const intptr_t kAllocationRandomAddressMin = 0x04000000;
    static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
#endif
    uintptr_t address =
        (isolate->random_number_generator()->NextInt() << kPageSizeBits) |
        kAllocationRandomAddressMin;
    address &= kAllocationRandomAddressMax;
    return reinterpret_cast<void *>(address);
  }
  return NULL;
}
static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
  LPVOID base = NULL;

  if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
    // For executable or reserved pages try to randomize the allocation
    // address.
    for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
      base = VirtualAlloc(OS::GetRandomMmapAddr(), size, action, protection);
    }
  }

  // After three attempts give up and let the OS find an address to use.
  if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);

  return base;
}
void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool is_executable) {
  // VirtualAlloc rounds the allocated size up to the page size automatically.
  size_t msize = RoundUp(requested, static_cast<int>(GetPageSize()));

  // Windows XP SP2 allows Data Execution Prevention (DEP).
  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;

  LPVOID mbase = RandomizedVirtualAlloc(msize,
                                        MEM_COMMIT | MEM_RESERVE,
                                        prot);

  if (mbase == NULL) {
    LOG(Isolate::Current(), StringEvent("OS::Allocate", "VirtualAlloc failed"));
    return NULL;
  }

  ASSERT(IsAligned(reinterpret_cast<intptr_t>(mbase), OS::AllocateAlignment()));

  *allocated = msize;
  return mbase;
}
void OS::Free(void* address, const size_t size) {
  // TODO(1240712): VirtualFree has a return value which is ignored here.
  VirtualFree(address, 0, MEM_RELEASE);
  USE(size);
}


void OS::ProtectCode(void* address, const size_t size) {
  DWORD old_protect;
  VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
}


void OS::Guard(void* address, const size_t size) {
  DWORD oldprotect;
  VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
}
void OS::Abort() {
  if (FLAG_hard_abort) {
    V8_IMMEDIATE_CRASH();
  }
  // Make the MSVCRT do a silent abort.
  raise(SIGABRT);
}
class Win32MemoryMappedFile : public OS::MemoryMappedFile {
 public:
  Win32MemoryMappedFile(HANDLE file, HANDLE file_mapping, void* memory,
                        int size)
      : file_(file),
        file_mapping_(file_mapping),
        memory_(memory),
        size_(size) { }
  virtual ~Win32MemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }

 private:
  HANDLE file_;
  HANDLE file_mapping_;
  void* memory_;
  int size_;
};
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
  // Open a physical file.
  HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
      FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL);
  if (file == INVALID_HANDLE_VALUE) return NULL;

  int size = static_cast<int>(GetFileSize(file, NULL));

  // Create a file mapping for the physical file.
  HANDLE file_mapping = CreateFileMapping(file, NULL,
      PAGE_READWRITE, 0, static_cast<DWORD>(size), NULL);
  if (file_mapping == NULL) return NULL;

  // Map a view of the file into memory.
  void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
  return new Win32MemoryMappedFile(file, file_mapping, memory, size);
}
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
    void* initial) {
  // Open (or create) a physical file.
  HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
      FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, 0, NULL);
  if (file == NULL) return NULL;

  // Create a file mapping for the physical file.
  HANDLE file_mapping = CreateFileMapping(file, NULL,
      PAGE_READWRITE, 0, static_cast<DWORD>(size), NULL);
  if (file_mapping == NULL) return NULL;

  // Map a view of the file into memory and copy the initial contents into it.
  void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
  if (memory) OS::MemCopy(memory, initial, size);
  return new Win32MemoryMappedFile(file, file_mapping, memory, size);
}
Win32MemoryMappedFile::~Win32MemoryMappedFile() {
  if (memory_ != NULL)
    UnmapViewOfFile(memory_);
  CloseHandle(file_mapping_);
  CloseHandle(file_);
}
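
// Illustrative usage only (hypothetical snippet, not part of the original
// file): the factory functions above return NULL on failure, so callers are
// expected to check the result before touching memory(). The file name is an
// arbitrary example.
static void ExampleMemoryMappedFile() {
  OS::MemoryMappedFile* mapped = OS::MemoryMappedFile::open("example.dat");
  if (mapped != NULL) {
    void* bytes = mapped->memory();  // Writable view of the whole file.
    USE(bytes);
    delete mapped;                   // Unmaps the view and closes the handles.
  }
}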
// List of handled functions from dbghelp.dll.
#define DBGHELP_FUNCTION_LIST(V)  \
  V(SymInitialize)                \
  V(SymGetOptions)                \
  V(SymSetOptions)                \
  V(SymGetSearchPath)             \
  V(SymLoadModule64)              \
  V(StackWalk64)                  \
  V(SymGetSymFromAddr64)          \
  V(SymGetLineFromAddr64)         \
  V(SymFunctionTableAccess64)     \
  V(SymGetModuleBase64)

// List of handled functions from TlHelp32.h.
#define TLHELP32_FUNCTION_LIST(V)  \
  V(CreateToolhelp32Snapshot)      \
  V(Module32FirstW)                \
  V(Module32NextW)

// Decoration used for the type and variable name of each dynamically loaded
// function.
#define DLL_FUNC_TYPE(name) _##name##_
#define DLL_FUNC_VAR(name) _##name
typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetSymFromAddr64))(
    IN HANDLE hProcess, IN DWORD64 qwAddr,
    OUT PDWORD64 pdwDisplacement, OUT PIMAGEHLP_SYMBOL64 Symbol);
typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetLineFromAddr64))(
    IN HANDLE hProcess, IN DWORD64 qwAddr,
    OUT PDWORD pdwDisplacement, OUT PIMAGEHLP_LINE64 Line64);
typedef BOOL (__stdcall *DLL_FUNC_TYPE(Module32FirstW))(HANDLE hSnapshot,
                                                        LPMODULEENTRY32W lpme);
// Declare a variable for each dynamically loaded DLL function.
#define DEF_DLL_FUNCTION(name) DLL_FUNC_TYPE(name) DLL_FUNC_VAR(name) = NULL;
DBGHELP_FUNCTION_LIST(DEF_DLL_FUNCTION)
TLHELP32_FUNCTION_LIST(DEF_DLL_FUNCTION)
#undef DEF_DLL_FUNCTION
// Load the functions. The macros keep the list of functions in one place.
static bool LoadDbgHelpAndTlHelp32() {
  static bool dbghelp_loaded = false;

  if (dbghelp_loaded) return true;

  HMODULE module;

  // Load functions from the dbghelp.dll module.
  module = LoadLibrary(TEXT("dbghelp.dll"));
  if (module == NULL) {
    return false;
  }

#define LOAD_DLL_FUNC(name)                                                 \
  DLL_FUNC_VAR(name) =                                                      \
      reinterpret_cast<DLL_FUNC_TYPE(name)>(GetProcAddress(module, #name));

DBGHELP_FUNCTION_LIST(LOAD_DLL_FUNC)

#undef LOAD_DLL_FUNC

  // Load functions from kernel32.dll (the TlHelp32.h functions used to live
  // in tlhelp32.dll but have moved to kernel32.dll).
  module = LoadLibrary(TEXT("kernel32.dll"));
  if (module == NULL) {
    return false;
  }

#define LOAD_DLL_FUNC(name)                                                 \
  DLL_FUNC_VAR(name) =                                                      \
      reinterpret_cast<DLL_FUNC_TYPE(name)>(GetProcAddress(module, #name));

TLHELP32_FUNCTION_LIST(LOAD_DLL_FUNC)

#undef LOAD_DLL_FUNC

  // Check that all functions were loaded.
  bool result =
#define DLL_FUNC_LOADED(name) (DLL_FUNC_VAR(name) != NULL) &&

DBGHELP_FUNCTION_LIST(DLL_FUNC_LOADED)
TLHELP32_FUNCTION_LIST(DLL_FUNC_LOADED)

#undef DLL_FUNC_LOADED
      true;

  dbghelp_loaded = result;
  return result;
}
#undef DBGHELP_FUNCTION_LIST
#undef TLHELP32_FUNCTION_LIST
#undef DLL_FUNC_TYPE
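
// Illustrative only (hypothetical names, not part of the original file): the
// DBGHELP_FUNCTION_LIST / DEF_DLL_FUNCTION / LOAD_DLL_FUNC machinery above is
// an "X macro": one list of names is expanded several times to produce the
// typedefs, the function-pointer variables, the GetProcAddress() loads and
// the final availability check. A minimal standalone sketch of the pattern:
#define EXAMPLE_FUNCTION_LIST(V) V(Alpha) V(Beta)
#define EXAMPLE_DECLARE(name) static int example_##name = 0;
EXAMPLE_FUNCTION_LIST(EXAMPLE_DECLARE)  // Declares example_Alpha, example_Beta.
#undef EXAMPLE_DECLARE
#undef EXAMPLE_FUNCTION_LIST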
// Load the symbols for generating stack traces.
static bool LoadSymbols(Isolate* isolate, HANDLE process_handle) {
  static bool symbols_loaded = false;

  if (symbols_loaded) return true;

  BOOL ok;

  // Initialize the symbol engine.
  ok = _SymInitialize(process_handle,  // hProcess
                      NULL,            // UserSearchPath
                      false);          // fInvadeProcess
  if (!ok) return false;

  DWORD options = _SymGetOptions();
  options |= SYMOPT_LOAD_LINES;
  options |= SYMOPT_FAIL_CRITICAL_ERRORS;
  options = _SymSetOptions(options);

  // Iterate through all modules in the current process and load the symbols
  // for each of them.
  HANDLE snapshot = _CreateToolhelp32Snapshot(
      TH32CS_SNAPMODULE,       // dwFlags
      GetCurrentProcessId());  // th32ProcessId
  if (snapshot == INVALID_HANDLE_VALUE) return false;
  MODULEENTRY32W module_entry;
  module_entry.dwSize = sizeof(module_entry);  // Set the size of the structure.
  BOOL cont = _Module32FirstW(snapshot, &module_entry);
  while (cont) {
    DWORD64 base;
    // Note: SymLoadModule64 accepts both unicode and ASCII strings even
    // though the parameter type is PSTR.
    base = _SymLoadModule64(
        process_handle,                                       // hProcess
        0,                                                    // hFile
        reinterpret_cast<PSTR>(module_entry.szExePath),       // ImageName
        reinterpret_cast<PSTR>(module_entry.szModule),        // ModuleName
        reinterpret_cast<DWORD64>(module_entry.modBaseAddr),  // BaseOfDll
        module_entry.modBaseSize);                            // SizeOfDll
    if (base == 0) {
      int err = GetLastError();
      if (err != ERROR_MOD_NOT_FOUND &&
          err != ERROR_INVALID_HANDLE) return false;
    }
    LOG(isolate,
        SharedLibraryEvent(
            module_entry.szExePath,
            reinterpret_cast<unsigned int>(module_entry.modBaseAddr),
            reinterpret_cast<unsigned int>(module_entry.modBaseAddr +
                                           module_entry.modBaseSize)));
    cont = _Module32NextW(snapshot, &module_entry);
  }
  CloseHandle(snapshot);

  symbols_loaded = true;
  return true;
}
void OS::LogSharedLibraryAddresses(Isolate* isolate) {
  // SharedLibraryEvents are logged while loading the symbol information; only
  // DLLs loaded at the time of this call are accounted for.
  if (!LoadDbgHelpAndTlHelp32()) return;
  HANDLE process_handle = GetCurrentProcess();
  LoadSymbols(isolate, process_handle);
}
uint64_t OS::TotalPhysicalMemory() {
  MEMORYSTATUSEX memory_info;
  memory_info.dwLength = sizeof(memory_info);
  if (!GlobalMemoryStatusEx(&memory_info)) {
    UNREACHABLE();
    return 0;
  }

  return static_cast<uint64_t>(memory_info.ullTotalPhys);
}
#else  // __MINGW32__
#endif  // __MINGW32__
double OS::nan_value() {
  // Positive Quiet NaN with no payload (a.k.a. Indeterminate): all bits in
  // the mask are set, so the value equals kQuietNaNMask.
  static const __int64 nanval = kQuietNaNMask;
  return *reinterpret_cast<const double*>(&nanval);
}
int OS::ActivationFrameAlignment() {
#ifdef _WIN64
  return 16;  // Windows 64-bit ABI requires the stack to be 16-byte aligned.
#elif defined(__MINGW32__)
  // With gcc 4.4 the tree vectorization optimizer can generate code that
  // requires 16-byte alignment, such as movdqa on x86.
  return 16;
#else
  return 8;  // Floating-point math runs faster with 8-byte alignment.
#endif
}
VirtualMemory::VirtualMemory(size_t size)
    : address_(ReserveRegion(size)), size_(size) { }


VirtualMemory::VirtualMemory(size_t size, size_t alignment)
    : address_(NULL), size_(0) {
  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  void* address = ReserveRegion(request_size);
  if (address == NULL) return;
  Address base = RoundUp(static_cast<Address>(address), alignment);
  // Try reducing the size by freeing and then re-reserving a specific area.
  bool result = ReleaseRegion(address, request_size);
  USE(result);
  ASSERT(result);
  address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
  if (address != NULL) {
    request_size = size;
    ASSERT(base == static_cast<Address>(address));
  } else {
    // Re-reserving at the aligned base failed; just go with the bigger area.
    address = ReserveRegion(request_size);
    if (address == NULL) return;
  }
  address_ = address;
  size_ = request_size;
}
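
// Illustrative usage only (hypothetical snippet, not part of the original
// file): a caller reserves an aligned region and commits pages on demand.
// The sizes below are arbitrary examples.
static void ExampleReserveAndCommit() {
  VirtualMemory vm(1024 * 1024, 64 * 1024);  // 1 MB reserved, 64 KB aligned.
  if (vm.IsReserved()) {
    vm.Commit(vm.address(), 4096, false);    // Commit one read/write page.
  }
  // The destructor releases the reservation.
}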
VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    bool result = ReleaseRegion(address(), size());
    ASSERT(result);
    USE(result);
  }
}


bool VirtualMemory::IsReserved() {
  return address_ != NULL;
}


void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}


bool VirtualMemory::Uncommit(void* address, size_t size) {
  ASSERT(IsReserved());
  return UncommitRegion(address, size);
}
bool VirtualMemory::Guard(void* address) {
  if (NULL == VirtualAlloc(address,
                           OS::CommitPageSize(),
                           MEM_COMMIT,
                           PAGE_NOACCESS)) {
    return false;
  }
  return true;
}


void* VirtualMemory::ReserveRegion(size_t size) {
  return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS);
}
bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
  if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
    return false;
  }
  return true;
}
bool VirtualMemory::UncommitRegion(void* base, size_t size) {
  return VirtualFree(base, size, MEM_DECOMMIT) != 0;
}


bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
  return VirtualFree(base, 0, MEM_RELEASE) != 0;
}
bool VirtualMemory::HasLazyCommits() {
  return false;
}
static const HANDLE kNoThread = INVALID_HANDLE_VALUE;
static unsigned int __stdcall ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  thread->NotifyStartedAndRun();
  return 0;
}
class Thread::PlatformData : public Malloced {
 public:
  explicit PlatformData(HANDLE thread) : thread_(thread) {}
  HANDLE thread_;
  unsigned thread_id_;
};
Thread::Thread(const Options& options)
    : stack_size_(options.stack_size()),
      start_semaphore_(NULL) {
  data_ = new PlatformData(kNoThread);
  set_name(options.name());
}
void Thread::set_name(const char* name) {
  OS::StrNCpy(Vector<char>(name_, sizeof(name_)), name, strlen(name));
  name_[sizeof(name_) - 1] = '\0';
}
// Create a new thread. _beginthreadex() is used instead of CreateThread()
// because CreateThread() does not initialize thread-specific structures in
// the C runtime library.
void Thread::Start() {
  data_->thread_ = reinterpret_cast<HANDLE>(
      _beginthreadex(NULL,
                     static_cast<unsigned>(stack_size_),
                     ThreadEntry,
                     this,
                     0,
                     &data_->thread_id_));
}


// Wait for the thread to terminate.
void Thread::Join() {
  if (data_->thread_id_ != GetCurrentThreadId()) {
    WaitForSingleObject(data_->thread_, INFINITE);
  }
}
Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
  DWORD result = TlsAlloc();
  ASSERT(result != TLS_OUT_OF_INDEXES);
  return static_cast<LocalStorageKey>(result);
}


void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
  BOOL result = TlsFree(static_cast<DWORD>(key));
  USE(result);
  ASSERT(result);
}


void* Thread::GetThreadLocal(LocalStorageKey key) {
  return TlsGetValue(static_cast<DWORD>(key));
}


void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
  BOOL result = TlsSetValue(static_cast<DWORD>(key), value);
  USE(result);
  ASSERT(result);
}
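
// Illustrative usage only (hypothetical snippet, not part of the original
// file): a key created once can be used from any thread to stash and fetch a
// per-thread pointer.
static void ExampleThreadLocal() {
  Thread::LocalStorageKey key = Thread::CreateThreadLocalKey();
  int value = 42;  // Arbitrary example payload.
  Thread::SetThreadLocal(key, &value);
  int* fetched = static_cast<int*>(Thread::GetThreadLocal(key));
  USE(fetched);    // fetched == &value on this thread.
  Thread::DeleteThreadLocalKey(key);
}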