#ifndef V8_V8GLOBALS_H_
#define V8_V8GLOBALS_H_
#ifdef V8_HOST_ARCH_64_BIT
#define PROCESSOR_CACHE_LINE_SIZE 64
class ExternalReference;
template <typename T> class Handle;
template <typename Config, class Allocator = FreeStoreAllocationPolicy>
    class SplayTree;
class RecursiveMutex;
MaybeObject* (*setter)(
    Isolate* isolate, JSObject* object, Object* value, void* data);
#define HAS_SMI_TAG(value) \
  ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag)

#define HAS_FAILURE_TAG(value) \
  ((reinterpret_cast<intptr_t>(value) & kFailureTagMask) == kFailureTag)
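Both tag checks are plain mask-and-compare operations on the raw pointer bits. A minimal sketch of the smi case, with the tag constants written out as assumptions for illustration (the real kSmiTag/kSmiTagMask are defined elsewhere in V8):

#include <cassert>
#include <cstdint>

int main() {
  // Assumed tag layout, for illustration only: smis carry a 0 in the low bit,
  // tagged heap object pointers carry a 1.
  const intptr_t kSmiTag = 0;
  const intptr_t kSmiTagMask = 1;

  intptr_t smi_like = 0x1234;   // low bit 0 -> passes the smi check
  intptr_t heap_like = 0x1235;  // low bit 1 -> treated as a heap object pointer
  assert((smi_like & kSmiTagMask) == kSmiTag);
  assert((heap_like & kSmiTagMask) != kSmiTag);
  return 0;
}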
#define OBJECT_POINTER_ALIGN(value) \
  (((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)

#define POINTER_SIZE_ALIGN(value) \
  (((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)

#define CODE_POINTER_ALIGN(value) \
  (((value) + kCodeAlignmentMask) & ~kCodeAlignmentMask)
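All three macros use the standard round-up-to-a-power-of-two idiom: add the mask, then clear the low bits, which rounds the value up to the next multiple of the alignment. A minimal sketch, with the alignment written out as an assumption (8-byte object alignment, as on a 64-bit build):

#include <cstdint>

// Assumed values for illustration; the real constants are defined in this header.
const intptr_t kObjectAlignment = 8;
const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;

#define OBJECT_POINTER_ALIGN(value) \
  (((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)

static_assert(OBJECT_POINTER_ALIGN(13) == 16, "13 rounds up to the next 8-byte boundary");
static_assert(OBJECT_POINTER_ALIGN(16) == 16, "already-aligned values are unchanged");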
#define TRACK_MEMORY(name) \
  void* operator new(size_t size) { \
    void* result = ::operator new(size); \
    Logger::NewEventStatic(name, result, size); \
    return result; \
  } \
  void operator delete(void* object) { \
    Logger::DeleteEventStatic(name, object); \
    ::operator delete(object); \
  }

// In configurations where memory tracking is compiled out, the header instead
// defines the same macro to expand to nothing:
#define TRACK_MEMORY(name)
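A hypothetical usage sketch (the class name is invented for illustration): placing TRACK_MEMORY inside a class body gives that class its own class-scope operator new/delete, so every allocation and deallocation of instances is reported to the Logger under the given tag.

// Hypothetical class; not part of this header.
class TrackedThing {
 public:
  TRACK_MEMORY("TrackedThing")  // class-scope operator new/delete that log events
  int payload;
};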
#endif  // V8_V8GLOBALS_H_
bool(* WeakSlotCallbackWithHeap)(Heap *heap, Object **pointer)
const intptr_t kSmiSignMask
const intptr_t kDoubleAlignmentMask
const uint32_t kNaNOrInfinityLowerBoundUpper32
DoubleRepresentation(double x)
const intptr_t kCodeAlignmentMask
const intptr_t kIntptrSignBit
const int kPointerSizeLog2
const intptr_t kCodeAlignment
const intptr_t kObjectAlignmentMask
void(* StoreBufferCallback)(Heap *heap, MemoryChunk *page, StoreBufferEvent event)
struct v8::internal::IeeeDoubleLittleEndianArchType::<anonymous> bits
bool operator==(const DoubleRepresentation &other) const
const uint32_t kSlotsZapValue
const uint64_t kHoleNanInt64
int(* HeapObjectCallback)(HeapObject *obj)
const intptr_t kFailureTagMask
const int kFailureTagSize
const uint32_t kHoleNanUpper32
const uint32_t kHoleNanLower32
bool IsLexicalVariableMode(VariableMode mode)
const uint32_t kQuietNaNHighBitsMask
const Address kFromSpaceZapValue
bool IsDeclaredVariableMode(VariableMode mode)
const intptr_t kPointerAlignmentMask
const uint32_t kFreeListZapValue
void(* InlineCacheCallback)(Code *code, Address ic)
const intptr_t kObjectAlignment
const uint64_t kLastNonNaNInt64
const intptr_t kPointerAlignment
const int kObjectAlignmentBits
const int kCodeAlignmentBits
bool(* ConstraintCallback)(Address new_addr, Address old_addr)
const Address kGlobalHandleZapValue
bool IsDynamicVariableMode(VariableMode mode)
const uint32_t kDebugZapValue
bool IsImmutableVariableMode(VariableMode mode)
struct v8::internal::IeeeDoubleBigEndianArchType::<anonymous> bits
const intptr_t kDoubleAlignment
bool(* WeakSlotCallback)(Object **pointer)
const Address kHandleZapValue
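The DoubleRepresentation helper and the kHoleNan*/kNaN* constants exist so that doubles can be compared and classified by their raw 64-bit pattern instead of by floating-point comparison, which misbehaves for NaNs. A minimal sketch of the idea, using an assumed helper layout and the standard IEEE-754 encoding rather than the header's exact definitions:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <limits>

// Sketch of a DoubleRepresentation-style helper: view the same 64 bits either
// as a double or as a raw integer. memcpy avoids strict-aliasing problems.
struct DoubleBits {
  explicit DoubleBits(double x) { std::memcpy(&bits, &x, sizeof bits); }
  bool operator==(const DoubleBits& other) const { return bits == other.bits; }
  uint64_t bits;
};

int main() {
  // NaN != NaN under floating-point comparison, but two copies of the same
  // NaN still have identical bit patterns.
  double nan = std::numeric_limits<double>::quiet_NaN();
  assert(!(nan == nan));
  assert(DoubleBits(nan) == DoubleBits(nan));

  // For any finite double the sign-masked upper 32 bits stay below the
  // IEEE-754 NaN/infinity boundary 0x7FF00000, which is the kind of boundary
  // a constant such as kNaNOrInfinityLowerBoundUpper32 describes.
  uint32_t upper = static_cast<uint32_t>(DoubleBits(1.5).bits >> 32);
  assert((upper & 0x7FFFFFFF) < 0x7FF00000u);
  return 0;
}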