#define ASSERT_PAGE_ALIGNED(address)                                           \
  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define ASSERT_OBJECT_ALIGNED(address)                                         \
  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define ASSERT_OBJECT_SIZE(size)                                               \
  ASSERT((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))

#define ASSERT_PAGE_OFFSET(offset)                                             \
  ASSERT((Page::kObjectStartOffset <= offset)                                  \
      && (offset <= Page::kPageSize))

#define ASSERT_MAP_PAGE_INDEX(index)                                           \
  ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
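// A minimal usage sketch for the assertion macros above, assuming an
// Address `addr` and an int `size_in_bytes` taken from an allocation path
// (illustrative only):
//
//   ASSERT_PAGE_ALIGNED(addr);          // addr sits on a page boundary
//   ASSERT_OBJECT_ALIGNED(addr);        // low kObjectAlignmentMask bits clear
//   ASSERT_OBJECT_SIZE(size_in_bytes);  // 0 < size <= kMaxRegularHeapObjectSize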
class MemoryAllocator;
class AllocationInfo;
      : cell_(cell), mask_(mask), data_only_(data_only) { }

  bool operator==(const MarkBit& other) {
    return cell_ == other.cell_ && mask_ == other.mask_;
  }

  inline void Set() { *cell_ |= mask_; }
  inline bool Get() { return (*cell_ & mask_) != 0; }
  inline void Clear() { *cell_ &= ~mask_; }

      return MarkBit(cell_ + 1, 1, data_only_);
      return MarkBit(cell_, new_mask, data_only_);
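// Usage sketch for the MarkBit accessors above, assuming a Bitmap* `bitmap`
// and a mark-bit index obtained elsewhere (illustrative):
//
//   MarkBit mark_bit = bitmap->MarkBitFromIndex(index);
//   if (!mark_bit.Get()) mark_bit.Set();  // sets the bit via *cell_ |= mask_
//   mark_bit.Clear();                     // clears it via *cell_ &= ~mask_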
  INLINE(static uint32_t IndexToCell(uint32_t index)) {
  INLINE(static uint32_t CellToIndex(uint32_t index)) {
  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {

    return reinterpret_cast<Address>(this);
    return reinterpret_cast<Bitmap*>(addr);
    return MarkBit(cell, mask, data_only);
  static void PrintWord(uint32_t word, uint32_t himask = 0) {
    for (uint32_t mask = 1; mask != 0; mask <<= 1) {
      if ((mask & himask) != 0) PrintF("[");
      PrintF((mask & word) ? "1" : "0");
      if ((mask & himask) != 0) PrintF("]");
    }
  }
  void Print(uint32_t pos, uint32_t cell) {
    if (cell == seq_type) {

    if (seq_length > 0) {
             seq_type == 0 ? 0 : 1,

  static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }

      printer.Print(i, cells()[i]);
      if (cells()[i] != 0) {
    Release_Store(&next_chunk_, reinterpret_cast<AtomicWord>(next));
    Release_Store(&prev_chunk_, reinterpret_cast<AtomicWord>(prev));

    return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
                                    kFailureTag);

    ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
           kFailureTag);

    flags_ |= static_cast<uintptr_t>(1) << flag;
    flags_ &= ~(static_cast<uintptr_t>(1) << flag);
    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
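// The flag accessors above are plain single-bit arithmetic on the flags_
// word. A standalone sketch with an ordinary integer (illustrative):
//
//   uintptr_t flags = 0;
//   int flag = 3;
//   flags |= static_cast<uintptr_t>(1) << flag;                     // SetFlag
//   bool is_set = (flags & (static_cast<uintptr_t>(1) << flag)) != 0;  // IsFlagSet
//   flags &= ~(static_cast<uintptr_t>(1) << flag);                  // ClearFlag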
    if (FLAG_gc_verbose) {
      PrintF("ResetLiveBytes:%p:%x->0\n",

    if (FLAG_gc_verbose) {
      printf("UpdateLiveBytes:%p:%x%c=%x->%x\n",
             ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),

    const intptr_t offset =

    int offset = static_cast<int>(a - address());
#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
  type name() { return name##_; }                 \
  void set_##name(type name) { name##_ = name; }  \
  void add_##name(type name) { name##_ += name; }

#undef FRAGMENTATION_STATS_ACCESSORS
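// For a hypothetical field `intptr_t small_size_;`, the accessor macro above
// expands to (sketch; the field name is illustrative):
//
//   intptr_t small_size() { return small_size_; }
//   void set_small_size(intptr_t small_size) { small_size_ = small_size; }
//   void add_small_size(intptr_t small_size) { small_size_ += small_size; }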
      : heap_(heap), id_(id), executable_(executable) {}

  virtual intptr_t Size() = 0;
  virtual void Print() = 0;
  bool SetUp(const size_t requested_size);

    if (this == NULL || code_range_ == NULL) return false;
    return start <= address && address < start + code_range_->size();

                                   const size_t commit_size,

    FreeBlock(Address start_arg, size_t size_arg)
    FreeBlock(void* start_arg, size_t size_arg)

  List<FreeBlock> free_list_;
  List<FreeBlock> allocation_list_;
  int current_allocation_block_index_;

  void GetNextAllocationBlock(size_t requested);
  static int CompareFreeBlockAddress(const FreeBlock* left,
                                     const FreeBlock* right);
    for (int idx = 0; idx < kSize; idx++) {
      starts_[idx] = reinterpret_cast<Address>(-1);

    for (int idx = start_region; idx <= end_region; idx++) {
      if (starts_[idx] > addr) starts_[idx] = addr;

  static const int kRegionSizeLog2 = 13;
  static const int kRegionSize = 1 << kRegionSizeLog2;
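// Region mapping implied by the constants above: an address's offset within
// its page, shifted right by kRegionSizeLog2, selects an 8KB region slot in
// starts_ (a sketch, assuming the page-offset helpers used elsewhere in this
// header):
//
//   int region = (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
//   if (starts_[region] > addr) starts_[region] = addr;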
  bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);

  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }

    if (capacity_executable_ < size_executable_) return 0;
    return capacity_executable_ - size_executable_;

    return address < lowest_ever_allocated_ ||
           address >= highest_ever_allocated_;

  void ReportStatistics();

                             intptr_t commit_area_size,
                                           size_t reserved_size);

  size_t capacity_executable_;
  size_t size_executable_;

  void* lowest_ever_allocated_;
  void* highest_ever_allocated_;

  struct MemoryAllocationCallbackRegistration {
        : callback(callback), space(space), action(action) {

  List<MemoryAllocationCallbackRegistration> memory_allocation_callbacks_;

  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,

  void UpdateAllocatedSpaceLimits(void* low, void* high) {
    lowest_ever_allocated_ = Min(lowest_ever_allocated_, low);
    highest_ever_allocated_ = Max(highest_ever_allocated_, high);
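// These limits back the conservative IsOutsideAllocatedSpace test shown
// earlier: an address below lowest_ever_allocated_ or at/above
// highest_ever_allocated_ can never point into memory handed out by this
// allocator, so callers can reject such pointers without a page lookup.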
      if (next_obj != NULL) return next_obj;
    } while (AdvanceToNextPage());

  enum PageMode { kOnePageOnly, kAllPagesInSpace };
  PageMode page_mode_;

  inline HeapObject* FromCurrentPage();
  bool AdvanceToNextPage();

  inline void Initialize(PagedSpace* owner,

  explicit inline PageIterator(PagedSpace* space);
  inline bool has_next();
  inline Page* next();
  bool VerifyPagedAllocation() {
    return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_))
        && (top_ <= limit_);

  void ExpandSpace(int size_in_bytes) {
    capacity_ += size_in_bytes;
    size_ += size_in_bytes;
    if (capacity_ > max_capacity_) {
      max_capacity_ = capacity_;

  void ShrinkSpace(int size_in_bytes) {
    capacity_ -= size_in_bytes;
    size_ -= size_in_bytes;

  void AllocateBytes(intptr_t size_in_bytes) {
    size_ += size_in_bytes;

  void DeallocateBytes(intptr_t size_in_bytes) {
    size_ -= size_in_bytes;

  void WasteBytes(int size_in_bytes) {
    size_ -= size_in_bytes;
    waste_ += size_in_bytes;

  intptr_t max_capacity_;
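// Bookkeeping sketch for the accounting methods above, assuming an
// AllocationStats instance `stats`: capacity_ counts bytes owned by the
// space, size_ counts bytes currently treated as in use, and waste_ counts
// bytes written off to fragmentation, so the bytes still available are
// roughly capacity_ - size_ (illustrative):
//
//   stats.ExpandSpace(Page::kPageSize);  // capacity_ and size_ grow together
//   stats.DeallocateBytes(256);          // 256 bytes leave size_, become available
//   stats.WasteBytes(16);                // 16 bytes move from size_ to waste_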
    ASSERT(!maybe->IsFailure());

  intptr_t SumFreeList();
  int FreeListLength();

  intptr_t SumFreeLists();

  FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);

  static const int kMediumListMax = 0x7ff * kPointerSize;
  static const int kLargeListMax = 0x3fff * kPointerSize;
  static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
  static const int kMediumAllocationMax = kSmallListMax;
  static const int kLargeAllocationMax = kMediumListMax;

  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
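// Size-class selection sketch implied by the list limits above, assuming the
// usual small/medium/large/huge categories and the kSmallListMax bound
// defined nearby in the header (illustrative):
//
//   if (size_in_bytes <= kSmallListMax) {
//     small_list()->Free(node, size_in_bytes);
//   } else if (size_in_bytes <= kMediumListMax) {
//     medium_list()->Free(node, size_in_bytes);
//   } else if (size_in_bytes <= kLargeListMax) {
//     large_list()->Free(node, size_in_bytes);
//   } else {
//     huge_list()->Free(node, size_in_bytes);
//   }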
             intptr_t max_capacity,

    return size_in_bytes - wasted;

    int old_linear_size = static_cast<int>(limit() - top());

  virtual void Verify(ObjectVisitor* visitor);
  virtual void Print();
  void ReportStatistics();

  void CollectCodeStatistics();
  static void ReportCodeStatistics(Isolate* isolate);
  static void ResetCodeStatistics(Isolate* isolate);
  const char* name() { return name_; }

    return reinterpret_cast<Address>(this);

        reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &

    InitializeAsAnchor(owner);

  static NewSpacePage* Initialize(Heap* heap,
        current_page_(NULL) { }

  void SetUp(Address start, int initial_capacity, int maximum_capacity);

  bool GrowTo(int new_capacity);

    if (next_page == anchor()) return false;
    current_page_ = next_page;

    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
        == reinterpret_cast<uintptr_t>(start_);

    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
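// The object-pointer variant above folds the heap-object tag into the same
// masked comparison: object_mask_ is expected to extend address_mask_ with
// the tag bits and object_expected_ to be start_ with the tag set, so a
// tagged pointer can be tested without stripping its tag first (a sketch of
// the intended setup; the actual initialization lives elsewhere in the file):
//
//   object_mask_ = address_mask_ | kHeapObjectTagMask;
//   object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;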
  virtual void Verify();
  virtual void Print();

  void FlipPages(intptr_t flags, intptr_t flag_mask);
  void SetCapacity(int new_capacity);

  int maximum_capacity_;
  int initial_capacity_;
  intptr_t maximum_committed_;

  uintptr_t address_mask_;
  uintptr_t object_mask_;
  uintptr_t object_expected_;

  NewSpacePage anchor_;
  NewSpacePage* current_page_;
    if (current_ == limit_) return NULL;

    if (current_ == limit_) return NULL;

    int size = (size_func_ == NULL) ? object->Size() : size_func_(object);

  void Initialize(Address start,

  explicit inline NewSpacePageIterator(NewSpace* space);
  explicit inline NewSpacePageIterator(SemiSpace* space);

  inline bool has_next();
  inline NewSpacePage* next();

  NewSpacePage* prev_page_;
  NewSpacePage* next_page_;
  NewSpacePage* last_page_;
        inline_allocation_limit_step_(0) {}

  bool SetUp(int reserved_semispace_size_, int max_semispace_size);

    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
        == reinterpret_cast<uintptr_t>(start_);

    return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;

    return allocation_info_.top();
    allocation_info_.set_top(top);
    return allocation_info_.limit();

  uintptr_t mask() { return address_mask_; }

    inline_allocation_limit_step_ = step;
    top_on_previous_step_ = allocation_info_.top();

    return to_space_.Contains(address);
    return from_space_.Contains(address);

  virtual void Verify();
  virtual void Print() { to_space_.Print(); }

    return from_space_.Commit();

    return inline_allocation_limit_step_;

  void UpdateAllocationInfo();

  uintptr_t chunk_size_;

  uintptr_t address_mask_;
  uintptr_t object_mask_;
  uintptr_t object_expected_;

  intptr_t inline_allocation_limit_step_;
  Address top_on_previous_step_;
           intptr_t max_capacity,
      : PagedSpace(heap, max_capacity, id, executable) {

#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
  SLOW_ASSERT((space).page_low() <= (info).top()      \
              && (info).top() <= (space).page_high()  \
              && (info).limit() <= (space).page_high())
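// Typical use of the macro above (sketch): checking that new-space allocation
// state stays inside the current to-space page, e.g.
//
//   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);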
  int CompactionThreshold() {
    return kMapsPerPage * (max_map_space_pages_ - 1);

  const int max_map_space_pages_;
    return objects_size_;
    return maximum_committed_;

  virtual void Verify();
  virtual void Print();
  void ReportStatistics();
  void CollectCodeStatistics();

  intptr_t max_capacity_;
  intptr_t maximum_committed_;
  intptr_t objects_size_;
  inline explicit PointerChunkIterator(Heap* heap);

      case kOldPointerState: {
        if (old_pointer_iterator_.has_next()) {
          return old_pointer_iterator_.next();

        if (map_iterator_.has_next()) {
          return map_iterator_.next();
        state_ = kLargeObjectState;

      case kLargeObjectState: {
          heap_object = lo_iterator_.Next();
          if (heap_object == NULL) {
            state_ = kFinishedState;
        } while (!heap_object->IsFixedArray());

      case kFinishedState:

  PageIterator old_pointer_iterator_;
  PageIterator map_iterator_;
  LargeObjectIterator lo_iterator_;
  struct CommentStatistic {
    const char* comment;

    static const int kMaxComments = 64;

#endif  // V8_SPACES_H_