#define ASSERT_PAGE_ALIGNED(address) \
  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define ASSERT_OBJECT_ALIGNED(address) \
  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define ASSERT_OBJECT_SIZE(size) \
  ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize))

#define ASSERT_PAGE_OFFSET(offset) \
  ASSERT((Page::kObjectStartOffset <= offset) \
      && (offset <= Page::kPageSize))

#define ASSERT_MAP_PAGE_INDEX(index) \
  ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
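// Illustrative usage (a sketch, not taken from this header): allocation
// helpers would typically validate their results with these macros, e.g.
//   ASSERT_PAGE_ALIGNED(page->address());
//   ASSERT_OBJECT_ALIGNED(object->address());
//   ASSERT_OBJECT_SIZE(size_in_bytes);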
class MemoryAllocator;
class AllocationInfo;
  MarkBit(CellType* cell, CellType mask, bool data_only)
      : cell_(cell), mask_(mask), data_only_(data_only) { }

  bool operator==(const MarkBit& other) {
    return cell_ == other.cell_ && mask_ == other.mask_;
  }

  inline void Set() { *cell_ |= mask_; }
  inline bool Get() { return (*cell_ & mask_) != 0; }
  inline void Clear() { *cell_ &= ~mask_; }

  // Advance to the next mark bit; on mask overflow, move to the next cell.
  inline MarkBit Next() {
    CellType new_mask = mask_ << 1;
    if (new_mask == 0) {
      return MarkBit(cell_ + 1, 1, data_only_);
    } else {
      return MarkBit(cell_, new_mask, data_only_);
    }
  }
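  // Sketch (not part of the original header): consecutive mark bits can be
  // walked with Next(); MarkBitFromIndex() below yields the starting bit.
  //   MarkBit bit = bitmap->MarkBitFromIndex(0);
  //   for (uint32_t i = 0; i < n; i++) {
  //     if (bit.Get()) { /* the object whose mark bit is i is marked */ }
  //     bit = bit.Next();
  //   }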
  INLINE(static uint32_t IndexToCell(uint32_t index)) {
    return index >> kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellToIndex(uint32_t index)) {
    return index << kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
    return (index + kBitIndexMask) & ~kBitIndexMask;
  }

  INLINE(Address address()) { return reinterpret_cast<Address>(this); }

  INLINE(static Bitmap* FromAddress(Address addr)) {
    return reinterpret_cast<Bitmap*>(addr);
  }

  inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
    MarkBit::CellType mask = 1 << (index & kBitIndexMask);
    MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
    return MarkBit(cell, mask, data_only);
  }
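  // Worked example (illustrative, assuming the 32-bit cells declared by
  // kBitsPerCell elsewhere in this header): mark bit index 70 lives in cell
  // 70 >> kBitsPerCellLog2 == 2, at bit position 70 & kBitIndexMask == 6,
  // so its mask is 1 << 6.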
  static void PrintWord(uint32_t word, uint32_t himask = 0) {
    for (uint32_t mask = 1; mask != 0; mask <<= 1) {
      if ((mask & himask) != 0) PrintF("[");
      PrintF((mask & word) ? "1" : "0");
      if ((mask & himask) != 0) PrintF("]");
    }
  }
  void Print(uint32_t pos, uint32_t cell) {
    if (cell == seq_type) {

    if (seq_length > 0) {
             seq_type == 0 ? 0 : 1,

  static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }

    printer.Print(i, cells()[i]);

      if (cells()[i] != 0) {
    ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==

    flags_ |= static_cast<uintptr_t>(1) << flag;

    flags_ &= ~(static_cast<uintptr_t>(1) << flag);

    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
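  // The three statements above implement a bit-flag set over a single
  // uintptr_t word.  A minimal self-contained sketch of the same idea
  // (names are illustrative, not from this header):
  //
  //   struct Flags {
  //     uintptr_t bits_;
  //     void Set(int flag)   { bits_ |= static_cast<uintptr_t>(1) << flag; }
  //     void Clear(int flag) { bits_ &= ~(static_cast<uintptr_t>(1) << flag); }
  //     bool IsSet(int flag) { return ((bits_ >> flag) & 1) != 0; }
  //   };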
    if (FLAG_gc_verbose) {
      PrintF("ResetLiveBytes:%p:%x->0\n",

    if (FLAG_gc_verbose) {
      printf("UpdateLiveBytes:%p:%x%c=%x->%x\n",
             ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
    const intptr_t offset =

    int offset = static_cast<int>(a - address());

      : heap_(heap), id_(id), executable_(executable) {}

  virtual intptr_t Size() = 0;

  virtual void Print() = 0;
  bool SetUp(const size_t requested_size);

    if (this == NULL || code_range_ == NULL) return false;
    return start <= address && address < start + code_range_->size();

    FreeBlock(Address start_arg, size_t size_arg)
        : start(start_arg), size(size_arg) {

    FreeBlock(void* start_arg, size_t size_arg)
        : start(static_cast<Address>(start_arg)), size(size_arg) {
  List<FreeBlock> free_list_;
  List<FreeBlock> allocation_list_;
  int current_allocation_block_index_;

  void GetNextAllocationBlock(size_t requested);
  static int CompareFreeBlockAddress(const FreeBlock* left,
                                     const FreeBlock* right);
    for (int idx = 0; idx < kSize; idx++) {
      starts_[idx] = reinterpret_cast<Address>(-1);

    for (int idx = start_region; idx <= end_region; idx++) {
      if (starts_[idx] > addr) starts_[idx] = addr;
    }

  static const int kRegionSizeLog2 = 13;
  static const int kRegionSize = 1 << kRegionSizeLog2;
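  // Illustrative note (not original documentation): with kRegionSizeLog2 == 13
  // each skip-list region covers 8 KB of the page, and the AddObject loop
  // above records, per region, the lowest object start address overlapping
  // that region.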
  bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);

  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }

  intptr_t Size() { return size_; }

  intptr_t AvailableExecutable() {
    if (capacity_executable_ < size_executable_) return 0;
    return capacity_executable_ - size_executable_;
  }

  void ReportStatistics();

  size_t capacity_executable_;
  size_t size_executable_;
  struct MemoryAllocationCallbackRegistration {
      : callback(callback), space(space), action(action) {

  List<MemoryAllocationCallbackRegistration> memory_allocation_callbacks_;

  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
      if (next_obj != NULL) return next_obj;
    } while (AdvanceToNextPage());

  enum PageMode { kOnePageOnly, kAllPagesInSpace };

  PageMode page_mode_;

  inline HeapObject* FromCurrentPage();

  bool AdvanceToNextPage();

  inline void Initialize(PagedSpace* owner,

  explicit inline PageIterator(PagedSpace* space);

  inline bool has_next();
  inline Page* next();
  bool VerifyPagedAllocation() {
    return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
           && (top <= limit);
  }
    capacity_ += size_in_bytes;
    size_ += size_in_bytes;

    capacity_ -= size_in_bytes;
    size_ -= size_in_bytes;

    size_ += size_in_bytes;

    size_ -= size_in_bytes;

    size_ -= size_in_bytes;
    waste_ += size_in_bytes;
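  // Illustrative reading of the fragments above: the allocation-stats
  // mutators keep capacity_ >= size_; expanding or shrinking the space moves
  // both counters, plain allocation and deallocation move only size_, and
  // wasting bytes moves them from size_ into waste_ (e.g. WasteBytes(16)
  // leaves capacity_ unchanged, decreases size_ by 16, increases waste_ by 16).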
    ASSERT(!maybe->IsFailure());

  explicit FreeList(PagedSpace* owner);

  int Free(Address start, int size_in_bytes);

  intptr_t SumFreeLists();

  void RepairLists(Heap* heap);

    return small_size_ + medium_size_ + large_size_ + huge_size_;

  intptr_t EvictFreeListItems(Page* p);

  FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);

  static const int kMediumListMax = 0x7ff * kPointerSize;
  static const int kLargeListMax = 0x3fff * kPointerSize;
  static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
  static const int kMediumAllocationMax = kSmallListMax;
  static const int kLargeAllocationMax = kMediumListMax;
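  // Worked sizes (illustrative, assuming kPointerSize == 8 on a 64-bit
  // build): kMediumListMax = 0x7ff * 8 = 16376 bytes and kLargeListMax =
  // 0x3fff * 8 = 131064 bytes; free chunks above kLargeListMax end up on the
  // huge list whose total is tracked by huge_size_ above.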
      intptr_t max_capacity,

    int wasted = free_list_.Free(start, size_in_bytes);
    return size_in_bytes - wasted;

  virtual void Verify(ObjectVisitor* visitor);

  virtual void VerifyObject(HeapObject* obj) {}

  virtual void Print();

  void ReportStatistics();

  void CollectCodeStatistics();
  static void ReportCodeStatistics();
  static void ResetCodeStatistics();
  const char* name() { return name_; }

    return reinterpret_cast<Address>(this);

        reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &

    InitializeAsAnchor(owner);

  static NewSpacePage* Initialize(Heap* heap,

        current_page_(NULL) { }
  void SetUp(Address start, int initial_capacity, int maximum_capacity);

  bool GrowTo(int new_capacity);

    if (next_page == anchor()) return false;
    current_page_ = next_page;

    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
           == reinterpret_cast<uintptr_t>(start_);

    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;

  virtual void Verify();

  virtual void Print();
  void FlipPages(intptr_t flags, intptr_t flag_mask);

  int maximum_capacity_;
  int initial_capacity_;

  uintptr_t address_mask_;
  uintptr_t object_mask_;
  uintptr_t object_expected_;

  NewSpacePage anchor_;
  NewSpacePage* current_page_;
    if (current_ == limit_) return NULL;

    if (current_ == limit_) return NULL;

    int size = (size_func_ == NULL) ? object->Size() : size_func_(object);

  void Initialize(Address start,

  explicit inline NewSpacePageIterator(NewSpace* space);
  explicit inline NewSpacePageIterator(SemiSpace* space);

  inline bool has_next();
  inline NewSpacePage* next();

  NewSpacePage* prev_page_;
  NewSpacePage* next_page_;
  NewSpacePage* last_page_;
        inline_allocation_limit_step_(0) {}

  bool SetUp(int reserved_semispace_size_, int max_semispace_size);

    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
           == reinterpret_cast<uintptr_t>(start_);

    return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;

    return allocation_info_.top;

  uintptr_t mask() { return address_mask_; }

    inline_allocation_limit_step_ = step;
      allocation_info_.top + inline_allocation_limit_step_,
      allocation_info_.limit);
    top_on_previous_step_ = allocation_info_.top;
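  // Illustrative reading of the fragments above: lowering the inline
  // allocation limit to top + inline_allocation_limit_step_ forces raw
  // allocation onto the slow path roughly every step bytes, giving the heap a
  // periodic callback point (e.g. for incremental marking) without a check on
  // every allocation.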
    return to_space_.Contains(address);

    return from_space_.Contains(address);

  template <typename StringType>

  virtual void Verify();

  virtual void Print() { to_space_.Print(); }

    return from_space_.Commit();

    return inline_allocation_limit_step_;

  void UpdateAllocationInfo();

  uintptr_t chunk_size_;

  uintptr_t address_mask_;
  uintptr_t object_mask_;
  uintptr_t object_expected_;

  intptr_t inline_allocation_limit_step_;

  Address top_on_previous_step_;
      intptr_t max_capacity,

      : PagedSpace(heap, max_capacity, id, executable) {

#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
  SLOW_ASSERT((space).page_low() <= (info).top \
              && (info).top <= (space).page_high() \
              && (info).limit <= (space).page_high())
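// Illustrative usage (a sketch, not taken from this header): new-space
// allocation code would typically check its state with something like
//   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);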
      intptr_t max_capacity,

      object_size_in_bytes_(object_size_in_bytes),

  int object_size_in_bytes_;

  int CompactionThreshold() {
    return kMapsPerPage * (max_map_space_pages_ - 1);
  }

  const int max_map_space_pages_;
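  // Illustrative reading: map-space compaction is only worthwhile while the
  // live maps still fit into max_map_space_pages_ - 1 pages of kMapsPerPage
  // maps each, i.e. while there is at least one page of slack.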
    return objects_size_;

  virtual void Verify();

  virtual void Print();
  void ReportStatistics();
  void CollectCodeStatistics();

  intptr_t max_capacity_;

  intptr_t objects_size_;
  inline explicit PointerChunkIterator(Heap* heap);

      case kOldPointerState: {
        if (old_pointer_iterator_.has_next()) {
          return old_pointer_iterator_.next();

        if (map_iterator_.has_next()) {
          return map_iterator_.next();

        state_ = kLargeObjectState;

      case kLargeObjectState: {
          heap_object = lo_iterator_.Next();
          if (heap_object == NULL) {
            state_ = kFinishedState;

        } while (!heap_object->IsFixedArray());

      case kFinishedState:

  PageIterator old_pointer_iterator_;
  PageIterator map_iterator_;
  LargeObjectIterator lo_iterator_;
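  // Illustrative reading of the switch fragments above: the iterator walks
  // old-pointer-space pages, then map-space pages, then large-object space,
  // where only FixedArrays are returned (the do/while filter), presumably
  // because they are the only pointer-containing large objects this iterator
  // needs to visit.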
struct CommentStatistic {
  const char* comment;

  static const int kMaxComments = 64;

#endif  // V8_SPACES_H_