#ifndef V8_SPACES_INL_H_
#define V8_SPACES_INL_H_

  // In Bitmap::Clear(MemoryChunk* chunk): zero every cell of the chunk's
  // mark bitmap.
  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;

PageIterator::PageIterator(PagedSpace* space)
    : space_(space),
      prev_page_(&space->anchor_),
      next_page_(prev_page_->next_page()) { }

bool PageIterator::has_next() {
  return next_page_ != &space_->anchor_;
}

Page* PageIterator::next() {
  ASSERT(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}
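
// Example (sketch): PageIterator walks the page list of a paged space,
// starting and ending at the space's anchor page.
//
//   PageIterator it(heap->old_pointer_space());
//   while (it.has_next()) {
//     Page* page = it.next();
//     // ... visit |page| ...
//   }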

NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) { }

NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
    : prev_page_(space->anchor()),
      next_page_(prev_page_->next_page()),
      last_page_(prev_page_->prev_page()) { }

NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
      next_page_(NewSpacePage::FromAddress(start)),
      last_page_(NewSpacePage::FromLimit(limit)) {
  SemiSpace::AssertValidRange(start, limit);
}

bool NewSpacePageIterator::has_next() {
  return prev_page_ != last_page_;
}

NewSpacePage* NewSpacePageIterator::next() {
  ASSERT(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}
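
// Example (sketch): NewSpacePageIterator visits the pages of the to-space of
// a NewSpace, of a single SemiSpace, or of an explicit [start, limit) range,
// depending on which constructor is used.
//
//   NewSpacePageIterator it(heap->new_space());
//   while (it.has_next()) {
//     NewSpacePage* page = it.next();
//     // ... visit |page| ...
//   }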

// Returns the next live object on the current page, skipping fillers and the
// unused gap between the allocation top and limit; returns NULL when the page
// is exhausted.
HeapObject* HeapObjectIterator::FromCurrentPage() {
  while (cur_addr_ != cur_end_) {
    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
      cur_addr_ = space_->limit();
      continue;
    }
    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
    cur_addr_ += obj_size;
    ASSERT(cur_addr_ <= cur_end_);
    if (!obj->IsFiller()) {
      ASSERT_OBJECT_SIZE(obj_size);
      return obj;
    }
  }
  return NULL;
}
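
// Example (sketch): a HeapObjectIterator is typically driven through Next(),
// which pulls objects from the current page via FromCurrentPage() and then
// advances to the following page.
//
//   HeapObjectIterator it(heap->map_space());
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // ... visit |obj| ...
//   }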

#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  OS::Protect(start, size);
}

void MemoryAllocator::Unprotect(Address start,
                                size_t size,
                                Executability executable) {
  OS::Unprotect(start, size, executable);
}

void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Protect(chunks_[id].address(), chunks_[id].size());
}

void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif
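
// The helpers above are only compiled when ENABLE_HEAP_PROTECTION is defined.
// Protect/Unprotect wrap the OS page-protection primitives for an arbitrary
// address range, while ProtectChunkFromPage/UnprotectChunkFromPage look up the
// chunk backing a page and (un)protect the whole chunk, restoring execute
// permission only when the chunk's owner is an executable space.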

  // In Page::Initialize(Heap*, MemoryChunk*, Executability, PagedSpace*):
  // the freshly set up MemoryChunk is reused in place as a Page.
  Page* page = reinterpret_cast<Page*>(chunk);

  // In PagedSpace::Contains(Address addr): an address belongs to this space
  // iff the page it lies on is owned by this space.
  return p->owner() == this;

  // In MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr): if the
  // page-aligned candidate chunk has an owner it is an ordinary page;
  // otherwise the address must lie in the large object space, which is
  // searched below.
  if (maybe->owner() != NULL) return maybe;
  LargeObjectIterator iterator(heap->lo_space());
  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
    // Fixed arrays are the only pointer-containing objects in large object
    // space.
    if (o->IsFixedArray()) {

  // In MemoryChunk::UpdateHighWaterMark(Address mark): remember how far into
  // its chunk the allocation top has ever advanced.
  if (mark == NULL) return;
  // Subtract one: when a chunk is full, the top points just past the chunk.
  MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
  int new_mark = static_cast<int>(mark - chunk->address());

PointerChunkIterator::PointerChunkIterator(Heap* heap)
    : state_(kOldPointerState),
      old_pointer_iterator_(heap->old_pointer_space()),
      map_iterator_(heap->map_space()),
      lo_iterator_(heap->lo_space()) { }
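
// Example (sketch): PointerChunkIterator yields, in turn, the chunks of the
// old pointer space, the map space, and the large object space.
//
//   PointerChunkIterator it(heap);
//   MemoryChunk* chunk;
//   while ((chunk = it.next()) != NULL) {
//     // ... visit |chunk| ...
//   }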

  // In PagedSpace::AllocateLinearly(int size_in_bytes): bump-pointer
  // allocation within the current linear allocation area.
  Address new_top = current_top + size_in_bytes;

  // In PagedSpace::AllocateRaw(int size_in_bytes): the same success check
  // follows each of the three allocation attempts (linear area, free list,
  // slow path).
  if (object != NULL) {

  if (object != NULL) {

  if (object != NULL) {

// Bump-pointer fast path for new-space allocation; falls back to
// SlowAllocateRaw() when the request does not fit below the current limit.
MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
  Address old_top = allocation_info_.top();

  // When stressing compaction, waste some new space to trigger GCs sooner.
  if (FLAG_stress_compaction && !heap()->linear_allocation()) {
    if (allocation_info_.limit() - old_top >= size_in_bytes * 4) {
      int filler_size = size_in_bytes * 4;
      for (int i = 0; i < filler_size; i += kPointerSize) {
        *(reinterpret_cast<Object**>(old_top + i)) =
            heap()->one_pointer_filler_map();
      }
      old_top += filler_size;
      allocation_info_.set_top(allocation_info_.top() + filler_size);
    }
  }

  if (allocation_info_.limit() - old_top < size_in_bytes) {
    return SlowAllocateRaw(size_in_bytes);
  }

  HeapObject* obj = HeapObject::FromAddress(old_top);
  allocation_info_.set_top(allocation_info_.top() + size_in_bytes);
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  return obj;
}
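
// Example (sketch): callers of AllocateRaw unwrap the MaybeObject and retry
// after GC on failure.
//
//   Object* result;
//   MaybeObject* maybe = heap->new_space()->AllocateRaw(size_in_bytes);
//   if (!maybe->ToObject(&result)) return maybe;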

LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
  return static_cast<LargePage*>(chunk);
}
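
// LargePage::Initialize only needs to set the incremental-marking page flags
// on the chunk before it can be treated as a large page; no further layout
// work is required here.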

bool FreeListNode::IsFreeListNode(HeapObject* object) {
  Map* map = object->map();
  Heap* heap = object->GetHeap();
  return map == heap->raw_unchecked_free_space_map()
      || map == heap->raw_unchecked_one_pointer_filler_map()
      || map == heap->raw_unchecked_two_pointer_filler_map();
}
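
// Example (sketch): IsFreeListNode identifies free-list entries and filler
// objects by their map, which lets sweeping and iteration code skip them.
//
//   if (FreeListNode::IsFreeListNode(heap_object)) {
//     // free space or filler, not a live object
//   }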

#endif  // V8_SPACES_INL_H_