#ifndef V8_SPACES_INL_H_
#define V8_SPACES_INL_H_
void Bitmap::Clear(MemoryChunk* chunk) {
  Bitmap* bitmap = chunk->markbits();
  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
  chunk->ResetLiveBytes();
}
// -----------------------------------------------------------------------------
// PageIterator

PageIterator::PageIterator(PagedSpace* space)
    : space_(space),
      prev_page_(&space->anchor_),
      next_page_(prev_page_->next_page()) { }


bool PageIterator::has_next() {
  return next_page_ != &space_->anchor_;
}


Page* PageIterator::next() {
  ASSERT(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}
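// Usage sketch (assuming `heap` is a Heap*): a typical walk visits every page
// of a paged space until the iterator wraps back to the anchor page, e.g.
//
//   PageIterator it(heap->old_pointer_space());
//   while (it.has_next()) {
//     Page* page = it.next();
//     // ... visit `page` ...
//   }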
// -----------------------------------------------------------------------------
// NewSpacePageIterator

NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) { }

NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
    : prev_page_(space->anchor()),
      next_page_(prev_page_->next_page()),
      last_page_(prev_page_->prev_page()) { }

NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
      next_page_(NewSpacePage::FromAddress(start)),
      last_page_(NewSpacePage::FromLimit(limit)) {
  SemiSpace::AssertValidRange(start, limit);
}


bool NewSpacePageIterator::has_next() {
  return prev_page_ != last_page_;
}


NewSpacePage* NewSpacePageIterator::next() {
  ASSERT(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}
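// The three constructors above cover the three iteration ranges a caller may
// need: the whole to-space of a NewSpace, a single SemiSpace, or an explicit
// [start, limit) address range.  In every case prev_page_ starts one page
// before the first page to be returned and last_page_ is where has_next()
// stops, i.e. iteration ends once the last page has been handed out.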
// -----------------------------------------------------------------------------
// HeapObjectIterator

HeapObject* HeapObjectIterator::FromCurrentPage() {
  while (cur_addr_ != cur_end_) {
    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
      cur_addr_ = space_->limit();
      continue;
    }
    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
    cur_addr_ += obj_size;
    ASSERT(cur_addr_ <= cur_end_);
    if (!obj->IsFiller()) {
      ASSERT_OBJECT_SIZE(obj_size);
      return obj;
    }
  }
  return NULL;
}
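// Note: the [top(), limit()) range skipped above is the unused tail of the
// current linear allocation area; it contains no objects yet, so the cursor
// jumps straight to limit().  Filler objects are stepped over, so callers
// only ever see real heap objects.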
// -----------------------------------------------------------------------------
// MemoryAllocator

#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  OS::Protect(start, size);
}


void MemoryAllocator::Unprotect(Address start,
                                size_t size,
                                Executability executable) {
  OS::Unprotect(start, size, executable);
}


void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Protect(chunks_[id].address(), chunks_[id].size());
}


void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif
  Page* page = reinterpret_cast<Page*>(chunk);
  return p->owner() == this;
  if (maybe->owner() != NULL) return maybe;
  LargeObjectIterator iterator(HEAP->lo_space());
  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
    // Fixed arrays are the only pointer-containing objects in large object
    // space.
    if (o->IsFixedArray()) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
      if (chunk->Contains(addr)) return chunk;
    }
  }
PointerChunkIterator::PointerChunkIterator(Heap* heap)
    : state_(kOldPointerState),
      old_pointer_iterator_(heap->old_pointer_space()),
      map_iterator_(heap->map_space()),
      lo_iterator_(heap->lo_space()) { }
  Address new_top = current_top + size_in_bytes;
  if (object != NULL) {

  if (object != NULL) {

  if (object != NULL) {
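// The three identical checks above are the retry cascade of
// PagedSpace::AllocateRaw: the elided code around them presumably tries
// linear (bump-pointer) allocation via AllocateLinearly() first (the new_top
// computation above is its core), then a free-list allocation, then
// SlowAllocateRaw(); only if every attempt returns NULL does the caller get a
// retry-after-GC failure.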
// -----------------------------------------------------------------------------
// NewSpace

MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
  Address old_top = allocation_info_.top;
  if (FLAG_stress_compaction && !HEAP->linear_allocation()) {
    // When stressing compaction, waste some new-space memory to get more
    // frequent GCs.
    if (allocation_info_.limit - old_top >= size_in_bytes * 4) {
      int filler_size = size_in_bytes * 4;
      for (int i = 0; i < filler_size; i += kPointerSize) {
        *(reinterpret_cast<Object**>(old_top + i)) =
            HEAP->one_pointer_filler_map();
      }
      old_top += filler_size;
      allocation_info_.top += filler_size;
    }
  }

  if (allocation_info_.limit - old_top < size_in_bytes) {
    return SlowAllocateRaw(size_in_bytes);
  }

  Object* obj = HeapObject::FromAddress(old_top);
  allocation_info_.top += size_in_bytes;
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  return obj;
}
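// Worked example of the bump-pointer fast path, with made-up numbers: if
// allocation_info_.top == 0x1000, allocation_info_.limit == 0x2000 and
// size_in_bytes == 32, the limit check passes (0x1000 bytes remain), the new
// object occupies [0x1000, 0x1020) and top is bumped to 0x1020.  Only when
// the remaining room is smaller than the request does SlowAllocateRaw() run.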
LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
  return static_cast<LargePage*>(chunk);
}
template <typename StringType>
void NewSpace::ShrinkStringAtAllocationBoundary(String* string, int length) {
  ASSERT(length <= string->length());
  ASSERT(string->IsSeqString());
  ASSERT(string->address() + StringType::SizeFor(string->length()) ==
         allocation_info_.top);
  Address old_top = allocation_info_.top;
  allocation_info_.top =
      string->address() + StringType::SizeFor(length);
  string->set_length(length);
  if (Marking::IsBlack(Marking::MarkBitFrom(string))) {
    int delta = static_cast<int>(old_top - allocation_info_.top);
    MemoryChunk::IncrementLiveBytesFromMutator(string->address(), -delta);
  }
}
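// Note: the shrink above only works because the string is the most recently
// allocated object in new space, so its end coincides with
// allocation_info_.top and the tail can be returned simply by lowering top.
// The live-byte adjustment keeps the page's accounting consistent when the
// incremental marker has already marked the string black.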
bool FreeListNode::IsFreeListNode(HeapObject* object) {
  Map* map = object->map();
  Heap* heap = object->GetHeap();
  return map == heap->raw_unchecked_free_space_map()
      || map == heap->raw_unchecked_one_pointer_filler_map()
      || map == heap->raw_unchecked_two_pointer_filler_map();
}
#endif  // V8_SPACES_INL_H_