#ifndef V8_SPACES_INL_H_
#define V8_SPACES_INL_H_

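// NOTE: the include set and namespace below are assumed from context; the
// original header may list additional includes.
#include "isolate.h"
#include "spaces.h"
#include "v8memory.h"

namespace v8 {
namespace internal {
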
// Clear all mark bits of the chunk and reset its live-byte count.
void Bitmap::Clear(MemoryChunk* chunk) {
  Bitmap* bitmap = chunk->markbits();
  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
  chunk->ResetLiveBytes();
}

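// PageIterator walks the circular page list of a paged space; the space's
// anchor page serves as the end-of-list sentinel.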
PageIterator::PageIterator(PagedSpace* space)
    : space_(space),
      prev_page_(&space->anchor_),
      next_page_(prev_page_->next_page()) { }

bool PageIterator::has_next() {
  return next_page_ != &space_->anchor_;
}

Page* PageIterator::next() {
  ASSERT(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}

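// NewSpacePageIterator visits the pages of to-space, of a single semispace,
// or of an explicit [start, limit) address range; iteration ends once the
// previously returned page equals last_page_.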
NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) { }

NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
    : prev_page_(space->anchor()),
      next_page_(prev_page_->next_page()),
      last_page_(prev_page_->prev_page()) { }

NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
      next_page_(NewSpacePage::FromAddress(start)),
      last_page_(NewSpacePage::FromLimit(limit)) {
  SemiSpace::AssertValidRange(start, limit);
}

bool NewSpacePageIterator::has_next() {
  return prev_page_ != last_page_;
}

NewSpacePage* NewSpacePageIterator::next() {
  ASSERT(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}

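// Walks the current page from cur_addr_ to cur_end_, skipping the unused gap
// between the space's top() and limit() as well as filler objects, and
// returns the next real heap object, or NULL when the page is exhausted.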
HeapObject* HeapObjectIterator::FromCurrentPage() {
  while (cur_addr_ != cur_end_) {
    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
      cur_addr_ = space_->limit();
      continue;
    }
    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
    cur_addr_ += obj_size;
    ASSERT(cur_addr_ <= cur_end_);
    if (!obj->IsFiller()) {
      ASSERT_OBJECT_SIZE(obj_size);
      return obj;
    }
  }
  return NULL;
}

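// Usage sketch (illustrative, not part of the original header): an iterator
// is typically constructed over a paged space and drained in a loop.
//
//   HeapObjectIterator it(heap->old_pointer_space());
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // ... visit obj ...
//   }
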
#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  OS::Protect(start, size);
}


void MemoryAllocator::Unprotect(Address start,
                                size_t size,
                                Executability executable) {
  OS::Unprotect(start, size, executable);
}


void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Protect(chunks_[id].address(), chunks_[id].size());
}


void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif

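// Page::Initialize turns a committed MemoryChunk into a Page owned by
// |owner|: the page's object area is added to the owner's capacity and
// handed to its free list, and incremental-marking page flags are set.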
Page* Page::Initialize(Heap* heap,
                       MemoryChunk* chunk,
                       Executability executable,
                       PagedSpace* owner) {
  Page* page = reinterpret_cast<Page*>(chunk);
  ASSERT(chunk->owner() == owner);
  owner->IncreaseCapacity(page->area_size());
  owner->Free(page->area_start(), page->area_size());
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
  return page;
}

bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  if (!p->is_valid()) return false;
  return p->owner() == this;
}

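// MemoryChunk::FromAnyPointerAddress maps an arbitrary interior pointer back
// to its MemoryChunk header; if the page-aligned candidate has no owner, the
// address lies in the large object space, which is scanned explicitly.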
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) {
  MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
      OffsetFrom(addr) & ~Page::kPageAlignmentMask);
  if (maybe->owner() != NULL) return maybe;
  LargeObjectIterator iterator(HEAP->lo_space());
  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
    // Fixed arrays are the only pointer-containing objects in large object
    // space.
    if (o->IsFixedArray()) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
      if (chunk->Contains(addr)) {
        return chunk;
      }
    }
  }
  UNREACHABLE();
  return NULL;
}

PointerChunkIterator::PointerChunkIterator(Heap* heap)
    : state_(kOldPointerState),
      old_pointer_iterator_(heap->old_pointer_space()),
      map_iterator_(heap->map_space()),
      lo_iterator_(heap->lo_space()) { }

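// Try linear allocation in the page of alloc_info's allocation top.  Does
// not contain slow case logic (e.g. free list allocation) so it can be used
// by all the allocation functions and for all the paged spaces.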
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
  Address current_top = allocation_info_.top;
  Address new_top = current_top + size_in_bytes;
  if (new_top > allocation_info_.limit) return NULL;

  allocation_info_.top = new_top;
  return HeapObject::FromAddress(current_top);
}

// Raw allocation: try the linear area first, then the free list, then the
// slow path; every successful code-space allocation updates the skip list.
MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
  HeapObject* object = AllocateLinearly(size_in_bytes);
  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    return object;
  }

  object = free_list_.Allocate(size_in_bytes);
  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    return object;
  }

  object = SlowAllocateRaw(size_in_bytes);
  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    return object;
  }

  return Failure::RetryAfterGC(identity());
}

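// NewSpace::AllocateRaw is the bump-pointer fast path for new-space
// allocation: when the linear area between top and limit has room, top is
// simply advanced by size_in_bytes; otherwise the slow path may move on to
// a fresh page or signal that a GC is needed.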
MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
  Address old_top = allocation_info_.top;
#ifdef DEBUG
  // If we are stressing compaction we waste some memory in new space
  // in order to get more frequent GCs.
  if (FLAG_stress_compaction && !HEAP->linear_allocation()) {
    if (allocation_info_.limit - old_top >= size_in_bytes * 4) {
      int filler_size = size_in_bytes * 4;
      for (int i = 0; i < filler_size; i += kPointerSize) {
        *(reinterpret_cast<Object**>(old_top + i)) =
            HEAP->one_pointer_filler_map();
      }
      old_top += filler_size;
      allocation_info_.top += filler_size;
    }
  }
#endif

  if (allocation_info_.limit - old_top < size_in_bytes) {
    return SlowAllocateRaw(size_in_bytes);
  }

  Object* obj = HeapObject::FromAddress(old_top);
  allocation_info_.top += size_in_bytes;
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  return obj;
}

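// Usage sketch (illustrative, not part of the original header): callers must
// handle allocation failure through the MaybeObject protocol, e.g.
//
//   Object* result;
//   MaybeObject* maybe = heap->new_space()->AllocateRaw(size);
//   if (!maybe->ToObject(&result)) return maybe;  // Propagate retry-after-GC.
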
LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
  return static_cast<LargePage*>(chunk);
}

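// Truncates a sequential string that is the most recently allocated object
// in new space by lowering the allocation top to the shortened string's end;
// if the string is already marked black, the page's live-byte count is
// reduced by the freed delta.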
template <typename StringType>
void NewSpace::ShrinkStringAtAllocationBoundary(String* string, int length) {
  ASSERT(length <= string->length());
  ASSERT(string->IsSeqString());
  ASSERT(string->address() + StringType::SizeFor(string->length()) ==
         allocation_info_.top);
  Address old_top = allocation_info_.top;
  allocation_info_.top =
      string->address() + StringType::SizeFor(length);
  string->set_length(length);
  if (Marking::IsBlack(Marking::MarkBitFrom(string))) {
    int delta = static_cast<int>(old_top - allocation_info_.top);
    MemoryChunk::IncrementLiveBytesFromMutator(string->address(), -delta);
  }
}

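// A heap object is a free-list node iff its map is the heap's free-space map
// or one of the one/two-pointer filler maps.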
bool FreeListNode::IsFreeListNode(HeapObject* object) {
  Map* map = object->map();
  Heap* heap = object->GetHeap();
  return map == heap->raw_unchecked_free_space_map()
      || map == heap->raw_unchecked_one_pointer_filler_map()
      || map == heap->raw_unchecked_two_pointer_filler_map();
}

} }  // namespace v8::internal

#endif  // V8_SPACES_INL_H_