v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine.
spaces-inl.h
1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #ifndef V8_SPACES_INL_H_
29 #define V8_SPACES_INL_H_
30 
31 #include "isolate.h"
32 #include "spaces.h"
33 #include "v8memory.h"
34 
35 namespace v8 {
36 namespace internal {
37 
38 
39 // -----------------------------------------------------------------------------
40 // Bitmap
41 
42 void Bitmap::Clear(MemoryChunk* chunk) {
43  Bitmap* bitmap = chunk->markbits();
44  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
45  chunk->ResetLiveBytes();
46 }
47 
48 
49 // -----------------------------------------------------------------------------
50 // PageIterator
51 
52 
53 PageIterator::PageIterator(PagedSpace* space)
54  : space_(space),
55  prev_page_(&space->anchor_),
56  next_page_(prev_page_->next_page()) { }
57 
58 
59 bool PageIterator::has_next() {
60  return next_page_ != &space_->anchor_;
61 }
62 
63 
64 Page* PageIterator::next() {
65  ASSERT(has_next());
66  prev_page_ = next_page_;
67  next_page_ = next_page_->next_page();
68  return prev_page_;
69 }
70 
71 
72 // -----------------------------------------------------------------------------
73 // NewSpacePageIterator
74 
75 
76 NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
77  : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
78  next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
79  last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) { }
80 
81 NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
82  : prev_page_(space->anchor()),
83  next_page_(prev_page_->next_page()),
84  last_page_(prev_page_->prev_page()) { }
85 
86 NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
87  : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
88  next_page_(NewSpacePage::FromAddress(start)),
89  last_page_(NewSpacePage::FromLimit(limit)) {
90  SemiSpace::AssertValidRange(start, limit);
91 }
92 
93 
94 bool NewSpacePageIterator::has_next() {
95  return prev_page_ != last_page_;
96 }
97 
98 
99 NewSpacePage* NewSpacePageIterator::next() {
100  ASSERT(has_next());
101  prev_page_ = next_page_;
102  next_page_ = next_page_->next_page();
103  return prev_page_;
104 }
105 
106 
107 // -----------------------------------------------------------------------------
108 // HeapObjectIterator
109 HeapObject* HeapObjectIterator::FromCurrentPage() {
110  while (cur_addr_ != cur_end_) {
111  if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
112  cur_addr_ = space_->limit();
113  continue;
114  }
115  HeapObject* obj = HeapObject::FromAddress(cur_addr_);
116  int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
117  cur_addr_ += obj_size;
118  ASSERT(cur_addr_ <= cur_end_);
119  if (!obj->IsFiller()) {
120  ASSERT_OBJECT_SIZE(obj_size);
121  return obj;
122  }
123  }
124  return NULL;
125 }
126 
127 
128 // -----------------------------------------------------------------------------
129 // MemoryAllocator
130 
131 #ifdef ENABLE_HEAP_PROTECTION
132 
133 void MemoryAllocator::Protect(Address start, size_t size) {
134  OS::Protect(start, size);
135 }
136 
137 
138 void MemoryAllocator::Unprotect(Address start,
139  size_t size,
140  Executability executable) {
141  OS::Unprotect(start, size, executable);
142 }
143 
144 
145 void MemoryAllocator::ProtectChunkFromPage(Page* page) {
146  int id = GetChunkId(page);
147  OS::Protect(chunks_[id].address(), chunks_[id].size());
148 }
149 
150 
151 void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
152  int id = GetChunkId(page);
153  OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
154  chunks_[id].owner()->executable() == EXECUTABLE);
155 }
156 
157 #endif
158 
159 
160 // --------------------------------------------------------------------------
161 // PagedSpace
162 Page* Page::Initialize(Heap* heap,
163  MemoryChunk* chunk,
164  Executability executable,
165  PagedSpace* owner) {
166  Page* page = reinterpret_cast<Page*>(chunk);
167  ASSERT(chunk->size() <= static_cast<size_t>(kPageSize));
168  ASSERT(chunk->owner() == owner);
169  owner->IncreaseCapacity(page->area_size());
170  owner->Free(page->area_start(), page->area_size());
171 
172  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
173 
174  return page;
175 }
176 
177 
178 bool PagedSpace::Contains(Address addr) {
179  Page* p = Page::FromAddress(addr);
180  if (!p->is_valid()) return false;
181  return p->owner() == this;
182 }
183 
184 
185 void MemoryChunk::set_scan_on_scavenge(bool scan) {
186  if (scan) {
187  if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
188  SetFlag(SCAN_ON_SCAVENGE);
189  } else {
190  if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
191  ClearFlag(SCAN_ON_SCAVENGE);
192  }
193  heap_->incremental_marking()->SetOldSpacePageFlags(this);
194 }
195 
196 
197 MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) {
198  MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
199  OffsetFrom(addr) & ~Page::kPageAlignmentMask);
200  if (maybe->owner() != NULL) return maybe;
201  LargeObjectIterator iterator(HEAP->lo_space());
202  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
203  // Fixed arrays are the only pointer-containing objects in large object
204  // space.
205  if (o->IsFixedArray()) {
206  MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
207  if (chunk->Contains(addr)) {
208  return chunk;
209  }
210  }
211  }
212  UNREACHABLE();
213  return NULL;
214 }
215 
216 
217 PointerChunkIterator::PointerChunkIterator(Heap* heap)
218  : state_(kOldPointerState),
219  old_pointer_iterator_(heap->old_pointer_space()),
220  map_iterator_(heap->map_space()),
221  lo_iterator_(heap->lo_space()) { }
222 
223 
224 Page* Page::next_page() {
225  ASSERT(next_chunk()->owner() == owner());
226  return static_cast<Page*>(next_chunk());
227 }
228 
229 
230 Page* Page::prev_page() {
231  ASSERT(prev_chunk()->owner() == owner());
232  return static_cast<Page*>(prev_chunk());
233 }
234 
235 
236 void Page::set_next_page(Page* page) {
237  ASSERT(page->owner() == owner());
238  set_next_chunk(page);
239 }
240 
241 
242 void Page::set_prev_page(Page* page) {
243  ASSERT(page->owner() == owner());
244  set_prev_chunk(page);
245 }
246 
247 
248 // Try linear allocation in the page of alloc_info's allocation top. Does
249 // not contain slow case logic (e.g. move to the next page or try free list
250 // allocation) so it can be used by all the allocation functions and for all
251 // the paged spaces.
252 HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
253  Address current_top = allocation_info_.top;
254  Address new_top = current_top + size_in_bytes;
255  if (new_top > allocation_info_.limit) return NULL;
256 
257  allocation_info_.top = new_top;
258  return HeapObject::FromAddress(current_top);
259 }
260 
261 
262 // Raw allocation.
263 MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
264  HeapObject* object = AllocateLinearly(size_in_bytes);
265  if (object != NULL) {
266  if (identity() == CODE_SPACE) {
267  SkipList::Update(object->address(), size_in_bytes);
268  }
269  return object;
270  }
271 
272  ASSERT(!heap()->linear_allocation() ||
273  (anchor_.next_chunk() == &anchor_ &&
274  anchor_.prev_chunk() == &anchor_));
275 
276  object = free_list_.Allocate(size_in_bytes);
277  if (object != NULL) {
278  if (identity() == CODE_SPACE) {
279  SkipList::Update(object->address(), size_in_bytes);
280  }
281  return object;
282  }
283 
284  object = SlowAllocateRaw(size_in_bytes);
285  if (object != NULL) {
286  if (identity() == CODE_SPACE) {
287  SkipList::Update(object->address(), size_in_bytes);
288  }
289  return object;
290  }
291 
292  return Failure::RetryAfterGC(identity());
293 }
294 
295 
296 // -----------------------------------------------------------------------------
297 // NewSpace
298 
299 
300 MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
301  Address old_top = allocation_info_.top;
302 #ifdef DEBUG
303  // If we are stressing compaction we waste some memory in new space
304  // in order to get more frequent GCs.
305  if (FLAG_stress_compaction && !HEAP->linear_allocation()) {
306  if (allocation_info_.limit - old_top >= size_in_bytes * 4) {
307  int filler_size = size_in_bytes * 4;
308  for (int i = 0; i < filler_size; i += kPointerSize) {
309  *(reinterpret_cast<Object**>(old_top + i)) =
310  HEAP->one_pointer_filler_map();
311  }
312  old_top += filler_size;
313  allocation_info_.top += filler_size;
314  }
315  }
316 #endif
317 
318  if (allocation_info_.limit - old_top < size_in_bytes) {
319  return SlowAllocateRaw(size_in_bytes);
320  }
321 
322  Object* obj = HeapObject::FromAddress(old_top);
323  allocation_info_.top += size_in_bytes;
324  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
325 
326  return obj;
327 }
328 
329 
330 LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
331  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
332  return static_cast<LargePage*>(chunk);
333 }
334 
335 
336 intptr_t LargeObjectSpace::Available() {
337  return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
338 }
339 
340 
341 template <typename StringType>
342 void NewSpace::ShrinkStringAtAllocationBoundary(String* string, int length) {
343  ASSERT(length <= string->length());
344  ASSERT(string->IsSeqString());
345  ASSERT(string->address() + StringType::SizeFor(string->length()) ==
346  allocation_info_.top);
347  Address old_top = allocation_info_.top;
348  allocation_info_.top =
349  string->address() + StringType::SizeFor(length);
350  string->set_length(length);
351  if (Marking::IsBlack(Marking::MarkBitFrom(string))) {
352  int delta = static_cast<int>(old_top - allocation_info_.top);
353  MemoryChunk::IncrementLiveBytesFromMutator(string->address(), -delta);
354  }
355 }
356 
357 
358 bool FreeListNode::IsFreeListNode(HeapObject* object) {
359  Map* map = object->map();
360  Heap* heap = object->GetHeap();
361  return map == heap->raw_unchecked_free_space_map()
362  || map == heap->raw_unchecked_one_pointer_filler_map()
363  || map == heap->raw_unchecked_two_pointer_filler_map();
364 }
365 
366 } } // namespace v8::internal
367 
368 #endif // V8_SPACES_INL_H_
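
A minimal usage sketch (not part of spaces-inl.h) showing how internal callers typically drive the inlines above. The helper names CountPages and TryAllocate are hypothetical; PageIterator, PagedSpace::AllocateRaw, HeapObject::cast and MaybeObject::ToObject are the APIs defined in this V8 release.

// Hypothetical illustration only -- assumes the v8::internal types from this source tree.
#include "objects-inl.h"
#include "spaces-inl.h"

namespace v8 {
namespace internal {

// Walk every page linked into a paged space using the PageIterator
// defined at lines 53-69 above.
static int CountPages(PagedSpace* space) {
  int pages = 0;
  PageIterator it(space);
  while (it.has_next()) {
    it.next();
    pages++;
  }
  return pages;
}

// AllocateRaw (line 263) tries the linear path, then the free list, then the
// slow path; when all fail it returns a retry-after-GC failure, so the result
// must be checked before it is treated as a HeapObject.
static HeapObject* TryAllocate(PagedSpace* space, int size_in_bytes) {
  Object* result;
  if (!space->AllocateRaw(size_in_bytes)->ToObject(&result)) {
    return NULL;  // Allocation failed; the caller should trigger a GC and retry.
  }
  return HeapObject::cast(result);
}

} }  // namespace v8::internal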