v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
spaces-inl.h
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_INL_H_
#define V8_SPACES_INL_H_

#include "heap-profiler.h"
#include "isolate.h"
#include "spaces.h"
#include "v8memory.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// Bitmap

void Bitmap::Clear(MemoryChunk* chunk) {
  Bitmap* bitmap = chunk->markbits();
  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
  chunk->ResetLiveBytes();
}


// -----------------------------------------------------------------------------
// PageIterator


PageIterator::PageIterator(PagedSpace* space)
    : space_(space),
      prev_page_(&space->anchor_),
      next_page_(prev_page_->next_page()) { }


bool PageIterator::has_next() {
  return next_page_ != &space_->anchor_;
}


Page* PageIterator::next() {
  ASSERT(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}
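
// A typical walk over the pages of a paged space with this iterator looks
// roughly like the sketch below (illustration only; the real call sites live
// elsewhere in the heap and collector code):
//
//   PageIterator it(space);
//   while (it.has_next()) {
//     Page* page = it.next();
//     // ... visit page ...
//   }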


// -----------------------------------------------------------------------------
// NewSpacePageIterator


NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) { }

NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
    : prev_page_(space->anchor()),
      next_page_(prev_page_->next_page()),
      last_page_(prev_page_->prev_page()) { }

NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
      next_page_(NewSpacePage::FromAddress(start)),
      last_page_(NewSpacePage::FromLimit(limit)) {
  SemiSpace::AssertValidRange(start, limit);
}


bool NewSpacePageIterator::has_next() {
  return prev_page_ != last_page_;
}


NewSpacePage* NewSpacePageIterator::next() {
  ASSERT(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}


// -----------------------------------------------------------------------------
// HeapObjectIterator
HeapObject* HeapObjectIterator::FromCurrentPage() {
  while (cur_addr_ != cur_end_) {
    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
      cur_addr_ = space_->limit();
      continue;
    }
    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
    cur_addr_ += obj_size;
    ASSERT(cur_addr_ <= cur_end_);
    if (!obj->IsFiller()) {
      ASSERT_OBJECT_SIZE(obj_size);
      return obj;
    }
  }
  return NULL;
}
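
// Note on the loop above: the [top, limit) gap on the page currently being
// bump-allocated into contains no initialized objects yet, which is why the
// iterator jumps from space_->top() straight to space_->limit() instead of
// trying to read an object header there.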


// -----------------------------------------------------------------------------
// MemoryAllocator

#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  OS::Protect(start, size);
}


void MemoryAllocator::Unprotect(Address start,
                                size_t size,
                                Executability executable) {
  OS::Unprotect(start, size, executable);
}


void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Protect(chunks_[id].address(), chunks_[id].size());
}


void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif


// --------------------------------------------------------------------------
// PagedSpace
Page* Page::Initialize(Heap* heap,
                       MemoryChunk* chunk,
                       Executability executable,
                       PagedSpace* owner) {
  Page* page = reinterpret_cast<Page*>(chunk);
  ASSERT(page->area_size() <= kMaxRegularHeapObjectSize);
  ASSERT(chunk->owner() == owner);
  owner->IncreaseCapacity(page->area_size());
  owner->Free(page->area_start(), page->area_size());

  heap->incremental_marking()->SetOldSpacePageFlags(chunk);

  return page;
}


bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  if (!p->is_valid()) return false;
  return p->owner() == this;
}


void MemoryChunk::set_scan_on_scavenge(bool scan) {
  if (scan) {
    if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
    SetFlag(SCAN_ON_SCAVENGE);
  } else {
    if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
    ClearFlag(SCAN_ON_SCAVENGE);
  }
  heap_->incremental_marking()->SetOldSpacePageFlags(this);
}


MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
  MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
      OffsetFrom(addr) & ~Page::kPageAlignmentMask);
  if (maybe->owner() != NULL) return maybe;
  LargeObjectIterator iterator(heap->lo_space());
  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
    // Fixed arrays are the only pointer-containing objects in large object
    // space.
    if (o->IsFixedArray()) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
      if (chunk->Contains(addr)) {
        return chunk;
      }
    }
  }
  UNREACHABLE();
  return NULL;
}
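
// The masking above relies on regular pages being power-of-two aligned, so
// clearing the low address bits of an interior pointer lands on the
// MemoryChunk header. A rough worked example (assuming the usual 1 MB pages,
// i.e. kPageAlignmentMask == 0xFFFFF):
//
//   addr            = 0x3A2C5F10
//   addr & ~0xFFFFF = 0x3A200000   // start of the owning page
//
// Objects in the large object space may live deep inside a multi-page chunk,
// which is why the owner() == NULL case falls back to scanning lo_space.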


void MemoryChunk::UpdateHighWaterMark(Address mark) {
  if (mark == NULL) return;
  // Need to subtract one from the mark because when a chunk is full the
  // top points to the next address after the chunk, which effectively belongs
  // to another chunk. See the comment to Page::FromAllocationTop.
  MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
  int new_mark = static_cast<int>(mark - chunk->address());
  if (new_mark > chunk->high_water_mark_) {
    chunk->high_water_mark_ = new_mark;
  }
}
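
// Concretely (sketch, assuming 1 MB pages): if a chunk starts at 0x3A200000
// and allocation in it is completely full, the allocation top is 0x3A300000,
// which is also the first address of the next page; FromAddress(mark) would
// resolve to that next chunk, while FromAddress(mark - 1) resolves to the
// chunk the mark actually describes.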


PointerChunkIterator::PointerChunkIterator(Heap* heap)
    : state_(kOldPointerState),
      old_pointer_iterator_(heap->old_pointer_space()),
      map_iterator_(heap->map_space()),
      lo_iterator_(heap->lo_space()) { }


Page* Page::next_page() {
  ASSERT(next_chunk()->owner() == owner());
  return static_cast<Page*>(next_chunk());
}


Page* Page::prev_page() {
  ASSERT(prev_chunk()->owner() == owner());
  return static_cast<Page*>(prev_chunk());
}


void Page::set_next_page(Page* page) {
  ASSERT(page->owner() == owner());
  set_next_chunk(page);
}


void Page::set_prev_page(Page* page) {
  ASSERT(page->owner() == owner());
  set_prev_chunk(page);
}


// Try linear allocation in the page of alloc_info's allocation top. Does
// not contain slow case logic (e.g. move to the next page or try free list
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
  Address current_top = allocation_info_.top();
  Address new_top = current_top + size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  return HeapObject::FromAddress(current_top);
}


// Raw allocation.
MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
  HeapObject* object = AllocateLinearly(size_in_bytes);
  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    return object;
  }

  ASSERT(!heap()->linear_allocation() ||
         (anchor_.next_chunk() == &anchor_ &&
          anchor_.prev_chunk() == &anchor_));

  object = free_list_.Allocate(size_in_bytes);
  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    return object;
  }

  object = SlowAllocateRaw(size_in_bytes);
  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    return object;
  }

  return Failure::RetryAfterGC(identity());
}
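
// Callers must be prepared for AllocateRaw to hand back a retry failure
// instead of a heap object. A rough caller-side sketch (the real pattern is
// wrapped in helpers in heap-inl.h and heap.cc):
//
//   MaybeObject* maybe = space->AllocateRaw(size_in_bytes);
//   Object* result;
//   if (!maybe->ToObject(&result)) {
//     // Allocation failed; trigger a GC and retry, or propagate the failure.
//   }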


// -----------------------------------------------------------------------------
// NewSpace


MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
  Address old_top = allocation_info_.top();
#ifdef DEBUG
  // If we are stressing compaction we waste some memory in new space
  // in order to get more frequent GCs.
  if (FLAG_stress_compaction && !heap()->linear_allocation()) {
    if (allocation_info_.limit() - old_top >= size_in_bytes * 4) {
      int filler_size = size_in_bytes * 4;
      for (int i = 0; i < filler_size; i += kPointerSize) {
        *(reinterpret_cast<Object**>(old_top + i)) =
            heap()->one_pointer_filler_map();
      }
      old_top += filler_size;
      allocation_info_.set_top(allocation_info_.top() + filler_size);
    }
  }
#endif

  if (allocation_info_.limit() - old_top < size_in_bytes) {
    return SlowAllocateRaw(size_in_bytes);
  }

  HeapObject* obj = HeapObject::FromAddress(old_top);
  allocation_info_.set_top(allocation_info_.top() + size_in_bytes);
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  return obj;
}
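
// The fast path above is plain bump-pointer allocation: the object is carved
// out at the current top and top is advanced by size_in_bytes, with no
// per-object bookkeeping. Roughly:
//
//   before:  | ... allocated ... | top ........................ limit |
//   after:   | ... allocated ... | obj (size_in_bytes) | top' ... limit |
//
// Requests that do not fit below limit go through SlowAllocateRaw, which
// handles the page-boundary and GC-triggering cases.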


LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
  return static_cast<LargePage*>(chunk);
}


intptr_t LargeObjectSpace::Available() {
  return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
}


bool FreeListNode::IsFreeListNode(HeapObject* object) {
  Map* map = object->map();
  Heap* heap = object->GetHeap();
  return map == heap->raw_unchecked_free_space_map()
      || map == heap->raw_unchecked_one_pointer_filler_map()
      || map == heap->raw_unchecked_two_pointer_filler_map();
}

} }  // namespace v8::internal

#endif  // V8_SPACES_INL_H_