v8 3.11.10 (node 0.8.26)
V8 is Google's open source JavaScript engine.
spaces-inl.h
1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #ifndef V8_SPACES_INL_H_
29 #define V8_SPACES_INL_H_
30 
31 #include "isolate.h"
32 #include "spaces.h"
33 #include "v8memory.h"
34 
35 namespace v8 {
36 namespace internal {
37 
38 
39 // -----------------------------------------------------------------------------
40 // Bitmap
41 
42 void Bitmap::Clear(MemoryChunk* chunk) {
43  Bitmap* bitmap = chunk->markbits();
44  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
45  chunk->ResetLiveBytes();
46 }
47 
48 
49 // -----------------------------------------------------------------------------
50 // PageIterator
51 
52 
53 PageIterator::PageIterator(PagedSpace* space)
54  : space_(space),
55  prev_page_(&space->anchor_),
56  next_page_(prev_page_->next_page()) { }
57 
58 
59 bool PageIterator::has_next() {
60  return next_page_ != &space_->anchor_;
61 }
62 
63 
64 Page* PageIterator::next() {
65  ASSERT(has_next());
66  prev_page_ = next_page_;
67  next_page_ = next_page_->next_page();
68  return prev_page_;
69 }
70 
71 
72 // -----------------------------------------------------------------------------
73 // NewSpacePageIterator
74 
75 
76 NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
77  : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
78  next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
79  last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) { }
80 
81 NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
82  : prev_page_(space->anchor()),
83  next_page_(prev_page_->next_page()),
84  last_page_(prev_page_->prev_page()) { }
85 
86 NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
87  : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
88  next_page_(NewSpacePage::FromAddress(start)),
89  last_page_(NewSpacePage::FromLimit(limit)) {
90  SemiSpace::AssertValidRange(start, limit);
91 }
92 
93 
94 bool NewSpacePageIterator::has_next() {
95  return prev_page_ != last_page_;
96 }
97 
98 
99 NewSpacePage* NewSpacePageIterator::next() {
100  ASSERT(has_next());
101  prev_page_ = next_page_;
102  next_page_ = next_page_->next_page();
103  return prev_page_;
104 }
105 
106 
107 // -----------------------------------------------------------------------------
108 // HeapObjectIterator
109 HeapObject* HeapObjectIterator::FromCurrentPage() {
110  while (cur_addr_ != cur_end_) {
111  if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
112  cur_addr_ = space_->limit();
113  continue;
114  }
115  HeapObject* obj = HeapObject::FromAddress(cur_addr_);
116  int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
117  cur_addr_ += obj_size;
118  ASSERT(cur_addr_ <= cur_end_);
119  if (!obj->IsFiller()) {
120  ASSERT_OBJECT_SIZE(obj_size);
121  return obj;
122  }
123  }
124  return NULL;
125 }
126 
127 
128 // -----------------------------------------------------------------------------
129 // MemoryAllocator
130 
131 #ifdef ENABLE_HEAP_PROTECTION
132 
133 void MemoryAllocator::Protect(Address start, size_t size) {
134  OS::Protect(start, size);
135 }
136 
137 
138 void MemoryAllocator::Unprotect(Address start,
139  size_t size,
140  Executability executable) {
141  OS::Unprotect(start, size, executable);
142 }
143 
144 
145 void MemoryAllocator::ProtectChunkFromPage(Page* page) {
146  int id = GetChunkId(page);
147  OS::Protect(chunks_[id].address(), chunks_[id].size());
148 }
149 
150 
151 void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
152  int id = GetChunkId(page);
153  OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
154  chunks_[id].owner()->executable() == EXECUTABLE);
155 }
156 
157 #endif
158 
159 
160 // --------------------------------------------------------------------------
161 // PagedSpace
162 Page* Page::Initialize(Heap* heap,
163  MemoryChunk* chunk,
164  Executability executable,
165  PagedSpace* owner) {
166  Page* page = reinterpret_cast<Page*>(chunk);
167  ASSERT(chunk->size() <= static_cast<size_t>(kPageSize));
168  ASSERT(chunk->owner() == owner);
169  owner->IncreaseCapacity(page->area_size());
170  owner->Free(page->area_start(), page->area_size());
171 
172  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
173 
174  return page;
175 }
176 
177 
178 bool PagedSpace::Contains(Address addr) {
179  Page* p = Page::FromAddress(addr);
180  if (!p->is_valid()) return false;
181  return p->owner() == this;
182 }
183 
184 
185 void MemoryChunk::set_scan_on_scavenge(bool scan) {
186  if (scan) {
187  if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
188  SetFlag(SCAN_ON_SCAVENGE);
189  } else {
190  if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
191  ClearFlag(SCAN_ON_SCAVENGE);
192  }
193  heap_->incremental_marking()->SetOldSpacePageFlags(this);
194 }
195 
196 
197 MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) {
198  MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
199  OffsetFrom(addr) & ~Page::kPageAlignmentMask);
200  if (maybe->owner() != NULL) return maybe;
201  LargeObjectIterator iterator(HEAP->lo_space());
202  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
203  // Fixed arrays are the only pointer-containing objects in large object
204  // space.
205  if (o->IsFixedArray()) {
206  MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
207  if (chunk->Contains(addr)) {
208  return chunk;
209  }
210  }
211  }
212  UNREACHABLE();
213  return NULL;
214 }
215 
216 
217 PointerChunkIterator::PointerChunkIterator(Heap* heap)
218  : state_(kOldPointerState),
219  old_pointer_iterator_(heap->old_pointer_space()),
220  map_iterator_(heap->map_space()),
221  lo_iterator_(heap->lo_space()) { }
222 
223 
224 Page* Page::next_page() {
225  ASSERT(next_chunk()->owner() == owner());
226  return static_cast<Page*>(next_chunk());
227 }
228 
229 
230 Page* Page::prev_page() {
231  ASSERT(prev_chunk()->owner() == owner());
232  return static_cast<Page*>(prev_chunk());
233 }
234 
235 
236 void Page::set_next_page(Page* page) {
237  ASSERT(page->owner() == owner());
238  set_next_chunk(page);
239 }
240 
241 
242 void Page::set_prev_page(Page* page) {
243  ASSERT(page->owner() == owner());
244  set_prev_chunk(page);
245 }
246 
247 
248 // Try linear allocation in the page of alloc_info's allocation top. Does
249 // not contain slow case logic (e.g. move to the next page or try free list
250 // allocation) so it can be used by all the allocation functions and for all
251 // the paged spaces.
252 HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
253  Address current_top = allocation_info_.top;
254  Address new_top = current_top + size_in_bytes;
255  if (new_top > allocation_info_.limit) return NULL;
256 
257  allocation_info_.top = new_top;
258  return HeapObject::FromAddress(current_top);
259 }
260 
261 
262 // Raw allocation.
263 MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
264  HeapObject* object = AllocateLinearly(size_in_bytes);
265  if (object != NULL) {
266  if (identity() == CODE_SPACE) {
267  SkipList::Update(object->address(), size_in_bytes);
268  }
269  return object;
270  }
271 
272  object = free_list_.Allocate(size_in_bytes);
273  if (object != NULL) {
274  if (identity() == CODE_SPACE) {
275  SkipList::Update(object->address(), size_in_bytes);
276  }
277  return object;
278  }
279 
280  object = SlowAllocateRaw(size_in_bytes);
281  if (object != NULL) {
282  if (identity() == CODE_SPACE) {
283  SkipList::Update(object->address(), size_in_bytes);
284  }
285  return object;
286  }
287 
288  return Failure::RetryAfterGC(identity());
289 }
290 
291 
292 // -----------------------------------------------------------------------------
293 // NewSpace
294 
295 
296 MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
297  Address old_top = allocation_info_.top;
298 #ifdef DEBUG
299  // If we are stressing compaction we waste some memory in new space
300  // in order to get more frequent GCs.
301  if (FLAG_stress_compaction && !HEAP->linear_allocation()) {
302  if (allocation_info_.limit - old_top >= size_in_bytes * 4) {
303  int filler_size = size_in_bytes * 4;
304  for (int i = 0; i < filler_size; i += kPointerSize) {
305  *(reinterpret_cast<Object**>(old_top + i)) =
306  HEAP->one_pointer_filler_map();
307  }
308  old_top += filler_size;
309  allocation_info_.top += filler_size;
310  }
311  }
312 #endif
313 
314  if (allocation_info_.limit - old_top < size_in_bytes) {
315  return SlowAllocateRaw(size_in_bytes);
316  }
317 
318  Object* obj = HeapObject::FromAddress(old_top);
319  allocation_info_.top += size_in_bytes;
320  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
321 
322  return obj;
323 }
324 
325 
326 LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
327  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
328  return static_cast<LargePage*>(chunk);
329 }
330 
331 
332 intptr_t LargeObjectSpace::Available() {
333  return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
334 }
335 
336 
337 template <typename StringType>
338 void NewSpace::ShrinkStringAtAllocationBoundary(String* string, int length) {
339  ASSERT(length <= string->length());
340  ASSERT(string->IsSeqString());
341  ASSERT(string->address() + StringType::SizeFor(string->length()) ==
342  allocation_info_.top);
343  Address old_top = allocation_info_.top;
344  allocation_info_.top =
345  string->address() + StringType::SizeFor(length);
346  string->set_length(length);
347  if (Marking::IsBlack(Marking::MarkBitFrom(string))) {
348  int delta = static_cast<int>(old_top - allocation_info_.top);
349  MemoryChunk::IncrementLiveBytesFromMutator(string->address(), -delta);
350  }
351 }
352 
353 
354 bool FreeListNode::IsFreeListNode(HeapObject* object) {
355  Map* map = object->map();
356  Heap* heap = object->GetHeap();
357  return map == heap->raw_unchecked_free_space_map()
358  || map == heap->raw_unchecked_one_pointer_filler_map()
359  || map == heap->raw_unchecked_two_pointer_filler_map();
360 }
361 
362 } } // namespace v8::internal
363 
364 #endif // V8_SPACES_INL_H_
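The allocation fast paths above (PagedSpace::AllocateLinearly, and the top/limit check at the start of NewSpace::AllocateRaw) are plain bump-pointer allocation: advance a top pointer within a fixed limit and hand anything that does not fit to a slow path (free-list search, a new page, or a GC). A minimal standalone sketch of that pattern, using hypothetical names and none of V8's types, is:

// Illustrative bump-pointer allocator in the spirit of
// PagedSpace::AllocateLinearly; hypothetical names, not V8 code.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

class BumpAllocator {
 public:
  explicit BumpAllocator(size_t capacity)
      : buffer_(capacity),
        top_(buffer_.data()),
        limit_(buffer_.data() + capacity) {}

  // Fast path: if the request fits below limit_, advance top_ and return
  // the old top; otherwise return nullptr so the caller can take a slow
  // path, mirroring AllocateLinearly returning NULL above.
  void* Allocate(size_t size_in_bytes) {
    uint8_t* current_top = top_;
    uint8_t* new_top = current_top + size_in_bytes;
    if (new_top > limit_) return nullptr;
    top_ = new_top;
    return current_top;
  }

 private:
  std::vector<uint8_t> buffer_;  // Stand-in for a page's object area.
  uint8_t* top_;                 // Next free byte (cf. allocation_info_.top).
  uint8_t* limit_;               // End of usable area (cf. allocation_info_.limit).
};

int main() {
  BumpAllocator space(64);
  void* a = space.Allocate(32);  // Fits at offset 0.
  void* b = space.Allocate(32);  // Fits exactly at the limit.
  void* c = space.Allocate(1);   // Does not fit: nullptr (slow-path signal).
  std::printf("a=%p b=%p c=%p\n", a, b, c);
  return 0;
}

The real allocators differ in that the slow path refills the linear area from the free list or maps a new page, and CODE_SPACE allocations additionally update a SkipList, as shown in PagedSpace::AllocateRaw above.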