v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
test-spaces.cc
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <stdlib.h>

#include "v8.h"
#include "cctest.h"

using namespace v8::internal;
#if 0
static void VerifyRegionMarking(Address page_start) {
#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
  Page* p = Page::FromAddress(page_start);

  p->SetRegionMarks(Page::kAllRegionsCleanMarks);

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    CHECK(!Page::FromAddress(addr)->IsRegionDirty(addr));
  }

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    Page::FromAddress(addr)->MarkRegionDirty(addr);
  }

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    CHECK(Page::FromAddress(addr)->IsRegionDirty(addr));
  }
#endif
}
#endif


// TODO(gc) you can no longer allocate pages like this. Details are hidden.
#if 0
TEST(Page) {
  byte* mem = NewArray<byte>(2*Page::kPageSize);
  CHECK(mem != NULL);

  Address start = reinterpret_cast<Address>(mem);
  Address page_start = RoundUp(start, Page::kPageSize);

  Page* p = Page::FromAddress(page_start);
  // Initialized Page has heap pointer, normally set by memory_allocator.
  p->heap_ = CcTest::heap();
  CHECK(p->address() == page_start);
  CHECK(p->is_valid());

  p->opaque_header = 0;
  p->SetIsLargeObjectPage(false);
  CHECK(!p->next_page()->is_valid());

  CHECK(p->ObjectAreaStart() == page_start + Page::kObjectStartOffset);
  CHECK(p->ObjectAreaEnd() == page_start + Page::kPageSize);

  CHECK(p->Offset(page_start + Page::kObjectStartOffset) ==
        Page::kObjectStartOffset);
  CHECK(p->Offset(page_start + Page::kPageSize) == Page::kPageSize);

  CHECK(p->OffsetToAddress(Page::kObjectStartOffset) == p->ObjectAreaStart());
  CHECK(p->OffsetToAddress(Page::kPageSize) == p->ObjectAreaEnd());

  // test region marking
  VerifyRegionMarking(page_start);

  DeleteArray(mem);
}
#endif


namespace v8 {
namespace internal {

// Temporarily sets a given allocator in an isolate.
class TestMemoryAllocatorScope {
 public:
  TestMemoryAllocatorScope(Isolate* isolate, MemoryAllocator* allocator)
      : isolate_(isolate),
        old_allocator_(isolate->memory_allocator_) {
    isolate->memory_allocator_ = allocator;
  }

  ~TestMemoryAllocatorScope() {
    isolate_->memory_allocator_ = old_allocator_;
  }

 private:
  Isolate* isolate_;
  MemoryAllocator* old_allocator_;

  DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
};


// Temporarily sets a given code range in an isolate.
class TestCodeRangeScope {
 public:
  TestCodeRangeScope(Isolate* isolate, CodeRange* code_range)
      : isolate_(isolate),
        old_code_range_(isolate->code_range_) {
    isolate->code_range_ = code_range;
  }

  ~TestCodeRangeScope() {
    isolate_->code_range_ = old_code_range_;
  }

 private:
  Isolate* isolate_;
  CodeRange* old_code_range_;

  DISALLOW_COPY_AND_ASSIGN(TestCodeRangeScope);
};
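
// Usage sketch (illustrative, not part of the original file): both scope
// classes above are RAII guards -- the constructor swaps in the test's
// allocator or code range and the destructor restores the previous one when
// the scope closes, so a test cannot leak its stubbed state, e.g.:
//
//   {
//     TestMemoryAllocatorScope scope(isolate, allocator);
//     // allocations in this block go through `allocator`
//   }  // original allocator restored here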

} }  // namespace v8::internal


static void VerifyMemoryChunk(Isolate* isolate,
                              Heap* heap,
                              CodeRange* code_range,
                              size_t reserve_area_size,
                              size_t commit_area_size,
                              size_t second_commit_area_size,
                              Executability executable) {
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
  TestCodeRangeScope test_code_range_scope(isolate, code_range);

  size_t header_size = (executable == EXECUTABLE)
                       ? MemoryAllocator::CodePageGuardStartOffset()
                       : MemoryChunk::kObjectStartOffset;
  size_t guard_size = (executable == EXECUTABLE)
                      ? MemoryAllocator::CodePageGuardSize()
                      : 0;

  MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(reserve_area_size,
                                                              commit_area_size,
                                                              executable,
                                                              NULL);
  size_t alignment = code_range->exists() ?
                     MemoryChunk::kAlignment : OS::CommitPageSize();
  size_t reserved_size = (executable == EXECUTABLE)
      ? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
                alignment)
      : RoundUp(header_size + reserve_area_size, OS::CommitPageSize());
  CHECK(memory_chunk->size() == reserved_size);
  CHECK(memory_chunk->area_start() < memory_chunk->address() +
        memory_chunk->size());
  CHECK(memory_chunk->area_end() <= memory_chunk->address() +
        memory_chunk->size());
  CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);

  Address area_start = memory_chunk->area_start();

  memory_chunk->CommitArea(second_commit_area_size);
  CHECK(area_start == memory_chunk->area_start());
  CHECK(memory_chunk->area_start() < memory_chunk->address() +
        memory_chunk->size());
  CHECK(memory_chunk->area_end() <= memory_chunk->address() +
        memory_chunk->size());
  CHECK(static_cast<size_t>(memory_chunk->area_size()) ==
        second_commit_area_size);

  memory_allocator->Free(memory_chunk);
  memory_allocator->TearDown();
  delete memory_allocator;
}
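
// A sketch (hypothetical helper, not part of the original test) restating
// the reserved-size arithmetic that VerifyMemoryChunk() checks above:
// executable chunks pay a code-page header plus one guard region on each
// side of the requested area and round up to the chunk alignment, while
// non-executable chunks round header + area up to the OS commit page size.
static size_t ExpectedReservedSize(size_t header_size,
                                   size_t guard_size,
                                   size_t reserve_area_size,
                                   size_t alignment,
                                   Executability executable) {
  return (executable == EXECUTABLE)
      ? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
                alignment)
      : RoundUp(header_size + reserve_area_size, OS::CommitPageSize());
}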


static unsigned int Pseudorandom() {
  static uint32_t lo = 2345;
  lo = 18273 * (lo & 0xFFFFF) + (lo >> 16);
  return lo & 0xFFFFF;
}
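
// Pseudorandom() is an ad-hoc multiply-with-carry-style recurrence; the
// 0xFFFFF mask bounds every result to [0, 1 MB), so the commit sizes drawn
// from it in TEST(MemoryChunk) below never exceed the 1 MB reserve area.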


TEST(MemoryChunk) {
  Isolate* isolate = CcTest::i_isolate();
  isolate->InitializeLoggingAndCounters();
  Heap* heap = isolate->heap();
  CHECK(heap->ConfigureHeapDefault());

  size_t reserve_area_size = 1 * MB;
  size_t initial_commit_area_size, second_commit_area_size;

  for (int i = 0; i < 100; i++) {
    initial_commit_area_size = Pseudorandom();
    second_commit_area_size = Pseudorandom();

    // With CodeRange.
    CodeRange* code_range = new CodeRange(isolate);
    const int code_range_size = 32 * MB;
    if (!code_range->SetUp(code_range_size)) return;

    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      EXECUTABLE);

    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      NOT_EXECUTABLE);
    delete code_range;

    // Without CodeRange.
    code_range = NULL;
    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      EXECUTABLE);

    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      NOT_EXECUTABLE);
  }
}


TEST(MemoryAllocator) {
  Isolate* isolate = CcTest::i_isolate();
  isolate->InitializeLoggingAndCounters();
  Heap* heap = isolate->heap();
  CHECK(isolate->heap()->ConfigureHeapDefault());

  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));

  int total_pages = 0;
  OldSpace faked_space(heap,
                       heap->MaxReserved(),
                       OLD_POINTER_SPACE,
                       NOT_EXECUTABLE);
  Page* first_page = memory_allocator->AllocatePage(
      faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);

  first_page->InsertAfter(faked_space.anchor()->prev_page());
  CHECK(first_page->is_valid());
  CHECK(first_page->next_page() == faked_space.anchor());
  total_pages++;

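  // Every page linked into the space must report faked_space as its owner.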
  for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
    CHECK(p->owner() == &faked_space);
  }

  // Again, we should get n or n - 1 pages.
  Page* other = memory_allocator->AllocatePage(
      faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);
  CHECK(other->is_valid());
  total_pages++;
  other->InsertAfter(first_page);
  int page_count = 0;
  for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
    CHECK(p->owner() == &faked_space);
    page_count++;
  }
  CHECK(total_pages == page_count);

  Page* second_page = first_page->next_page();
  CHECK(second_page->is_valid());
  memory_allocator->Free(first_page);
  memory_allocator->Free(second_page);
  memory_allocator->TearDown();
  delete memory_allocator;
}


TEST(NewSpace) {
  Isolate* isolate = CcTest::i_isolate();
  isolate->InitializeLoggingAndCounters();
  Heap* heap = isolate->heap();
  CHECK(heap->ConfigureHeapDefault());
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  NewSpace new_space(heap);

  CHECK(new_space.SetUp(CcTest::heap()->ReservedSemiSpaceSize(),
                        CcTest::heap()->ReservedSemiSpaceSize()));
  CHECK(new_space.HasBeenSetUp());

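  // Fill the space with maximally sized regular objects until it cannot hold
  // another one; each object must land inside the new space.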
  while (new_space.Available() >= Page::kMaxRegularHeapObjectSize) {
    Object* obj =
        new_space.AllocateRaw(Page::kMaxRegularHeapObjectSize)->
        ToObjectUnchecked();
    CHECK(new_space.Contains(HeapObject::cast(obj)));
  }

  new_space.TearDown();
  memory_allocator->TearDown();
  delete memory_allocator;
}


TEST(OldSpace) {
  Isolate* isolate = CcTest::i_isolate();
  isolate->InitializeLoggingAndCounters();
  Heap* heap = isolate->heap();
  CHECK(heap->ConfigureHeapDefault());
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  OldSpace* s = new OldSpace(heap,
                             heap->MaxOldGenerationSize(),
                             OLD_POINTER_SPACE,
                             NOT_EXECUTABLE);
  CHECK(s != NULL);

  CHECK(s->SetUp());

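  // Exhaust the space: keep allocating maximally sized regular objects until
  // no free space remains.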
  while (s->Available() > 0) {
    s->AllocateRaw(Page::kMaxRegularHeapObjectSize)->ToObjectUnchecked();
  }

  s->TearDown();
  delete s;
  memory_allocator->TearDown();
  delete memory_allocator;
}


TEST(LargeObjectSpace) {
  v8::V8::Initialize();

  LargeObjectSpace* lo = CcTest::heap()->lo_space();
  CHECK(lo != NULL);

  int lo_size = Page::kPageSize;

  Object* obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE)->ToObjectUnchecked();
  CHECK(obj->IsHeapObject());

  HeapObject* ho = HeapObject::cast(obj);

  CHECK(lo->Contains(HeapObject::cast(obj)));

  CHECK(lo->FindObject(ho->address()) == obj);

  CHECK(lo->Contains(ho));

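  // Allocate large objects until the space refuses a request; Available()
  // must shrink after every successful allocation.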
  while (true) {
    intptr_t available = lo->Available();
    { MaybeObject* maybe_obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE);
      if (!maybe_obj->ToObject(&obj)) break;
    }
    CHECK(lo->Available() < available);
  }

  CHECK(!lo->IsEmpty());

  CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE)->IsFailure());
}


TEST(SizeOfFirstPageIsLargeEnough) {
  if (i::FLAG_always_opt) return;
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();

  // Freshly initialized VM gets by with one page per space.
  for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
    CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
  }

  // Executing the empty script gets by with one page per space.
  HandleScope scope(isolate);
  CompileRun("/*empty*/");
  for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
    CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
  }

  // No large objects required to perform the above steps.
  CHECK(isolate->heap()->lo_space()->IsEmpty());
}