v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine.
test-spaces.cc
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <stdlib.h>

#include "v8.h"
#include "cctest.h"

using namespace v8::internal;

#if 0
static void VerifyRegionMarking(Address page_start) {
#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
  Page* p = Page::FromAddress(page_start);

  p->SetRegionMarks(Page::kAllRegionsCleanMarks);

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    CHECK(!Page::FromAddress(addr)->IsRegionDirty(addr));
  }

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    Page::FromAddress(addr)->MarkRegionDirty(addr);
  }

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    CHECK(Page::FromAddress(addr)->IsRegionDirty(addr));
  }
#endif
}
#endif


// TODO(gc) you can no longer allocate pages like this. Details are hidden.
#if 0
TEST(Page) {
  byte* mem = NewArray<byte>(2*Page::kPageSize);
  CHECK(mem != NULL);

  Address start = reinterpret_cast<Address>(mem);
  Address page_start = RoundUp(start, Page::kPageSize);

  Page* p = Page::FromAddress(page_start);
  // Initialized Page has heap pointer, normally set by memory_allocator.
  p->heap_ = HEAP;
  CHECK(p->address() == page_start);
  CHECK(p->is_valid());

  p->opaque_header = 0;
  p->SetIsLargeObjectPage(false);
  CHECK(!p->next_page()->is_valid());

  CHECK(p->ObjectAreaStart() == page_start + Page::kObjectStartOffset);
  CHECK(p->ObjectAreaEnd() == page_start + Page::kPageSize);

  CHECK(p->Offset(page_start + Page::kObjectStartOffset) ==
        Page::kObjectStartOffset);
  CHECK(p->Offset(page_start + Page::kPageSize) == Page::kPageSize);

  CHECK(p->OffsetToAddress(Page::kObjectStartOffset) == p->ObjectAreaStart());
  CHECK(p->OffsetToAddress(Page::kPageSize) == p->ObjectAreaEnd());

  // Test region marking.
  VerifyRegionMarking(page_start);

  DeleteArray(mem);
}
#endif


namespace v8 {
namespace internal {

// Temporarily sets a given allocator in an isolate.
class TestMemoryAllocatorScope {
 public:
  TestMemoryAllocatorScope(Isolate* isolate, MemoryAllocator* allocator)
      : isolate_(isolate),
        old_allocator_(isolate->memory_allocator_) {
    isolate->memory_allocator_ = allocator;
  }

  ~TestMemoryAllocatorScope() {
    isolate_->memory_allocator_ = old_allocator_;
  }

 private:
  Isolate* isolate_;
  MemoryAllocator* old_allocator_;

  DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
};

} }  // namespace v8::internal
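

// Illustrative sketch, not part of the original file: TestMemoryAllocatorScope
// is an RAII guard, so a test swaps the isolate's allocator simply by keeping
// the scope object alive; the previous allocator is reinstated by the
// destructor when the object goes out of scope. The hypothetical helper below
// only demonstrates the pattern and is otherwise unused.
static void DemoAllocatorScope(Isolate* isolate, MemoryAllocator* allocator) {
  TestMemoryAllocatorScope scope(isolate, allocator);
  // While `scope` is alive, the isolate routes memory allocation through
  // `allocator`; the original allocator is restored on scope exit.
}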


TEST(MemoryAllocator) {
  OS::SetUp();
  Isolate* isolate = Isolate::Current();
  isolate->InitializeLoggingAndCounters();
  Heap* heap = isolate->heap();
  CHECK(isolate->heap()->ConfigureHeapDefault());

  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));

  int total_pages = 0;
  OldSpace faked_space(heap,
                       heap->MaxReserved(),
                       OLD_POINTER_SPACE,
                       NOT_EXECUTABLE);
  Page* first_page = memory_allocator->AllocatePage(
      faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);

  first_page->InsertAfter(faked_space.anchor()->prev_page());
  CHECK(first_page->is_valid());
  CHECK(first_page->next_page() == faked_space.anchor());
  total_pages++;

  for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
    CHECK(p->owner() == &faked_space);
  }

  // Again, we should get n or n - 1 pages.
  Page* other = memory_allocator->AllocatePage(
      faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);
  CHECK(other->is_valid());
  total_pages++;
  other->InsertAfter(first_page);
  int page_count = 0;
  for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
    CHECK(p->owner() == &faked_space);
    page_count++;
  }
  CHECK(total_pages == page_count);

  Page* second_page = first_page->next_page();
  CHECK(second_page->is_valid());
  memory_allocator->Free(first_page);
  memory_allocator->Free(second_page);
  memory_allocator->TearDown();
  delete memory_allocator;
}


TEST(NewSpace) {
  OS::SetUp();
  Isolate* isolate = Isolate::Current();
  isolate->InitializeLoggingAndCounters();
  Heap* heap = isolate->heap();
  CHECK(heap->ConfigureHeapDefault());
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  NewSpace new_space(heap);

  CHECK(new_space.SetUp(HEAP->ReservedSemiSpaceSize(),
                        HEAP->ReservedSemiSpaceSize()));
  CHECK(new_space.HasBeenSetUp());

  while (new_space.Available() >= Page::kMaxNonCodeHeapObjectSize) {
    Object* obj =
        new_space.AllocateRaw(Page::kMaxNonCodeHeapObjectSize)->
            ToObjectUnchecked();
    CHECK(new_space.Contains(HeapObject::cast(obj)));
  }

  new_space.TearDown();
  memory_allocator->TearDown();
  delete memory_allocator;
}


TEST(OldSpace) {
  OS::SetUp();
  Isolate* isolate = Isolate::Current();
  isolate->InitializeLoggingAndCounters();
  Heap* heap = isolate->heap();
  CHECK(heap->ConfigureHeapDefault());
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  OldSpace* s = new OldSpace(heap,
                             heap->MaxOldGenerationSize(),
                             OLD_POINTER_SPACE,
                             NOT_EXECUTABLE);
  CHECK(s != NULL);

  CHECK(s->SetUp());

  while (s->Available() > 0) {
    s->AllocateRaw(Page::kMaxNonCodeHeapObjectSize)->ToObjectUnchecked();
  }

  s->TearDown();
  delete s;
  memory_allocator->TearDown();
  delete memory_allocator;
}


TEST(LargeObjectSpace) {
  v8::V8::Initialize();

  LargeObjectSpace* lo = HEAP->lo_space();
  CHECK(lo != NULL);

  int lo_size = Page::kPageSize;

  Object* obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE)->ToObjectUnchecked();
  CHECK(obj->IsHeapObject());

  HeapObject* ho = HeapObject::cast(obj);

  CHECK(lo->Contains(HeapObject::cast(obj)));

  CHECK(lo->FindObject(ho->address()) == obj);

  CHECK(lo->Contains(ho));

  // Keep allocating until the space refuses; each successful allocation
  // must reduce the space still available.
  while (true) {
    intptr_t available = lo->Available();
    { MaybeObject* maybe_obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE);
      if (!maybe_obj->ToObject(&obj)) break;
    }
    CHECK(lo->Available() < available);
  }

  CHECK(!lo->IsEmpty());

  CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE)->IsFailure());
}