  Initialize(reinterpret_cast<PagedSpace*>(owner),
             page->area_start(), page->area_end(), kOnePageOnly, size_func);


void HeapObjectIterator::Initialize(PagedSpace* space,
                                    Address cur, Address end,
                                    HeapObjectIterator::PageMode mode,
                                    HeapObjectCallback size_f) {
// Advance the iterator to the next page that contains live objects.
bool HeapObjectIterator::AdvanceToNextPage() {
  ASSERT(cur_addr_ == cur_end_);
  if (page_mode_ == kOnePageOnly) return false;
  Page* cur_page;
  if (cur_addr_ == NULL) {
    cur_page = space_->anchor();
  } else {
    cur_page = Page::FromAddress(cur_addr_ - 1);
    ASSERT(cur_addr_ == cur_page->area_end());
  }
  cur_page = cur_page->next_page();
  if (cur_page == space_->anchor()) return false;
  cur_addr_ = cur_page->area_start();
  cur_end_ = cur_page->area_end();
  ASSERT(cur_page->WasSweptPrecisely());
  return true;
}
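// Illustrative sketch (not V8 code): the iterator above walks a circular,
// singly linked page list whose sentinel "anchor" marks both ends. A toy,
// self-contained version of that traversal pattern; PageNode and WalkPages
// are hypothetical names.
#include <cstdio>

struct PageNode {
  int id;
  PageNode* next;
};

// Visit every real node of a circular list that starts and ends at 'anchor'.
void WalkPages(PageNode* anchor) {
  for (PageNode* p = anchor->next; p != anchor; p = p->next) {
    std::printf("visiting page %d\n", p->id);
  }
}

int main() {
  PageNode anchor{-1, nullptr}, a{0, nullptr}, b{1, nullptr};
  anchor.next = &a; a.next = &b; b.next = &anchor;  // anchor -> a -> b -> anchor
  WalkPages(&anchor);
  return 0;
}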
      current_allocation_block_index_(0) {
}


  LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));

  size_t size = code_range_->size() - (aligned_base - base);
  allocation_list_.Add(FreeBlock(aligned_base, size));
  current_allocation_block_index_ = 0;
int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                       const FreeBlock* right) {
  // The difference of two addresses within the code range fits in an int.
  return static_cast<int>(left->start - right->start);
}
void CodeRange::GetNextAllocationBlock(size_t requested) {
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Sort and merge the free blocks on the free list and the allocation list.
  free_list_.AddAll(allocation_list_);
  allocation_list_.Clear();
  free_list_.Sort(&CompareFreeBlockAddress);
  for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
      allocation_list_.Add(merged);
    }
  }
  free_list_.Clear();

  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }
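// Illustrative sketch (not V8 code): the merge step above coalesces
// address-sorted free blocks whose ranges are adjacent. A self-contained toy
// version; Block and Coalesce are hypothetical names.
#include <algorithm>
#include <cstdio>
#include <vector>

struct Block {
  size_t start;
  size_t size;
};

std::vector<Block> Coalesce(std::vector<Block> blocks) {
  std::sort(blocks.begin(), blocks.end(),
            [](const Block& a, const Block& b) { return a.start < b.start; });
  std::vector<Block> merged;
  for (const Block& b : blocks) {
    if (!merged.empty() &&
        merged.back().start + merged.back().size == b.start) {
      merged.back().size += b.size;  // Adjacent: extend the previous block.
    } else {
      merged.push_back(b);
    }
  }
  return merged;
}

int main() {
  std::vector<Block> result = Coalesce({{0, 16}, {32, 8}, {16, 16}});
  for (const Block& b : result)
    std::printf("[%zu, +%zu)\n", b.start, b.size);  // Prints [0, +40)
  return 0;
}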
Address CodeRange::AllocateRawMemory(const size_t requested_size,
                                     const size_t commit_size,
                                     size_t* allocated) {
  ASSERT(commit_size <= requested_size);
  ASSERT(current_allocation_block_index_ < allocation_list_.length());
  if (requested_size >
      allocation_list_[current_allocation_block_index_].size) {
    // Find an allocation block large enough.
    GetNextAllocationBlock(requested_size);
  }
  // Commit the requested memory at the start of the current allocation block.
  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
  FreeBlock current = allocation_list_[current_allocation_block_index_];
  if (aligned_requested >= (current.size - Page::kPageSize)) {
    // Don't leave a small free block, useless for a large object or chunk.
    *allocated = current.size;
  } else {
    *allocated = aligned_requested;
  }
  ASSERT(*allocated <= current.size);

  allocation_list_[current_allocation_block_index_].start += *allocated;
  allocation_list_[current_allocation_block_index_].size -= *allocated;
  if (*allocated == current.size) {
    GetNextAllocationBlock(0);  // This block is used up, get the next one.
  }
  return current.start;
}
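// Illustrative sketch (not V8 code): carving an allocation off the front of a
// free block, as the bookkeeping above does (bump the start, shrink the
// size). FreeBlock here and CarveFront are hypothetical.
#include <cassert>
#include <cstdio>

struct FreeBlock {
  size_t start;
  size_t size;
};

// Returns the start of the carved region and shrinks the block in place.
size_t CarveFront(FreeBlock* block, size_t bytes) {
  assert(bytes <= block->size);
  size_t result = block->start;
  block->start += bytes;
  block->size -= bytes;
  return result;
}

int main() {
  FreeBlock block{4096, 1024};
  size_t piece = CarveFront(&block, 256);
  std::printf("allocated at %zu, block now [%zu, +%zu)\n",
              piece, block.start, block.size);  // 4096, [4352, +768)
  return 0;
}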
  return code_range_->Uncommit(start, length);


  free_list_.Add(FreeBlock(address, length));
  code_range_->Uncommit(address, length);


  allocation_list_.Free();
      capacity_executable_(0),
      lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
      highest_ever_allocated_(reinterpret_cast<void*>(0)) {
}

  ASSERT_GE(capacity_, capacity_executable_);
  size_executable_ = 0;

  capacity_executable_ = 0;
  UpdateAllocatedSpaceLimits(base, base + size);

  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  ASSERT(size_executable_ >= size);
  size_executable_ -= size;

  ASSERT(isolate_->code_range()->contains(
      static_cast<Address>(reservation->address())));

  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  ASSERT(size_executable_ >= size);
  size_executable_ -= size;

  size_ += reservation.size();

  ASSERT(commit_size <= reserve_size);

  if (reservation.Commit(base, commit_size, false)) {
    UpdateAllocatedSpaceLimits(base, base + commit_size);
  }
void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
  if (commit_size > committed_size) {
    // Commit size should be less or equal than the reserved size.
    ASSERT(commit_size <= size() - 2 * guard_size);
    // Grow the committed area.
    size_t length = commit_size - committed_size;

    if (!heap()->isolate()->memory_allocator()->CommitMemory(
            start, length, executable)) {
      return false;
    }
  } else if (commit_size < committed_size) {
    // Shrink the committed area.
    size_t length = committed_size - commit_size;
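// Illustrative sketch (not V8 code): growing or shrinking a committed region
// by the delta between the old and new sizes, mirroring the two branches
// above. ResizeCommitted, RangeOp and LogOp are hypothetical.
#include <cstddef>
#include <cstdio>

typedef bool (*RangeOp)(size_t start, size_t length);

bool ResizeCommitted(size_t base, size_t* committed, size_t requested,
                     RangeOp commit, RangeOp uncommit) {
  if (requested > *committed) {
    size_t length = requested - *committed;
    if (!commit(base + *committed, length)) return false;  // Grow.
  } else if (requested < *committed) {
    size_t length = *committed - requested;
    if (!uncommit(base + requested, length)) return false;  // Shrink.
  }
  *committed = requested;
  return true;
}

static bool LogOp(size_t start, size_t length) {
  std::printf("range op at %zu, length %zu\n", start, length);
  return true;
}

int main() {
  size_t committed = 4096;
  ResizeCommitted(0, &committed, 8192, &LogOp, &LogOp);  // Grows by 4096.
  ResizeCommitted(0, &committed, 2048, &LogOp, &LogOp);  // Shrinks by 6144.
  return 0;
}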
MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
                                            intptr_t commit_area_size,
                                            Executability executable,
                                            Space* owner) {
  ASSERT(commit_area_size <= reserve_area_size);

  if (size_executable_ + chunk_size > capacity_executable_) {
    LOG(isolate_,
        StringEvent("MemoryAllocator::AllocateRawMemory",
                    "V8 Executable Allocation capacity exceeded"));
    return NULL;
  }

  size_executable_ += chunk_size;

  size_executable_ += reservation.size();

  area_end = area_start + commit_area_size;

  area_end = area_start + commit_area_size;

  isolate_->counters()->memory_allocated()->
      Increment(static_cast<int>(chunk_size));

  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
  return LargePage::Initialize(isolate_->heap(), chunk);


  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
  if (!CommitMemory(start, size, executable)) return false;

  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));

  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
                                                AllocationAction action,
                                                size_t size) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    MemoryAllocationCallbackRegistration registration =
        memory_allocation_callbacks_[i];
    if ((registration.space & space) == space &&
        (registration.action & action) == action)
      registration.callback(space, action, static_cast<int>(size));
  }
}
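// Illustrative sketch (not V8 code): dispatching to registered callbacks
// whose space/action bitmasks cover the event, as the loop above does.
// Registration, Notify and the flag values below are hypothetical.
#include <cstdio>
#include <vector>

enum SpaceFlags { kOldSpace = 1 << 0, kNewSpace = 1 << 1 };
enum ActionFlags { kAllocate = 1 << 0, kFree = 1 << 1 };

struct Registration {
  int spaces;   // Bitmask of spaces the callback cares about.
  int actions;  // Bitmask of actions the callback cares about.
  void (*callback)(int space, int action, int size);
};

void Notify(const std::vector<Registration>& regs,
            int space, int action, int size) {
  for (const Registration& r : regs) {
    if ((r.spaces & space) == space && (r.actions & action) == action)
      r.callback(space, action, size);
  }
}

static void Print(int space, int action, int size) {
  std::printf("space=%d action=%d size=%d\n", space, action, size);
}

int main() {
  std::vector<Registration> regs = {
      {kOldSpace | kNewSpace, kAllocate, &Print}};
  Notify(regs, kNewSpace, kAllocate, 4096);  // Matches: printed.
  Notify(regs, kNewSpace, kFree, 4096);      // No match: ignored.
  return 0;
}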
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) return true;
  }

  MemoryAllocationCallbackRegistration registration(callback, space, action);

  return memory_allocation_callbacks_.Add(registration);

  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) {
      memory_allocation_callbacks_.Remove(i);
      return;
    }
  }
void MemoryAllocator::ReportStatistics() {
  float pct = static_cast<float>(capacity_ - size_) / capacity_;
  PrintF("  capacity: %" V8_PTR_PREFIX "d"
         ", used: %" V8_PTR_PREFIX "d"
         ", available: %%%d\n\n",
         capacity_, size_, static_cast<int>(pct*100));
}
                                                size_t reserved_size) {

  UpdateAllocatedSpaceLimits(start,

  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
    static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
  }
PagedSpace::PagedSpace(Heap* heap,
                       intptr_t max_capacity,
                       AllocationSpace id,
                       Executability executable)
    : Space(heap, id, executable),
      free_list_(this),
      was_swept_conservatively_(false),
      first_unswept_page_(Page::FromAddress(NULL)),
      unswept_free_bytes_(0) {
  while (iterator.has_next()) {

  while (it.has_next()) {
    size += it.next()->CommittedPhysicalMemory();
  }

  ASSERT(!heap()->mark_compact_collector()->in_use());

  if ((cur <= addr) && (addr < next)) return obj;

  if (p == NULL) return false;

  if (heap()->isolate()->code_range()->exists()) {

  while (it.has_next()) {

  sizes->huge_size_ = page->available_in_huge_free_list();
  sizes->small_size_ = page->available_in_small_free_list();
  sizes->medium_size_ = page->available_in_medium_free_list();
  sizes->large_size_ = page->available_in_large_free_list();

  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
void PagedSpace::Verify(ObjectVisitor* visitor) {
  // We can only iterate over the pages if they were swept precisely.
  bool allocation_pointer_found_in_space =
      (allocation_info_.top() == allocation_info_.limit());
  PageIterator page_iterator(this);
  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
    CHECK(page->owner() == this);
    if (page == Page::FromAllocationTop(allocation_info_.top())) {
      allocation_pointer_found_in_space = true;
    }
    CHECK(page->WasSweptPrecisely());
    HeapObjectIterator it(page, NULL);
    Address end_of_previous_object = page->area_start();
    Address top = page->area_end();
    int black_size = 0;
    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
      CHECK(end_of_previous_object <= object->address());

      // The first word should be a map, which we expect to be a valid Map.
      Map* map = object->map();
      CHECK(map->IsMap());

      // Perform space-specific object verification.
      VerifyObject(object);

      // All the interior pointers should be contained in the heap.
      int size = object->Size();
      object->IterateBody(map->instance_type(), size, visitor);
      if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
        black_size += size;
      }

      CHECK(object->address() + size <= top);
      end_of_previous_object = object->address() + size;
    }
    CHECK_LE(black_size, page->LiveBytes());
  }
  CHECK(allocation_pointer_found_in_space);
}
#endif  // VERIFY_HEAP
bool NewSpace::SetUp(int reserved_semispace_capacity,
                     int maximum_semispace_capacity) {
  size_t size = 2 * reserved_semispace_capacity;
  Address base =
      heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
          size, size, &reservation_);
  if (base == NULL) return false;

  chunk_base_ = base;
  chunk_size_ = static_cast<uintptr_t>(size);
  LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));

  ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);

  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);

#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
                       promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME

  ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
  ASSERT(static_cast<intptr_t>(chunk_size_) >=
         2 * heap()->ReservedSemiSpaceSize());

  to_space_.SetUp(chunk_base_,
                  initial_semispace_capacity,
                  maximum_semispace_capacity);
  from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
                    initial_semispace_capacity,
                    maximum_semispace_capacity);
  if (!to_space_.Commit()) {
    return false;
  }

  start_ = chunk_base_;
  address_mask_ = ~(2 * reserved_semispace_capacity - 1);
  object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;

  return true;
}


void NewSpace::TearDown() {
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = NULL;
  }

  allocation_info_.set_top(NULL);
  allocation_info_.set_limit(NULL);

  LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
  if (to_space_.GrowTo(new_capacity)) {
    // Only grow from-space if we managed to grow to-space.
    if (!from_space_.GrowTo(new_capacity)) {

  int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
  if (rounded_new_capacity < Capacity() &&
      to_space_.ShrinkTo(rounded_new_capacity)) {
    // Only shrink from-space if we managed to shrink to-space.
    from_space_.Reset();
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
void NewSpace::UpdateAllocationInfo() {
  allocation_info_.set_top(to_space_.page_low());
  allocation_info_.set_limit(to_space_.page_high());
}


void NewSpace::ResetAllocationInfo() {
  UpdateAllocationInfo();

  NewSpacePageIterator it(&to_space_);
  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
  if (heap()->inline_allocation_disabled()) {
    // Lowest limit when linear allocation was disabled.
    Address high = to_space_.page_high();
    Address new_top = allocation_info_.top() + size_in_bytes;
    allocation_info_.set_limit(Min(new_top, high));
  } else if (inline_allocation_limit_step() == 0) {
    // Normal limit is the end of the current page.
    allocation_info_.set_limit(to_space_.page_high());
  } else {
    // Lower limit during incremental marking.
    Address high = to_space_.page_high();
    Address new_top = allocation_info_.top() + size_in_bytes;
    Address new_limit = new_top + inline_allocation_limit_step_;
    allocation_info_.set_limit(Min(new_limit, high));
  }
}
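// Illustrative sketch (not V8 code): clamping an allocation limit to
// min(top + step, end_of_page), as the branches above do. ClampLimit is a
// hypothetical name.
#include <algorithm>
#include <cstdint>
#include <cstdio>

uintptr_t ClampLimit(uintptr_t top, uintptr_t step, uintptr_t page_high) {
  return std::min(top + step, page_high);
}

int main() {
  // Limit stops at the step boundary while there is room on the page...
  std::printf("%ju\n", static_cast<uintmax_t>(ClampLimit(1000, 256, 4096)));
  // ...and at the page end once the step would overshoot it.
  std::printf("%ju\n", static_cast<uintmax_t>(ClampLimit(4000, 256, 4096)));
  return 0;
}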
  Address top = allocation_info_.top();

  int remaining_in_page = static_cast<int>(limit - top);

  UpdateAllocationInfo();
MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
  Address old_top = allocation_info_.top();
  Address high = to_space_.page_high();
  if (allocation_info_.limit() < high) {
    // The limit was lowered because inline allocation is disabled or because
    // an incremental marking step is due; raise it and retry the fast path.
    Address new_top = old_top + size_in_bytes;
    int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
    heap()->incremental_marking()->Step(
        bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
    UpdateInlineAllocationLimit(size_in_bytes);
    top_on_previous_step_ = new_top;
    return AllocateRaw(size_in_bytes);
  } else if (AddFreshPage()) {
    // Switched to a new page; try allocating again.
    int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
    heap()->incremental_marking()->Step(
        bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
    top_on_previous_step_ = to_space_.page_low();
    return AllocateRaw(size_in_bytes);
  } else {
    return Failure::RetryAfterGC();
  }
}
void NewSpace::Verify() {

  while (current != top()) {

    // The first word should be a map, which we expect to be a valid Map.
    Map* map = object->map();
    CHECK(map->IsMap());

    // The object should not be code or a map.
    CHECK(!object->IsMap());
    CHECK(!object->IsCode());

    // All the interior pointers should be contained in the heap.
    VerifyPointersVisitor visitor;
    int size = object->Size();
    object->IterateBody(map->instance_type(), size, &visitor);

    CHECK(!page->is_anchor());
    current = page->area_start();

  from_space_.Verify();
void SemiSpace::SetUp(Address start,
                      int initial_capacity,
                      int maximum_capacity) {
  ASSERT(maximum_capacity >= Page::kPageSize);
  initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
  capacity_ = initial_capacity;
  maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
  maximum_committed_ = 0;
  committed_ = false;
  start_ = start;
  address_mask_ = ~(maximum_capacity - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
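// Illustrative sketch (not V8 code): with a power-of-two-sized, aligned
// region, "does this address belong to the space" reduces to a single mask
// compare, which is what address_mask_ above enables. ContainsAddress is a
// hypothetical name.
#include <cstdint>
#include <cstdio>

bool ContainsAddress(uintptr_t start, uintptr_t capacity, uintptr_t addr) {
  // capacity must be a power of two and start must be capacity-aligned.
  uintptr_t mask = ~(capacity - 1);
  return (addr & mask) == start;
}

int main() {
  const uintptr_t start = 0x100000;     // 1 MB aligned base.
  const uintptr_t capacity = 0x100000;  // 1 MB power-of-two size.
  std::printf("%d\n", ContainsAddress(start, capacity, start + 0x1234));   // 1
  std::printf("%d\n", ContainsAddress(start, capacity, start + capacity)); // 0
  return 0;
}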
  if (!heap()->isolate()->memory_allocator()->CommitBlock(
          start_, capacity_, executable())) {
    return false;
  }

  for (int i = 0; i < pages; i++) {
    NewSpacePage* new_page =
        NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
  }

  SetCapacity(capacity_);
  Address start = start_ + maximum_capacity_ - capacity_;
  if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
    return false;
  }

  while (it.has_next()) {
    size += it.next()->CommittedPhysicalMemory();
  }

  if (!Commit()) return false;
  ASSERT(new_capacity <= maximum_capacity_);
  ASSERT(new_capacity > capacity_);

  size_t delta = new_capacity - capacity_;

  if (!heap()->isolate()->memory_allocator()->CommitBlock(
          start_ + capacity_, delta, executable())) {
    return false;
  }
  SetCapacity(new_capacity);

  ASSERT(last_page != anchor());
  for (int i = pages_before; i < pages_after; i++) {

    last_page = new_page;
  }


  ASSERT(new_capacity >= initial_capacity_);
  ASSERT(new_capacity < capacity_);

  size_t delta = capacity_ - new_capacity;

  if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
    return false;
  }

  ASSERT((current_page_ >= first_page()) && (current_page_ <= new_last_page));

  SetCapacity(new_capacity);
void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {

  while (page != &anchor_) {
    if (becomes_to_space) {
      page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
      page->SetFlag(MemoryChunk::IN_TO_SPACE);
    } else {
      page->SetFlag(MemoryChunk::IN_FROM_SPACE);
      page->ClearFlag(MemoryChunk::IN_TO_SPACE);
    }
    ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
           page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
    page = page->next_page();
  }


  from->FlipPages(0, 0);
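// Illustrative sketch (not V8 code): toggling the "from"/"to" membership bits
// on every page when the semispaces swap roles, as FlipPages does above. The
// PageFlags values and Flip function are hypothetical.
#include <cstdio>

enum PageFlags { kInFromSpace = 1 << 0, kInToSpace = 1 << 1 };

int Flip(int flags, bool becomes_to_space) {
  if (becomes_to_space) {
    flags &= ~kInFromSpace;
    flags |= kInToSpace;
  } else {
    flags |= kInFromSpace;
    flags &= ~kInToSpace;
  }
  return flags;
}

int main() {
  int flags = kInFromSpace;
  flags = Flip(flags, /*becomes_to_space=*/true);
  std::printf("in_to=%d in_from=%d\n",
              (flags & kInToSpace) != 0, (flags & kInFromSpace) != 0);  // 1 0
  return 0;
}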
void SemiSpace::SetCapacity(int new_capacity) {
  capacity_ = new_capacity;
  if (capacity_ > maximum_committed_) {
    maximum_committed_ = capacity_;
  }
}
  while (it.has_next()) {


void SemiSpace::Verify() {
  bool is_from_space = (id_ == kFromSpace);
  NewSpacePage* page = anchor_.next_page();
  while (page != &anchor_) {
    CHECK(page->semi_space() == this);
    CHECK(page->InNewSpace());
    CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
                                        : MemoryChunk::IN_TO_SPACE));
    CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
                                         : MemoryChunk::IN_FROM_SPACE));
    if (!is_from_space) {
      // During incremental marking, to-space pages must have the
      // pointers-from-here-are-interesting flag set; otherwise it is clear.
      if (page->heap()->incremental_marking()->IsMarking()) {
        CHECK(page->IsFlagSet(
            MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
      } else {
        CHECK(!page->IsFlagSet(
            MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
      }
    }
    CHECK(page->prev_page()->next_page() == page);
    page = page->next_page();
  }
}


  CHECK_EQ(space, end_page->semi_space());
  // Start address is before end address, either on the same page or on a
  // later page in the linked list of semi-space pages.
  if (page == end_page) {
    CHECK(start <= end);
  } else {
    while (page != end_page) {
      page = page->next_page();
    }
  }
  Initialize(space->bottom(), space->top(), size_func);

  Initialize(start, space->top(), NULL);

  Initialize(from, to, NULL);


void SemiSpaceIterator::Initialize(Address start,
                                   Address end,
                                   HeapObjectCallback size_func) {
  SemiSpace::AssertValidRange(start, end);
  current_ = start;
  limit_ = end;
  size_func_ = size_func;
}


// heap_histograms is shared, always clear it before using it.
static void ClearHistograms(Isolate* isolate) {
  // We reset the name each time, though it hasn't changed.
#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
#undef DEF_TYPE_NAME

#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
  INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
#undef CLEAR_HISTOGRAM

  isolate->js_spill_information()->Clear();
}
static void ClearCodeKindStatistics(int* code_kind_statistics) {
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    code_kind_statistics[i] = 0;
  }
}


static void ReportCodeKindStatistics(int* code_kind_statistics) {
  PrintF("\n   Code kind histograms: \n");
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    if (code_kind_statistics[i] > 0) {
      PrintF("     %-20s: %10d bytes\n",
             Code::Kind2String(static_cast<Code::Kind>(i)),
             code_kind_statistics[i]);
    }
  }
  PrintF("\n");
}


static int CollectHistogramInfo(HeapObject* obj) {
  Isolate* isolate = obj->GetIsolate();
  InstanceType type = obj->map()->instance_type();
  ASSERT(isolate->heap_histograms()[type].name() != NULL);
  isolate->heap_histograms()[type].increment_number(1);
  isolate->heap_histograms()[type].increment_bytes(obj->Size());

  if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
    JSObject::cast(obj)->IncrementSpillStatistics(
        isolate->js_spill_information());
  }

  return obj->Size();
}


static void ReportHistogram(Isolate* isolate, bool print_spill) {
  PrintF("\n  Object Histogram:\n");
  for (int i = 0; i <= LAST_TYPE; i++) {
    if (isolate->heap_histograms()[i].number() > 0) {
      PrintF("    %-34s%10d (%10d bytes)\n",
             isolate->heap_histograms()[i].name(),
             isolate->heap_histograms()[i].number(),
             isolate->heap_histograms()[i].bytes());
    }
  }
  PrintF("\n");
  // Summarize string types as a single entry.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name)                 \
    string_number += isolate->heap_histograms()[type].number(); \
    string_bytes += isolate->heap_histograms()[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    PrintF("    %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
           string_bytes);
  }

  if (FLAG_collect_heap_spill_statistics && print_spill) {
    isolate->js_spill_information()->Print();
  }
}


void NewSpace::ClearHistograms() {
  for (int i = 0; i <= LAST_TYPE; i++) {
    allocated_histogram_[i].clear();
    promoted_histogram_[i].clear();
  }
}
static void DoReportStatistics(Isolate* isolate,
                               HistogramInfo* info, const char* description) {
  LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
  // Lump all the string types together.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name) \
    string_number += info[type].number();       \
    string_bytes += info[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    LOG(isolate,
        HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
  }

  // Then do the other types.
  for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; i++) {
    if (info[i].number() > 0) {
      LOG(isolate,
          HeapSampleItemEvent(info[i].name(), info[i].number(),
                              info[i].bytes()));
    }
  }
  LOG(isolate, HeapSampleEndEvent("NewSpace", description));
}
  if (FLAG_heap_stats) {

    PrintF("\n  Object Histogram:\n");
    for (int i = 0; i <= LAST_TYPE; i++) {
      if (allocated_histogram_[i].number() > 0) {
        PrintF("    %-34s%10d (%10d bytes)\n",
               allocated_histogram_[i].name(),
               allocated_histogram_[i].number(),
               allocated_histogram_[i].bytes());
      }
    }
    PrintF("\n");
  }

  DoReportStatistics(isolate, allocated_histogram_, "allocated");
  DoReportStatistics(isolate, promoted_histogram_, "promoted");


void NewSpace::RecordAllocation(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  allocated_histogram_[type].increment_number(1);
  allocated_histogram_[type].increment_bytes(obj->Size());
}


void NewSpace::RecordPromotion(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  promoted_histogram_[type].increment_number(1);
  promoted_histogram_[type].increment_bytes(obj->Size());
}
  ASSERT(size_in_bytes > 0);

  this_as_free_space->set_size(size_in_bytes);

  if (map() == GetHeap()->raw_unchecked_free_space_map()) {

  if (map() == GetHeap()->raw_unchecked_free_space_map()) {

  if (map() == GetHeap()->raw_unchecked_free_space_map()) {


intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
  intptr_t free_bytes = 0;
  if (category->top() != NULL) {
    // Locking both categories is safe: Concatenate operations are never
    // performed on the same free lists at the same time in reverse order.
    LockGuard<Mutex> target_lock_guard(mutex());
    LockGuard<Mutex> source_lock_guard(category->mutex());

    end_ = category->end();


  while (*n != NULL) {

    sum += free_space->Size();


  while (node != NULL) {

    node = node->next();


  while (node != NULL &&
         Page::FromAddress(node->address())->IsEvacuationCandidate()) {
    available_ -= reinterpret_cast<FreeSpace*>(node)->Size();
    node = node->next();
  }

  *node_size = reinterpret_cast<FreeSpace*>(node)->Size();
  available_ -= *node_size;


  if (node != NULL && *node_size < size_in_bytes) {
    Free(node, *node_size);
    *node_size = 0;
    return NULL;
  }


  available_ += size_in_bytes;


  if (*map_location == NULL) {
    *map_location = heap->free_space_map();
  } else {
    ASSERT(*map_location == heap->free_space_map());
  }
FreeList::FreeList(PagedSpace* owner)
    : owner_(owner), heap_(owner->heap()) {
  Reset();
}


intptr_t FreeList::Concatenate(FreeList* free_list) {
  intptr_t free_bytes = 0;


void FreeList::Reset() {
  small_list_.Reset();
  medium_list_.Reset();
  large_list_.Reset();
  huge_list_.Reset();
}


int FreeList::Free(Address start, int size_in_bytes) {
  if (size_in_bytes == 0) return 0;

  FreeListNode* node = FreeListNode::FromAddress(start);
  node->set_size(heap_, size_in_bytes);
  Page* page = Page::FromAddress(start);

  // Early return to drop too-small blocks on the floor.
  if (size_in_bytes < kSmallListMin) {
    page->add_non_available_small_blocks(size_in_bytes);
    return size_in_bytes;
  }

  // Insert other blocks at the head of a free list of the appropriate
  // magnitude.
  if (size_in_bytes <= kSmallListMax) {
    small_list_.Free(node, size_in_bytes);
    page->add_available_in_small_free_list(size_in_bytes);
  } else if (size_in_bytes <= kMediumListMax) {
    medium_list_.Free(node, size_in_bytes);
    page->add_available_in_medium_free_list(size_in_bytes);
  } else if (size_in_bytes <= kLargeListMax) {
    large_list_.Free(node, size_in_bytes);
    page->add_available_in_large_free_list(size_in_bytes);
  } else {
    huge_list_.Free(node, size_in_bytes);
    page->add_available_in_huge_free_list(size_in_bytes);
  }
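// Illustrative sketch (not V8 code): routing a freed block into one of
// several size-class lists, as FreeList::Free does above. The thresholds and
// the Bucket/ClassifyBlock names are hypothetical.
#include <cstdio>

enum Bucket { kTooSmall, kSmall, kMedium, kLarge, kHuge };

Bucket ClassifyBlock(int size_in_bytes) {
  const int kSmallMin = 32, kSmallMax = 255, kMediumMax = 2047,
            kLargeMax = 16383;
  if (size_in_bytes < kSmallMin) return kTooSmall;  // Dropped, only counted.
  if (size_in_bytes <= kSmallMax) return kSmall;
  if (size_in_bytes <= kMediumMax) return kMedium;
  if (size_in_bytes <= kLargeMax) return kLarge;
  return kHuge;
}

int main() {
  std::printf("%d %d %d %d %d\n",
              ClassifyBlock(16), ClassifyBlock(100), ClassifyBlock(1024),
              ClassifyBlock(10000), ClassifyBlock(100000));
  // Prints: 0 1 2 3 4
  return 0;
}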
FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {

  if (size_in_bytes <= kSmallAllocationMax) {
    node = small_list_.PickNodeFromList(node_size);
    if (node != NULL) {
      ASSERT(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_small_free_list(-(*node_size));
      return node;
    }
  }

  if (size_in_bytes <= kMediumAllocationMax) {
    node = medium_list_.PickNodeFromList(node_size);
    if (node != NULL) {
      ASSERT(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_medium_free_list(-(*node_size));
      return node;
    }
  }

  if (size_in_bytes <= kLargeAllocationMax) {
    node = large_list_.PickNodeFromList(node_size);
    if (node != NULL) {
      ASSERT(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_large_free_list(-(*node_size));
      return node;
    }
  }

  int huge_list_available = huge_list_.available();
  FreeListNode* top_node = huge_list_.top();
  for (FreeListNode** cur = &top_node;
       *cur != NULL;
       cur = (*cur)->next_address()) {
    FreeListNode* cur_node = *cur;
    // Skip nodes that live on evacuation candidate pages.
    while (cur_node != NULL &&
           Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
      int size = reinterpret_cast<FreeSpace*>(cur_node)->Size();
      huge_list_available -= size;
      page = Page::FromAddress(cur_node->address());
      page->add_available_in_huge_free_list(-size);
      cur_node = cur_node->next();
    }

    if (cur_node == NULL) {
      break;
    }

    ASSERT((*cur)->map() == heap_->raw_unchecked_free_space_map());
    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
    int size = cur_as_free_space->Size();
    if (size >= size_in_bytes) {
      // Large enough node found: unlink it from the huge list.
      node = *cur;
      *cur = node->next();
      *node_size = size;
      huge_list_available -= size;
      page = Page::FromAddress(node->address());
      page->add_available_in_huge_free_list(-size);
      break;
    }
  }

  if (huge_list_.top() == NULL) {
    huge_list_.set_end(NULL);
  }

  if (size_in_bytes <= kSmallListMax) {
    node = small_list_.PickNodeFromList(size_in_bytes, node_size);
    if (node != NULL) {
      ASSERT(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_small_free_list(-(*node_size));
    }
  } else if (size_in_bytes <= kMediumListMax) {
    node = medium_list_.PickNodeFromList(size_in_bytes, node_size);
    if (node != NULL) {
      ASSERT(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_medium_free_list(-(*node_size));
    }
  } else if (size_in_bytes <= kLargeListMax) {
    node = large_list_.PickNodeFromList(size_in_bytes, node_size);
    if (node != NULL) {
      ASSERT(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_large_free_list(-(*node_size));
    }
  }
  ASSERT(0 < size_in_bytes);
  ASSERT(size_in_bytes <= kMaxBlockSize);

  // Mark the old linear allocation area with a free space map so it can be
  // skipped when scanning the heap.
  int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());

  owner_->Free(owner_->top(), old_linear_size);

  owner_->heap()->incremental_marking()->OldSpaceStep(
      size_in_bytes - old_linear_size);

  int new_node_size = 0;
  FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
  if (new_node == NULL) {
    return NULL;
  }

  int bytes_left = new_node_size - size_in_bytes;

  // Zap the freshly allocated area if heap verification requests it.
  for (int i = 0; i < size_in_bytes / kPointerSize; i++) {

  ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));

  const int kThreshold = IncrementalMarking::kAllocatedThreshold;

  if (owner_->heap()->inline_allocation_disabled()) {
    // Keep the linear allocation area empty if requested, returning the area
    // to the free list instead.
    owner_->Free(new_node->address() + size_in_bytes, bytes_left);

  } else if (bytes_left > kThreshold &&
             owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
             FLAG_incremental_marking_steps) {
    // Don't hand out an overly large linear area while incremental marking is
    // in progress; give the tail of the node back to the free list.
    int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
    owner_->Free(new_node->address() + size_in_bytes + linear_size,
                 new_node_size - size_in_bytes - linear_size);
    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
                           new_node->address() + size_in_bytes + linear_size);
  } else if (bytes_left > 0) {
    // Normally give the rest of the node to the allocator as its new linear
    // allocation area.
    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
                           new_node->address() + new_node_size);
  }
  p->set_available_in_huge_free_list(0);

  if (sum < p->area_size()) {
    p->set_available_in_small_free_list(0);
    p->set_available_in_medium_free_list(0);
    p->set_available_in_large_free_list(0);
  }


intptr_t FreeListCategory::SumFreeList() {
  intptr_t sum = 0;
  FreeListNode* cur = top();
  while (cur != NULL) {
    ASSERT(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
    sum += cur_as_free_space->Size();
    cur = cur->next();
  }
  return sum;
}


static const int kVeryLongFreeList = 500;


int FreeListCategory::FreeListLength() {
  int length = 0;
  FreeListNode* cur = top();
  while (cur != NULL) {
    length++;
    cur = cur->next();
    if (length == kVeryLongFreeList) return length;
  }
  return length;
}


bool FreeList::IsVeryLong() {
  if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
  if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
  if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
  if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
  return false;
}


// This can take a very long time because it is linear in the number of
// entries on the free list, so it should not be called if FreeListLength
// returns kVeryLongFreeList.
intptr_t FreeList::SumFreeLists() {
  intptr_t sum = small_list_.SumFreeList();
  sum += medium_list_.SumFreeList();
  sum += large_list_.SumFreeList();
  sum += huge_list_.SumFreeList();
  return sum;
}
  if (FLAG_gc_verbose) {
    PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
           reinterpret_cast<intptr_t>(p));
  }


  intptr_t freed_bytes = 0;

    if (FLAG_gc_verbose) {
      PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
             reinterpret_cast<intptr_t>(p));
    }

    freed_bytes +=
        MarkCompactCollector::
            SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
                this, NULL, p);

  } while (p != anchor() && freed_bytes < bytes_to_sweep);


      IsEvacuationCandidate()) {


  // Allocation in this space has failed.  Give the sweeper threads a few more
  // rounds to finish before falling back to a full GC.
  const int kMaxSweepingTries = 5;
  bool sweeping_complete = false;

  for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {

  if (object != NULL) return object;

  if (!heap()->always_allocate() &&
      heap()->OldGenerationAllocationLimitReached()) {

  if (object != NULL) return object;
void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  ReportCodeKindStatistics(isolate->code_kind_statistics());
  PrintF("Code comment statistics (\"   [ comment-txt   :    size/   "
         "count  (average)\"):\n");
  for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
    const CommentStatistic& cs = comments_statistics[i];
    if (cs.size > 0) {
      PrintF("   %-30s: %10d/%6d     (%d)\n", cs.comment, cs.size, cs.count,
             cs.size/cs.count);
    }
  }
  PrintF("\n");
}


void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  ClearCodeKindStatistics(isolate->code_kind_statistics());
  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
    comments_statistics[i].Clear();
  }
  comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
  comments_statistics[CommentStatistic::kMaxComments].size = 0;
  comments_statistics[CommentStatistic::kMaxComments].count = 0;
}
static void EnterComment(Isolate* isolate, const char* comment, int delta) {
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  // Do not count empty comments.
  if (delta <= 0) return;
  CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
  // Search for a free or matching entry in 'comments_statistics': 'cs' points
  // to the result.
  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
    if (comments_statistics[i].comment == NULL) {
      cs = &comments_statistics[i];
      cs->comment = comment;
      break;
    } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
      cs = &comments_statistics[i];
      break;
    }
  }
  // Update entry for 'comment'.
  cs->size += delta;
  cs->count += 1;
}
static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
  ASSERT(!it->done());
  ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
  const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
  if (tmp[0] != '[') {
    // Not a nested comment; skip it.
    return;
  }

  // Search for the end of the nested comment or a new nested comment.
  const char* const comment_txt =
      reinterpret_cast<const char*>(it->rinfo()->data());
  const byte* prev_pc = it->rinfo()->pc();
  int flat_delta = 0;
  it->next();
  while (true) {
    ASSERT(!it->done());
    if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
      const char* const txt =
          reinterpret_cast<const char*>(it->rinfo()->data());
      flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
      if (txt[0] == ']') break;  // End of nested comment.
      // A new nested comment: recurse, then skip the code it covered.
      CollectCommentStatistics(isolate, it);
      prev_pc = it->rinfo()->pc();
    }
    it->next();
  }
  EnterComment(isolate, comment_txt, flat_delta);
}
void PagedSpace::CollectCodeStatistics() {
  Isolate* isolate = heap()->isolate();
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      isolate->code_kind_statistics()[code->kind()] += code->Size();
      RelocIterator it(code);
      int delta = 0;
      const byte* prev_pc = code->instruction_start();
      while (!it.done()) {
        if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
          delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
          CollectCommentStatistics(isolate, &it);
          prev_pc = it.rinfo()->pc();
        }
        it.next();
      }

      ASSERT(code->instruction_start() <= prev_pc &&
             prev_pc <= code->instruction_end());
      delta += static_cast<int>(code->instruction_end() - prev_pc);
      EnterComment(isolate, "NoComment", delta);
    }
  }
}
void PagedSpace::ReportStatistics() {

  ClearHistograms(heap()->isolate());
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
    CollectHistogramInfo(obj);
  ReportHistogram(heap()->isolate(), true);
}


  CHECK(object->IsMap());

  CHECK(object->IsCell());

  CHECK(object->IsPropertyCell());


  current_ = space->first_page_;

  current_ = space->first_page_;
  size_func_ = size_func;
static bool ComparePointers(void* key1, void* key2) {
  return key1 == key2;
}


LargeObjectSpace::LargeObjectSpace(Heap* heap,
                                   intptr_t max_capacity,
                                   AllocationSpace id)
    : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
      max_capacity_(max_capacity),
      first_page_(NULL),
      size_(0),
      page_count_(0),
      objects_size_(0),
      chunk_map_(ComparePointers, 1024) {}


  maximum_committed_ = 0;

  while (first_page_ != NULL) {
    LargePage* page = first_page_;
    first_page_ = first_page_->next_page();
    LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));


  // Check if we want to force a GC before growing the old space further.
  // If so, fail the allocation.
  if (!heap()->always_allocate() &&
      heap()->OldGenerationAllocationLimitReached()) {
    return Failure::RetryAfterGC(identity());
  }

  if (Size() + object_size > max_capacity_) {
    return Failure::RetryAfterGC(identity());
  }

  LargePage* page = heap()->isolate()->memory_allocator()->
      AllocateLargePage(object_size, this, executable);
  if (page == NULL) return Failure::RetryAfterGC(identity());

  size_ += static_cast<int>(page->size());
  objects_size_ += object_size;

  if (size_ > maximum_committed_) {
    maximum_committed_ = size_;
  }

  // Register all MemoryChunk::kAlignment-aligned chunk addresses in the chunk
  // map so that FindObject can look up the page by any interior address.
  uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
  uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
  for (uintptr_t key = base; key <= limit; key++) {
    HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
                                              static_cast<uint32_t>(key),
                                              true);
    entry->value = page;
  }
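// Illustrative sketch (not V8 code): mapping every alignment-granular key a
// large page covers to that page, so any interior address can be looked up.
// The kAlign constant and RegisterPage/LookupPage names are hypothetical.
#include <cstdint>
#include <cstdio>
#include <map>

const uintptr_t kAlign = 1u << 20;  // 1 MB alignment granularity.

void RegisterPage(std::map<uintptr_t, int>* chunk_map,
                  uintptr_t page_start, size_t page_size, int page_id) {
  uintptr_t base = page_start / kAlign;
  uintptr_t limit = (page_start + page_size - 1) / kAlign;
  for (uintptr_t key = base; key <= limit; key++) (*chunk_map)[key] = page_id;
}

int LookupPage(const std::map<uintptr_t, int>& chunk_map, uintptr_t addr) {
  std::map<uintptr_t, int>::const_iterator it = chunk_map.find(addr / kAlign);
  return it == chunk_map.end() ? -1 : it->second;
}

int main() {
  std::map<uintptr_t, int> chunk_map;
  RegisterPage(&chunk_map, 5 * kAlign, 3 * kAlign, /*page_id=*/7);
  std::printf("%d %d\n", LookupPage(chunk_map, 6 * kAlign + 123),
              LookupPage(chunk_map, 1 * kAlign));  // Prints: 7 -1
  return 0;
}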
  // Overwrite the first word with the fixed array map to keep the heap
  // iterable.
  reinterpret_cast<Object**>(object->address())[0] =
      heap()->fixed_array_map();

  while (current != NULL) {

  HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
                                        static_cast<uint32_t>(key),
                                        false);

  while (current != NULL) {
    HeapObject* object = current->GetObject();
    // Can this large page contain pointers to non-trivial objects?  No other
    // pointer object is this big.
    bool is_pointer_object = object->IsFixedArray();
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    if (mark_bit.Get()) {
      // Marked: the object survives this cycle.
    } else {
      // Unmarked: cut the page out of the list and free it.
      if (previous == NULL) {
        first_page_ = current;
      } else {
        previous->set_next_page(current);
      }

      // Free the chunk.
      MarkCompactCollector::ReportDeleteIfNeeded(object, heap()->isolate());
      size_ -= static_cast<int>(page->size());
      objects_size_ -= object->Size();

      // Remove entries belonging to this page.
      uintptr_t base = reinterpret_cast<uintptr_t>(page)/alignment;
      uintptr_t limit = base + (page->size()-1)/alignment;
      for (uintptr_t key = base; key <= limit; key++) {
        chunk_map_.Remove(reinterpret_cast<void*>(key),
                          static_cast<uint32_t>(key));
      }

      if (is_pointer_object) {


  Address address = object->address();
  MemoryChunk* chunk = MemoryChunk::FromAddress(address);

  bool owned = (chunk->owner() == this);
void LargeObjectSpace::Verify() {
  for (LargePage* chunk = first_page_;
       chunk != NULL;
       chunk = chunk->next_page()) {
    // Each chunk contains an object that starts at the large object page's
    // object area start.
    HeapObject* object = chunk->GetObject();

    // The first word should be a map, which we expect to be a valid Map.
    Map* map = object->map();
    CHECK(map->IsMap());

    // We have only code, sequential strings, external strings (sequential
    // strings that have been morphed into external strings), fixed arrays,
    // and byte arrays in large object space.
    CHECK(object->IsCode() || object->IsSeqString() ||
          object->IsExternalString() || object->IsFixedArray() ||
          object->IsFixedDoubleArray() || object->IsByteArray());

    if (object->IsCode()) {
      VerifyPointersVisitor code_visitor;
      object->IterateBody(map->instance_type(),
                          object->Size(),
                          &code_visitor);
    } else if (object->IsFixedArray()) {
      FixedArray* array = FixedArray::cast(object);
      for (int j = 0; j < array->length(); j++) {
        Object* element = array->get(j);
        if (element->IsHeapObject()) {
          HeapObject* element_object = HeapObject::cast(element);
          CHECK(element_object->map()->IsMap());
        }
      }
    }
  }
}


  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
void LargeObjectSpace::ReportStatistics() {
  PrintF("  size: %" V8_PTR_PREFIX "d\n", size_);
  int num_objects = 0;
  ClearHistograms(heap()->isolate());
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    num_objects++;
    CollectHistogramInfo(obj);
  }

  PrintF("  number of objects %d, "
         "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
  if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
}


void LargeObjectSpace::CollectCodeStatistics() {
  Isolate* isolate = heap()->isolate();
  LargeObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      isolate->code_kind_statistics()[code->kind()] += code->Size();
    }
  }
}
void Page::Print() {
  // Make a best-effort to print the objects in the page.
  PrintF("Page@%p in %s\n",
         this->address(),
         AllocationSpaceName(this->owner()->identity()));
  printf(" --------------------------------------\n");
  HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
  unsigned mark_size = 0;
  for (HeapObject* object = objects.Next();
       object != NULL;
       object = objects.Next()) {
    bool is_marked = Marking::MarkBitFrom(object).Get();
    PrintF(" %c ", (is_marked ? '!' : ' '));  // All marked objects have a '!'.
    if (is_marked) {
      mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
    }
    object->ShortPrint();
    PrintF("\n");
  }
  printf(" --------------------------------------\n");
  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
}