       owner == HEAP->old_data_space() ||
       owner == HEAP->map_space() ||
       owner == HEAP->cell_space() ||
       owner == HEAP->code_space());
Initialize(reinterpret_cast<PagedSpace*>(owner),

void HeapObjectIterator::Initialize(PagedSpace* space,
                                    HeapObjectIterator::PageMode mode,
bool HeapObjectIterator::AdvanceToNextPage() {
  ASSERT(cur_addr_ == cur_end_);
  if (page_mode_ == kOnePageOnly) return false;
  Page* cur_page;
  if (cur_addr_ == NULL) {
    cur_page = space_->anchor();
  } else {
    cur_page = Page::FromAddress(cur_addr_ - 1);
    ASSERT(cur_addr_ == cur_page->area_end());
  }
  cur_page = cur_page->next_page();
  if (cur_page == space_->anchor()) return false;
  cur_addr_ = cur_page->area_start();
  cur_end_ = cur_page->area_end();
  ASSERT(cur_page->WasSweptPrecisely());
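The iterator is page-based: when the cursor reaches the end of the current page's object area, AdvanceToNextPage() moves on to the next page in the space and stops once it wraps back around to the space's anchor. A minimal standalone sketch of the same walking pattern (the Page, Space, and ObjectIterator types below are illustrative stand-ins, not V8's classes):

#include <cstddef>
#include <vector>

// Hypothetical stand-ins for V8's Page/PagedSpace; only the iteration
// pattern mirrors HeapObjectIterator::AdvanceToNextPage().
struct Page {
  const char* area_start;   // first byte of the object area
  const char* area_end;     // one past the last byte
};

struct Space {
  std::vector<Page> pages;  // V8 links pages in a circular list behind an anchor
};

class ObjectIterator {
 public:
  explicit ObjectIterator(const Space* space)
      : space_(space), page_index_(0), cur_(nullptr), end_(nullptr) {}

  // Returns the next object's address, or nullptr when all pages are exhausted.
  const char* Next(size_t object_size) {
    while (cur_ == end_) {
      if (!AdvanceToNextPage()) return nullptr;
    }
    const char* obj = cur_;
    cur_ += object_size;  // V8 asks the object itself for its size
    return obj;
  }

 private:
  bool AdvanceToNextPage() {
    if (page_index_ >= space_->pages.size()) return false;  // hit the "anchor"
    const Page& page = space_->pages[page_index_++];
    cur_ = page.area_start;
    end_ = page.area_end;
    return true;
  }

  const Space* space_;
  size_t page_index_;
  const char* cur_;
  const char* end_;
};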
      current_allocation_block_index_(0) {

  LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));

  size_t size = code_range_->size() - (aligned_base - base);
  allocation_list_.Add(FreeBlock(aligned_base, size));
  current_allocation_block_index_ = 0;
int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                       const FreeBlock* right) {
  return static_cast<int>(left->start - right->start);
void CodeRange::GetNextAllocationBlock(size_t requested) {
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {

  free_list_.AddAll(allocation_list_);
  allocation_list_.Clear();
  free_list_.Sort(&CompareFreeBlockAddress);
  for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
    if (merged.size > 0) {
      allocation_list_.Add(merged);

  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
  ASSERT(current_allocation_block_index_ < allocation_list_.length());
  if (requested > allocation_list_[current_allocation_block_index_].size) {
    GetNextAllocationBlock(requested);

  FreeBlock current = allocation_list_[current_allocation_block_index_];
    *allocated = current.size;
    *allocated = aligned_requested;
  ASSERT(*allocated <= current.size);

  allocation_list_[current_allocation_block_index_].start += *allocated;
  allocation_list_[current_allocation_block_index_].size -= *allocated;
  if (*allocated == current.size) {
    GetNextAllocationBlock(0);
  return current.start;
  free_list_.Add(FreeBlock(address, length));
  code_range_->Uncommit(address, length);

  allocation_list_.Free();
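CodeRange manages one reserved virtual-memory region: allocation_list_ holds the blocks currently being carved up, freed blocks accumulate in free_list_, and GetNextAllocationBlock() refills allocation_list_ by sorting the free blocks by address and coalescing adjacent ones. A simplified sketch of that sort-and-merge step, assuming plain std::vector in place of V8's List type:

#include <algorithm>
#include <cstdint>
#include <vector>

// Simplified stand-in for CodeRange::FreeBlock.
struct FreeBlock {
  uintptr_t start;
  size_t size;
};

// Sort freed blocks by address, coalesce adjacent ones, and return the merged
// list -- the same idea as CodeRange::GetNextAllocationBlock() rebuilding
// allocation_list_ from free_list_.
std::vector<FreeBlock> MergeFreeBlocks(std::vector<FreeBlock> free_list) {
  std::sort(free_list.begin(), free_list.end(),
            [](const FreeBlock& a, const FreeBlock& b) {
              return a.start < b.start;
            });

  std::vector<FreeBlock> merged_list;
  for (size_t i = 0; i < free_list.size();) {
    FreeBlock merged = free_list[i];
    i++;
    // Absorb every following block that starts exactly where this one ends.
    while (i < free_list.size() &&
           free_list[i].start == merged.start + merged.size) {
      merged.size += free_list[i].size;
      i++;
    }
    if (merged.size > 0) merged_list.push_back(merged);
  }
  return merged_list;
}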
      capacity_executable_(0),
      size_executable_(0) {

  ASSERT_GE(capacity_, capacity_executable_);
  size_executable_ = 0;

  capacity_executable_ = 0;

  size_t size = reservation->size();
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  ASSERT(size_executable_ >= size);
  size_executable_ -= size;
      static_cast<Address>(reservation->address())));

  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  ASSERT(size_executable_ >= size);
  size_executable_ -= size;

  size_ += reservation.size();

  if (!reservation.Commit(base, size, false)) {
void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {

  if (size_executable_ + chunk_size > capacity_executable_) {
    LOG(isolate_,
        StringEvent("MemoryAllocator::AllocateRawMemory",
                    "V8 Executable Allocation capacity exceeded"));
    size_executable_ += chunk_size;
    size_executable_ += reservation.size();

    area_end = area_start + body_size;
    area_end = base + chunk_size;

  isolate_->counters()->memory_allocated()->
      Increment(static_cast<int>(chunk_size));

  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
  return LargePage::Initialize(isolate_->heap(), chunk);

  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));

  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));

  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    MemoryAllocationCallbackRegistration registration =
        memory_allocation_callbacks_[i];
    if ((registration.space & space) == space &&
        (registration.action & action) == action)
      registration.callback(space, action, static_cast<int>(size));

  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) return true;

  MemoryAllocationCallbackRegistration registration(callback, space, action);
  return memory_allocation_callbacks_.Add(registration);

  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) {
      memory_allocation_callbacks_.Remove(i);
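Each registration pairs a callback with a space filter and an action filter; on every chunk allocation or free, the allocator walks the registration list and invokes the callbacks whose filters match. A small standalone sketch of the same dispatch (the enum values and the CallbackRegistry type are hypothetical, not the V8 API):

#include <cstddef>
#include <vector>

// Hypothetical bit-flag filters in the spirit of ObjectSpace/AllocationAction.
enum Space  { kSpaceNew = 1 << 0, kSpaceOld = 1 << 1, kSpaceAll = kSpaceNew | kSpaceOld };
enum Action { kAllocate = 1 << 0, kFree = 1 << 1, kAllocateAndFree = kAllocate | kFree };

typedef void (*MemoryCallback)(Space space, Action action, size_t size);

struct Registration {
  MemoryCallback callback;
  int space_filter;   // bitmask of Space values
  int action_filter;  // bitmask of Action values
};

class CallbackRegistry {
 public:
  void Add(MemoryCallback cb, int spaces, int actions) {
    registrations_.push_back({cb, spaces, actions});
  }

  void Remove(MemoryCallback cb) {
    for (size_t i = 0; i < registrations_.size(); ++i) {
      if (registrations_[i].callback == cb) {
        registrations_.erase(registrations_.begin() + i);
        return;
      }
    }
  }

  // Mirrors the dispatch loop above: fire every callback whose filters match.
  void Notify(Space space, Action action, size_t size) const {
    for (const Registration& r : registrations_) {
      if ((r.space_filter & space) == space &&
          (r.action_filter & action) == action) {
        r.callback(space, action, size);
      }
    }
  }

 private:
  std::vector<Registration> registrations_;
};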
void MemoryAllocator::ReportStatistics() {
  float pct = static_cast<float>(capacity_ - size_) / capacity_;
         ", available: %%%d\n\n",
         capacity_, size_, static_cast<int>(pct*100));

  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
    static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
                       intptr_t max_capacity,
    : Space(heap, id, executable),
      was_swept_conservatively_(false),
      first_unswept_page_(Page::FromAddress(NULL)),
      unswept_free_bytes_(0) {

  while (iterator.has_next()) {

  ASSERT(!heap()->mark_compact_collector()->in_use());

  Address next = cur + obj->Size();
  if ((cur <= addr) && (addr < next)) return obj;

  if (p == NULL) return false;
  while (it.has_next()) {

  intptr_t size = free_list_.EvictFreeListItems(page);

  while (it.has_next()) {
    Page* page = it.next();
    if (obj->IsFreeSpace() &&

  FreeList::SizeStats sizes;
void PagedSpace::Verify(ObjectVisitor* visitor) {
  bool allocation_pointer_found_in_space =
  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
    ASSERT(page->owner() == this);
      allocation_pointer_found_in_space = true;
    ASSERT(page->WasSweptPrecisely());
    HeapObjectIterator it(page, NULL);
    Address end_of_previous_object = page->area_start();

    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
      ASSERT(end_of_previous_object <= object->address());
      Map* map = object->map();
      VerifyObject(object);
      int size = object->Size();
      object->IterateBody(map->instance_type(), size, visitor);
      ASSERT(object->address() + size <= top);
      end_of_previous_object = object->address() + size;
    ASSERT_LE(black_size, page->LiveBytes());
  ASSERT(allocation_pointer_found_in_space);
                      int maximum_semispace_capacity) {
  size_t size = 2 * reserved_semispace_capacity;
      size, size, &reservation_);
  if (base == NULL) return false;

  chunk_size_ = static_cast<uintptr_t>(size);
  LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));

  ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);

  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
                       promoted_histogram_[name].set_name(#name);

  ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
  ASSERT(static_cast<intptr_t>(chunk_size_) >=
         2 * heap()->ReservedSemiSpaceSize());

  to_space_.SetUp(chunk_base_,
                  initial_semispace_capacity,
                  maximum_semispace_capacity);
  from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
                    initial_semispace_capacity,
                    maximum_semispace_capacity);
  if (!to_space_.Commit()) {

  start_ = chunk_base_;
  address_mask_ = ~(2 * reserved_semispace_capacity - 1);
  object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
  if (allocated_histogram_) {
    allocated_histogram_ = NULL;
  if (promoted_histogram_) {
    promoted_histogram_ = NULL;

  LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
  if (to_space_.GrowTo(new_capacity)) {
    if (!from_space_.GrowTo(new_capacity)) {

  int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
  if (rounded_new_capacity < Capacity() &&
      to_space_.ShrinkTo(rounded_new_capacity)) {
    from_space_.Reset();
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
void NewSpace::UpdateAllocationInfo() {
  if (heap()->incremental_marking()->IsMarking() &&
    allocation_info_.limit = Min(new_limit, allocation_info_.limit);

  UpdateAllocationInfo();

  NewSpacePageIterator it(&to_space_);
  while (it.has_next()) {

  int remaining_in_page = static_cast<int>(limit - top);
  UpdateAllocationInfo();
MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
  Address new_top = old_top + size_in_bytes;
  if (allocation_info_.limit < high) {
        allocation_info_.limit + inline_allocation_limit_step_,
    int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
    top_on_previous_step_ = new_top;
    return AllocateRaw(size_in_bytes);

    int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
    top_on_previous_step_ = to_space_.page_low();
    return AllocateRaw(size_in_bytes);
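New-space allocation bumps allocation_info_.top; the limit is deliberately kept only inline_allocation_limit_step_ ahead of top, so the slow path runs periodically and can report how many bytes were allocated since the previous step (information that feeds incremental marking). A condensed, standalone sketch of a bump-pointer allocator with such a step; the class, its fields, and the printf notification are illustrative only:

#include <cstddef>
#include <cstdio>

// A toy bump-pointer allocator whose limit trails the real end of the space,
// loosely following NewSpace::AllocateRaw()/SlowAllocateRaw().  Not V8 code.
class BumpAllocator {
 public:
  BumpAllocator(char* start, size_t size, size_t step)
      : top_(start),
        end_(start + size),
        step_(step < size ? step : size),
        limit_(start + step_),
        top_on_previous_step_(start) {}

  void* Allocate(size_t bytes) {
    if (top_ + bytes > limit_) return SlowAllocate(bytes);
    void* result = top_;   // fast path: just bump the top pointer
    top_ += bytes;
    return result;
  }

 private:
  void* SlowAllocate(size_t bytes) {
    if (top_ + bytes > end_) return nullptr;  // the real code would grow or GC
    // Report progress since the previous step (V8 hands this to the
    // incremental marker), then push the artificial limit forward.
    size_t allocated = static_cast<size_t>(top_ - top_on_previous_step_);
    std::printf("allocation step: %zu bytes since last step\n", allocated);
    top_on_previous_step_ = top_;
    char* wanted = top_ + (bytes > step_ ? bytes : step_);
    limit_ = (wanted > end_) ? end_ : wanted;
    return Allocate(bytes);  // retry on the fast path
  }

  char* top_;
  char* end_;
  size_t step_;
  char* limit_;
  char* top_on_previous_step_;
};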
void NewSpace::Verify() {
  while (current != top()) {
    Map* map = object->map();
    CHECK(map->IsMap());
    CHECK(!object->IsMap());
    CHECK(!object->IsCode());

    VerifyPointersVisitor visitor;
    int size = object->Size();
    object->IterateBody(map->instance_type(), size, &visitor);

    CHECK(!page->is_anchor());
    current = page->area_start();

  from_space_.Verify();
                      int initial_capacity,
                      int maximum_capacity) {
  ASSERT(maximum_capacity >= Page::kPageSize);
  initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
  capacity_ = initial_capacity;
  maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
  address_mask_ = ~(maximum_capacity - 1);
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
  Address end = start_ + maximum_capacity_;
  if (!heap()->isolate()->memory_allocator()->CommitBlock(start,
  for (int i = 1; i <= pages; i++) {
    NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this);

  Address start = start_ + maximum_capacity_ - capacity_;
  if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {

  if (!Commit()) return false;
  ASSERT(new_capacity <= maximum_capacity_);
  ASSERT(new_capacity > capacity_);
  Address end = start_ + maximum_capacity_;
  Address start = end - new_capacity;
  size_t delta = new_capacity - capacity_;
  if (!heap()->isolate()->memory_allocator()->CommitBlock(
  capacity_ = new_capacity;
  ASSERT(last_page != anchor());
  for (int i = pages_before + 1; i <= pages_after; i++) {
    last_page = new_page;

  ASSERT(new_capacity >= initial_capacity_);
  ASSERT(new_capacity < capacity_);
  Address old_start = space_end - capacity_;
  size_t delta = capacity_ - new_capacity;
  ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
  capacity_ = new_capacity;
void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
  while (page != &anchor_) {
    if (becomes_to_space) {
      page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
      page->SetFlag(MemoryChunk::IN_TO_SPACE);
      page->SetFlag(MemoryChunk::IN_FROM_SPACE);
      page->ClearFlag(MemoryChunk::IN_TO_SPACE);
      page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));

  from->FlipPages(0, 0);

  while (it.has_next()) {
void SemiSpace::Verify() {
  NewSpacePage* page = anchor_.next_page();
  while (page != &anchor_) {
    CHECK(page->semi_space() == this);
    CHECK(page->InNewSpace());
    CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
                                        : MemoryChunk::IN_TO_SPACE));
    CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
                                         : MemoryChunk::IN_FROM_SPACE));
    if (!is_from_space) {
      if (page->heap()->incremental_marking()->IsMarking()) {
        CHECK(!page->IsFlagSet(
    CHECK(page->prev_page()->next_page() == page);
    page = page->next_page();

  CHECK_EQ(space, end_page->semi_space());
  if (page == end_page) {
    CHECK(start <= end);
  while (page != end_page) {
    page = page->next_page();
  Initialize(space->bottom(), space->top(), size_func);

  Initialize(start, space->top(), NULL);

  Initialize(from, to, NULL);

void SemiSpaceIterator::Initialize(Address start,
  size_func_ = size_func;
static void ClearHistograms() {
  Isolate* isolate = Isolate::Current();
#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
#undef DEF_TYPE_NAME
#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
#undef CLEAR_HISTOGRAM
  isolate->js_spill_information()->Clear();

static void ClearCodeKindStatistics() {
  Isolate* isolate = Isolate::Current();
  isolate->code_kind_statistics()[i] = 0;
static void ReportCodeKindStatistics() {
  Isolate* isolate = Isolate::Current();
#define CASE(name)                            \
  case Code::name: table[Code::name] = #name; \
  switch (static_cast<Code::Kind>(i)) {
    CASE(OPTIMIZED_FUNCTION);
    CASE(KEYED_LOAD_IC);
    CASE(KEYED_STORE_IC);
    CASE(KEYED_CALL_IC);
    CASE(TO_BOOLEAN_IC);

  PrintF("\n   Code kind histograms: \n");
  if (isolate->code_kind_statistics()[i] > 0) {
    PrintF("     %-20s: %10d bytes\n", table[i],
           isolate->code_kind_statistics()[i]);
static int CollectHistogramInfo(HeapObject* obj) {
  Isolate* isolate = Isolate::Current();
  isolate->heap_histograms()[type].increment_number(1);
  isolate->heap_histograms()[type].increment_bytes(obj->Size());

  if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
        isolate->js_spill_information());
static void ReportHistogram(bool print_spill) {
  Isolate* isolate = Isolate::Current();
  PrintF("\n  Object Histogram:\n");
  if (isolate->heap_histograms()[i].number() > 0) {
    PrintF("    %-34s%10d (%10d bytes)\n",
           isolate->heap_histograms()[i].name(),
           isolate->heap_histograms()[i].number(),
           isolate->heap_histograms()[i].bytes());

  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name)               \
  string_number += isolate->heap_histograms()[type].number(); \
  string_bytes += isolate->heap_histograms()[type].bytes();
  if (string_number > 0) {
    PrintF("    %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,

  if (FLAG_collect_heap_spill_statistics && print_spill) {
    isolate->js_spill_information()->Print();

  allocated_histogram_[i].clear();
  promoted_histogram_[i].clear();
static void DoReportStatistics(Isolate* isolate,
  LOG(isolate, HeapSampleBeginEvent("NewSpace", description));

  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name) \
  string_number += info[type].number();         \
  string_bytes += info[type].bytes();
  if (string_number > 0) {
    LOG(isolate,
        HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));

  if (info[i].number() > 0) {
    LOG(isolate,
        HeapSampleItemEvent(info[i].name(), info[i].number(),

  LOG(isolate, HeapSampleEndEvent("NewSpace", description));
  if (FLAG_heap_stats) {

  PrintF("\n  Object Histogram:\n");
  if (allocated_histogram_[i].number() > 0) {
    PrintF("    %-34s%10d (%10d bytes)\n",
           allocated_histogram_[i].name(),
           allocated_histogram_[i].number(),
           allocated_histogram_[i].bytes());

  DoReportStatistics(isolate, allocated_histogram_, "allocated");
  DoReportStatistics(isolate, promoted_histogram_, "promoted");

  allocated_histogram_[type].increment_number(1);
  allocated_histogram_[type].increment_bytes(obj->Size());

  promoted_histogram_[type].increment_number(1);
  promoted_histogram_[type].increment_bytes(obj->Size());
  ASSERT(size_in_bytes > 0);
  this_as_free_space->set_size(size_in_bytes);

  if (map() == HEAP->raw_unchecked_free_space_map()) {

  if (map() == HEAP->raw_unchecked_free_space_map()) {

  if (map() == HEAP->raw_unchecked_free_space_map()) {

    : owner_(owner), heap_(owner->heap()) {
  medium_list_ = NULL;
int FreeList::Free(Address start, int size_in_bytes) {
  if (size_in_bytes == 0) return 0;
  node->set_size(heap_, size_in_bytes);

  if (size_in_bytes < kSmallListMin) return size_in_bytes;

  if (size_in_bytes <= kSmallListMax) {
    node->set_next(small_list_);
  } else if (size_in_bytes <= kMediumListMax) {
    node->set_next(medium_list_);
    medium_list_ = node;
  } else if (size_in_bytes <= kLargeListMax) {
    node->set_next(large_list_);
    node->set_next(huge_list_);
  available_ += size_in_bytes;
  ASSERT(IsVeryLong() || available_ == SumFreeLists());
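Freed blocks are bucketed by size into small, medium, large, and huge lists; anything below kSmallListMin is too small to be worth tracking and is simply reported back as wasted bytes, while available_ accumulates the total held across all lists. A compact sketch of the same bucketing, using illustrative thresholds rather than V8's actual word-based constants:

#include <cstddef>

// Illustrative size-class thresholds; V8 defines its own kSmallListMin/
// kSmallListMax/kMediumListMax/kLargeListMax in terms of words.
const size_t kSmallListMin = 4 * sizeof(void*);
const size_t kSmallListMax = 0x100;
const size_t kMediumListMax = 0x800;
const size_t kLargeListMax = 0x4000;

struct FreeNode {
  size_t size;
  FreeNode* next;
};

struct SizeClassedFreeList {
  FreeNode* small = nullptr;
  FreeNode* medium = nullptr;
  FreeNode* large = nullptr;
  FreeNode* huge = nullptr;
  size_t available = 0;

  // Returns the number of bytes too small to track (wasted), mirroring the
  // return value of FreeList::Free().
  size_t Free(FreeNode* node, size_t size_in_bytes) {
    node->size = size_in_bytes;
    if (size_in_bytes < kSmallListMin) return size_in_bytes;

    FreeNode** list;
    if (size_in_bytes <= kSmallListMax)       list = &small;
    else if (size_in_bytes <= kMediumListMax) list = &medium;
    else if (size_in_bytes <= kLargeListMax)  list = &large;
    else                                      list = &huge;

    node->next = *list;   // push onto the chosen size class
    *list = node;
    available += size_in_bytes;
    return 0;
  }
};

Keeping separate lists per size class means an allocation of a given size only has to scan lists whose blocks are guaranteed to be large enough, which is exactly how FindNodeFor() below walks the classes in order.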
FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) {
  FreeListNode* node = *list;

  while (node != NULL &&
    available_ -= node->Size();
    node = node->next();

  *node_size = node->Size();
  *list = node->next();
FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
  FreeListNode* node = NULL;

  if (size_in_bytes <= kSmallAllocationMax) {
    node = PickNodeFromList(&small_list_, node_size);
    if (node != NULL) return node;

  if (size_in_bytes <= kMediumAllocationMax) {
    node = PickNodeFromList(&medium_list_, node_size);
    if (node != NULL) return node;

  if (size_in_bytes <= kLargeAllocationMax) {
    node = PickNodeFromList(&large_list_, node_size);
    if (node != NULL) return node;

  for (FreeListNode** cur = &huge_list_;
       cur = (*cur)->next_address()) {
    FreeListNode* cur_node = *cur;
    while (cur_node != NULL &&
      available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
      cur_node = cur_node->next();

    if (cur_node == NULL) break;

    ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
    int size = cur_as_free_space->Size();
    if (size >= size_in_bytes) {
      *cur = node->next();
HeapObject* FreeList::Allocate(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  ASSERT(size_in_bytes <= kMaxBlockSize);
  ASSERT(owner_->limit() - owner_->top() < size_in_bytes);

  int new_node_size = 0;
  FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);

  available_ -= new_node_size;
  ASSERT(IsVeryLong() || available_ == SumFreeLists());

  int bytes_left = new_node_size - size_in_bytes;

  int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
  owner_->Free(owner_->top(), old_linear_size);

  for (int i = 0; i < size_in_bytes / kPointerSize; i++) {

  owner_->heap()->incremental_marking()->OldSpaceStep(
      size_in_bytes - old_linear_size);

  ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));

  owner_->Allocate(new_node_size);

  if (bytes_left > kThreshold &&
      owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
      FLAG_incremental_marking_steps) {
    int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
    owner_->Free(new_node->address() + size_in_bytes + linear_size,
                 new_node_size - size_in_bytes - linear_size);
    owner_->SetTop(new_node->address() + size_in_bytes,
                   new_node->address() + size_in_bytes + linear_size);
  } else if (bytes_left > 0) {
    owner_->SetTop(new_node->address() + size_in_bytes,
                   new_node->address() + new_node_size);
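When the node found on a free list is bigger than the request, the remainder is not threaded back onto a list right away; instead SetTop() turns it into the owner's new linear allocation area so that subsequent allocations can bump-allocate from it. A standalone sketch of that split, with simplified stand-in types rather than V8's FreeListNode and PagedSpace:

#include <cstddef>

// Minimal stand-ins: a free block and the owning space's linear allocation
// area (top/limit), in the spirit of the tail end of FreeList::Allocate().
struct FreeBlockView {
  char* address;   // start of the free block
  size_t size;     // size of the free block in bytes
};

struct LinearArea {
  char* top = nullptr;
  char* limit = nullptr;
};

// Carve `size_in_bytes` out of `node` and hand the remainder to the owner as
// its new bump-pointer area.  Returns the allocated object's address.
char* AllocateFromFreeBlock(FreeBlockView* node, size_t size_in_bytes,
                            LinearArea* owner_area) {
  char* object = node->address;
  size_t bytes_left = node->size - size_in_bytes;
  if (bytes_left > 0) {
    // Analogous to owner_->SetTop(new_node->address() + size_in_bytes,
    //                             new_node->address() + new_node_size).
    owner_area->top = node->address + size_in_bytes;
    owner_area->limit = node->address + node->size;
  } else {
    owner_area->top = nullptr;    // no remainder: leave the linear area empty
    owner_area->limit = nullptr;
  }
  return object;
}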
static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) {
    FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n);
    sum += free_space->Size();

void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) {
  sizes->huge_size_ = CountFreeListItemsInList(huge_list_, p);
  if (sizes->huge_size_ < p->area_size()) {
    sizes->small_size_ = CountFreeListItemsInList(small_list_, p);
    sizes->medium_size_ = CountFreeListItemsInList(medium_list_, p);
    sizes->large_size_ = CountFreeListItemsInList(large_list_, p);
    sizes->small_size_ = 0;
    sizes->medium_size_ = 0;
    sizes->large_size_ = 0;
static intptr_t EvictFreeListItemsInList(FreeListNode** n, Page* p) {
  while (*n != NULL) {
      FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
      sum += free_space->Size();
      n = (*n)->next_address();

intptr_t FreeList::EvictFreeListItems(Page* p) {
  intptr_t sum = EvictFreeListItemsInList(&huge_list_, p);

  if (sum < p->area_size()) {
    sum += EvictFreeListItemsInList(&small_list_, p) +
           EvictFreeListItemsInList(&medium_list_, p) +
           EvictFreeListItemsInList(&large_list_, p);

  available_ -= static_cast<int>(sum);
intptr_t FreeList::SumFreeList(FreeListNode* cur) {
  while (cur != NULL) {
    ASSERT(cur->map() == HEAP->raw_unchecked_free_space_map());
    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
    sum += cur_as_free_space->Size();

static const int kVeryLongFreeList = 500;

int FreeList::FreeListLength(FreeListNode* cur) {
  while (cur != NULL) {
    if (length == kVeryLongFreeList) return length;

bool FreeList::IsVeryLong() {
  if (FreeListLength(small_list_) == kVeryLongFreeList) return true;
  if (FreeListLength(medium_list_) == kVeryLongFreeList) return true;
  if (FreeListLength(large_list_) == kVeryLongFreeList) return true;
  if (FreeListLength(huge_list_) == kVeryLongFreeList) return true;

intptr_t FreeList::SumFreeLists() {
  intptr_t sum = SumFreeList(small_list_);
  sum += SumFreeList(medium_list_);
  sum += SumFreeList(large_list_);
  sum += SumFreeList(huge_list_);
  MaybeObject* maybe = AllocateRaw(bytes);
  if (!maybe->ToObject(&object)) return false;
  if ((top - bytes) == allocation->address()) {
    allocation_info_.top = allocation->address();

  int old_linear_size = static_cast<int>(limit() - top());

  if (FLAG_gc_verbose) {
           reinterpret_cast<intptr_t>(p));

  Address new_top = current_top + size_in_bytes;
  if (new_area == NULL) return false;

  int old_linear_size = static_cast<int>(limit() - top());

  intptr_t freed_bytes = 0;
  if (FLAG_gc_verbose) {
           reinterpret_cast<intptr_t>(p));
  } while (p != anchor() && freed_bytes < bytes_to_sweep);
  if (object != NULL) return object;

  if (!heap()->always_allocate() &&
      heap()->OldGenerationAllocationLimitReached()) {

  if (object != NULL) return object;
void PagedSpace::ReportCodeStatistics() {
  Isolate* isolate = Isolate::Current();
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  ReportCodeKindStatistics();
  PrintF("Code comment statistics (\" [ comment-txt : size/ "
         "count (average)\"):\n");
  for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
    const CommentStatistic& cs = comments_statistics[i];
    PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,

void PagedSpace::ResetCodeStatistics() {
  Isolate* isolate = Isolate::Current();
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  ClearCodeKindStatistics();
  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
    comments_statistics[i].Clear();
  comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
  comments_statistics[CommentStatistic::kMaxComments].size = 0;
  comments_statistics[CommentStatistic::kMaxComments].count = 0;
static void EnterComment(Isolate* isolate, const char* comment, int delta) {
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  if (delta <= 0) return;
  CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
    if (comments_statistics[i].comment == NULL) {
      cs = &comments_statistics[i];
    } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
      cs = &comments_statistics[i];
static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
  ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
  const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
  if (tmp[0] != '[') {

  const char* const comment_txt =
      reinterpret_cast<const char*>(it->rinfo()->data());
  const byte* prev_pc = it->rinfo()->pc();

  if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
    const char* const txt =
        reinterpret_cast<const char*>(it->rinfo()->data());
    flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
    if (txt[0] == ']') break;
    CollectCommentStatistics(isolate, it);
    prev_pc = it->rinfo()->pc();

  EnterComment(isolate, comment_txt, flat_delta);
void PagedSpace::CollectCodeStatistics() {
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
    if (obj->IsCode()) {
      isolate->code_kind_statistics()[code->kind()] += code->Size();
      RelocIterator it(code);
      const byte* prev_pc = code->instruction_start();
      while (!it.done()) {
        if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
          delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
          CollectCommentStatistics(isolate, &it);
          prev_pc = it.rinfo()->pc();
      ASSERT(code->instruction_start() <= prev_pc &&
             prev_pc <= code->instruction_end());
      delta += static_cast<int>(code->instruction_end() - prev_pc);
      EnterComment(isolate, "NoComment", delta);

void PagedSpace::ReportStatistics() {
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
    CollectHistogramInfo(obj);
  ReportHistogram(true);
void MapSpace::VerifyObject(HeapObject* object) {
  ASSERT(object->IsMap() || object->IsFreeSpace());

void CellSpace::VerifyObject(HeapObject* object) {
  ASSERT(object->IsJSGlobalPropertyCell() ||
         object->map() == heap()->two_pointer_filler_map());
  current_ = space->first_page_;

  current_ = space->first_page_;
  size_func_ = size_func;

static bool ComparePointers(void* key1, void* key2) {
  return key1 == key2;

                                   intptr_t max_capacity,
      max_capacity_(max_capacity),
      chunk_map_(ComparePointers, 1024) {}
  while (first_page_ != NULL) {
    LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));

  if (!heap()->always_allocate() &&
      heap()->OldGenerationAllocationLimitReached()) {

  if (Size() + object_size > max_capacity_) {

      AllocateLargePage(object_size, this, executable);

  size_ += static_cast<int>(page->size());
  objects_size_ += object_size;
  for (uintptr_t key = base; key <= limit; key++) {
    HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
                                              static_cast<uint32_t>(key),
    entry->value = page;
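A large page can span many chunk-alignment units, so the space registers the page in chunk_map_ under every aligned key its address range covers; FindPage() can then recover the owning LargePage from any interior address with a single hash lookup. A standalone sketch of the same keying scheme, assuming std::unordered_map in place of V8's HashMap and an illustrative alignment constant:

#include <cstddef>
#include <cstdint>
#include <unordered_map>

// Hypothetical large-page descriptor; `size` covers header plus payload.
struct LargePageInfo {
  uintptr_t address;
  size_t size;
};

const uintptr_t kAlignment = 1 << 20;  // illustrative chunk alignment (1 MB)

class LargePageMap {
 public:
  // Register the page under every aligned key its range touches, mirroring
  // the chunk_map_ insertion loop above.
  void Register(LargePageInfo* page) {
    uintptr_t base = page->address / kAlignment;
    uintptr_t limit = (page->address + page->size - 1) / kAlignment;
    for (uintptr_t key = base; key <= limit; key++) map_[key] = page;
  }

  // Mirrors the removal loop in FreeUnmarkedObjects().
  void Unregister(LargePageInfo* page) {
    uintptr_t base = page->address / kAlignment;
    uintptr_t limit = (page->address + page->size - 1) / kAlignment;
    for (uintptr_t key = base; key <= limit; key++) map_.erase(key);
  }

  // Analogous to FindPage(): any address inside the page hashes to a
  // registered key; a final range check rejects stale or foreign addresses.
  LargePageInfo* Find(uintptr_t addr) const {
    auto it = map_.find(addr / kAlignment);
    if (it == map_.end()) return nullptr;
    LargePageInfo* page = it->second;
    if (addr < page->address || addr >= page->address + page->size) {
      return nullptr;
    }
    return page;
  }

 private:
  std::unordered_map<uintptr_t, LargePageInfo*> map_;
};

The per-key registration trades a little hash-table space for constant-time interior-pointer lookups, which matters because large-object addresses cannot be mapped back to their page by simple address masking the way normal pages can.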
  reinterpret_cast<Object**>(object->address())[0] =
      heap()->fixed_array_map();
  HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
                                        static_cast<uint32_t>(key),

  while (current != NULL) {
    bool is_pointer_object = object->IsFixedArray();
    if (mark_bit.Get()) {
      if (previous == NULL) {
        first_page_ = current;

      MarkCompactCollector::ReportDeleteIfNeeded(object, heap()->isolate());
      size_ -= static_cast<int>(page->size());
      objects_size_ -= object->Size();

      uintptr_t base = reinterpret_cast<uintptr_t>(page)/alignment;
      uintptr_t limit = base + (page->size()-1)/alignment;
      for (uintptr_t key = base; key <= limit; key++) {
        chunk_map_.Remove(reinterpret_cast<void*>(key),
                          static_cast<uint32_t>(key));

      if (is_pointer_object) {

  Address address = object->address();
  bool owned = (chunk->owner() == this);
void LargeObjectSpace::Verify() {
       chunk = chunk->next_page()) {
    Map* map = object->map();

    ASSERT(object->IsCode() || object->IsSeqString() ||
           object->IsExternalString() || object->IsFixedArray() ||
           object->IsFixedDoubleArray() || object->IsByteArray());

    if (object->IsCode()) {
      VerifyPointersVisitor code_visitor;
      object->IterateBody(map->instance_type(),
    } else if (object->IsFixedArray()) {
      for (int j = 0; j < array->length(); j++) {
        Object* element = array->get(j);
        if (element->IsHeapObject()) {
          ASSERT(element_object->map()->IsMap());

  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
void LargeObjectSpace::ReportStatistics() {
  PrintF("  size: %" V8_PTR_PREFIX "d\n", size_);
  int num_objects = 0;
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    CollectHistogramInfo(obj);

  PrintF("  number of objects %d, "
         "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
  if (num_objects > 0) ReportHistogram(false);
void LargeObjectSpace::CollectCodeStatistics() {
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
    if (obj->IsCode()) {
      isolate->code_kind_statistics()[code->kind()] += code->Size();

  PrintF("Page@%p in %s\n",
  printf(" --------------------------------------\n");
  HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
  unsigned mark_size = 0;
  for (HeapObject* object = objects.Next();
       object = objects.Next()) {
    PrintF(" %c ", (is_marked ? '!' : ' '));
    object->ShortPrint();
  printf(" --------------------------------------\n");
  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
static bool IsBlack(MarkBit mark_bit)
virtual bool ReserveSpace(int bytes)
static const int kHeaderSize
#define SLOW_ASSERT(condition)
void(* MemoryAllocationCallback)(ObjectSpace space, AllocationAction action, int size)
#define CHECK_EQ(expected, value)
void set_next_page(Page *page)
void ZapBlock(Address start, size_t size)
bool GrowTo(int new_capacity)
intptr_t OldGenerationCapacityAvailable()
bool IsAddressAligned(Address addr, intptr_t alignment, int offset=0)
#define INSTANCE_TYPE_LIST(V)
static MemoryChunk * Initialize(Heap *heap, Address base, size_t size, Address area_start, Address area_end, Executability executable, Space *owner)
void set_size(Heap *heap, int size_in_bytes)
bool Contains(Address addr)
friend class PageIterator
static void ReportDeleteIfNeeded(HeapObject *obj, Isolate *isolate)
void PrintF(const char *format,...)
void SetTop(Address top, Address limit)
bool was_swept_conservatively()
void set_next(FreeListNode *next)
bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback)
virtual void PrepareForMarkCompact()
bool SetUp(const size_t requested_size)
void ReleaseAllUnusedPages()
static Smi * FromInt(int value)
#define LOG(isolate, Call)
Page * first_unswept_page_
MemoryChunk * next_chunk_
static bool ShouldBeSweptLazily(Page *p)
static MemoryChunk * FromAddress(Address a)
LargeObjectIterator(LargeObjectSpace *space)
value format" "after each garbage collection") DEFINE_bool(print_cumulative_gc_stat, false, "print cumulative GC statistics in name=value format on exit") DEFINE_bool(trace_gc_verbose, false, "print more details following each garbage collection") DEFINE_bool(trace_fragmentation, false, "report fragmentation for old pointer and data pages") DEFINE_bool(collect_maps, true, "garbage collect maps from which no objects can be reached") DEFINE_bool(flush_code, true, "flush code that we expect not to use again before full gc") DEFINE_bool(incremental_marking, true, "use incremental marking") DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps") DEFINE_bool(trace_incremental_marking, false, "trace progress of the incremental marking") DEFINE_bool(use_idle_notification, true, "Use idle notification to reduce memory footprint.") DEFINE_bool(send_idle_notification, false, "Send idle notifcation between stress runs.") DEFINE_bool(use_ic, true, "use inline caching") DEFINE_bool(native_code_counters, false, "generate extra code for manipulating stats counters") DEFINE_bool(always_compact, false, "Perform compaction on every full GC") DEFINE_bool(lazy_sweeping, true, "Use lazy sweeping for old pointer and data spaces") DEFINE_bool(never_compact, false, "Never perform compaction on full GC-testing only") DEFINE_bool(compact_code_space, true, "Compact code space on full non-incremental collections") DEFINE_bool(cleanup_code_caches_at_gc, true, "Flush inline caches prior to mark compact collection and" "flush code caches in maps during mark compact cycle.") DEFINE_int(random_seed, 0, "Default seed for initializing random generator" "(0, the default, means to use system random).") DEFINE_bool(use_verbose_printer, true, "allows verbose printing") DEFINE_bool(allow_natives_syntax, false, "allow natives syntax") DEFINE_bool(trace_sim, false, "Trace simulator execution") DEFINE_bool(check_icache, false, "Check icache flushes in ARM and MIPS simulator") DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions") DEFINE_int(sim_stack_alignment, 8, "Stack alingment in bytes in simulator(4 or 8, 8 is default)") DEFINE_bool(trace_exception, false, "print stack trace when throwing exceptions") DEFINE_bool(preallocate_message_memory, false, "preallocate some memory to build stack traces.") DEFINE_bool(randomize_hashes, true, "randomize hashes to avoid predictable hash collisions" "(with snapshots this option cannot override the baked-in seed)") DEFINE_int(hash_seed, 0, "Fixed seed to use to hash property keys(0 means random)" "(with snapshots this option cannot override the baked-in seed)") DEFINE_bool(preemption, false, "activate a 100ms timer that switches between V8 threads") DEFINE_bool(regexp_optimization, true, "generate optimized regexp code") DEFINE_bool(testing_bool_flag, true, "testing_bool_flag") DEFINE_int(testing_int_flag, 13, "testing_int_flag") DEFINE_float(testing_float_flag, 2.5, "float-flag") DEFINE_string(testing_string_flag, "Hello, world!", "string-flag") DEFINE_int(testing_prng_seed, 42, "Seed used for threading test randomness") DEFINE_string(testing_serialization_file, "/tmp/serdes", "file in which to serialize heap") DEFINE_bool(help, false, "Print usage message, including flags, on console") DEFINE_bool(dump_counters, false, "Dump counters on exit") DEFINE_string(map_counters, "", "Map counters to a file") DEFINE_args(js_arguments, JSARGUMENTS_INIT, "Pass all remaining arguments to the script.Alias for\"--\".") DEFINE_bool(debug_compile_events, 
true,"Enable debugger compile events") DEFINE_bool(debug_script_collected_events, true,"Enable debugger script collected events") DEFINE_bool(gdbjit, false,"enable GDBJIT interface (disables compacting GC)") DEFINE_bool(gdbjit_full, false,"enable GDBJIT interface for all code objects") DEFINE_bool(gdbjit_dump, false,"dump elf objects with debug info to disk") DEFINE_string(gdbjit_dump_filter,"","dump only objects containing this substring") DEFINE_bool(force_marking_deque_overflows, false,"force overflows of marking deque by reducing it's size ""to 64 words") DEFINE_bool(stress_compaction, false,"stress the GC compactor to flush out bugs (implies ""--force_marking_deque_overflows)")#define FLAG DEFINE_bool(enable_slow_asserts, false,"enable asserts that are slow to execute") DEFINE_bool(trace_codegen, false,"print name of functions for which code is generated") DEFINE_bool(print_source, false,"pretty print source code") DEFINE_bool(print_builtin_source, false,"pretty print source code for builtins") DEFINE_bool(print_ast, false,"print source AST") DEFINE_bool(print_builtin_ast, false,"print source AST for builtins") DEFINE_string(stop_at,"","function name where to insert a breakpoint") DEFINE_bool(print_builtin_scopes, false,"print scopes for builtins") DEFINE_bool(print_scopes, false,"print scopes") DEFINE_bool(trace_contexts, false,"trace contexts operations") DEFINE_bool(gc_greedy, false,"perform GC prior to some allocations") DEFINE_bool(gc_verbose, false,"print stuff during garbage collection") DEFINE_bool(heap_stats, false,"report heap statistics before and after GC") DEFINE_bool(code_stats, false,"report code statistics after GC") DEFINE_bool(verify_heap, false,"verify heap pointers before and after GC") DEFINE_bool(print_handles, false,"report handles after GC") DEFINE_bool(print_global_handles, false,"report global handles after GC") DEFINE_bool(trace_ic, false,"trace inline cache state transitions") DEFINE_bool(print_interfaces, false,"print interfaces") DEFINE_bool(print_interface_details, false,"print interface inference details") DEFINE_int(print_interface_depth, 5,"depth for printing interfaces") DEFINE_bool(trace_normalization, false,"prints when objects are turned into dictionaries.") DEFINE_bool(trace_lazy, false,"trace lazy compilation") DEFINE_bool(collect_heap_spill_statistics, false,"report heap spill statistics along with heap_stats ""(requires heap_stats)") DEFINE_bool(trace_isolates, false,"trace isolate state changes") DEFINE_bool(log_state_changes, false,"Log state changes.") DEFINE_bool(regexp_possessive_quantifier, false,"enable possessive quantifier syntax for testing") DEFINE_bool(trace_regexp_bytecodes, false,"trace regexp bytecode execution") DEFINE_bool(trace_regexp_assembler, false,"trace regexp macro assembler calls.")#define FLAG DEFINE_bool(log, false,"Minimal logging (no API, code, GC, suspect, or handles samples).") DEFINE_bool(log_all, false,"Log all events to the log file.") DEFINE_bool(log_runtime, false,"Activate runtime system %Log call.") DEFINE_bool(log_api, false,"Log API events to the log file.") DEFINE_bool(log_code, false,"Log code events to the log file without profiling.") DEFINE_bool(log_gc, false,"Log heap samples on garbage collection for the hp2ps tool.") DEFINE_bool(log_handles, false,"Log global handle events.") DEFINE_bool(log_snapshot_positions, false,"log positions of (de)serialized objects in the snapshot.") DEFINE_bool(log_suspect, false,"Log suspect operations.") DEFINE_bool(prof, false,"Log statistical profiling information 
(implies --log-code).") DEFINE_bool(prof_auto, true,"Used with --prof, starts profiling automatically") DEFINE_bool(prof_lazy, false,"Used with --prof, only does sampling and logging"" when profiler is active (implies --noprof_auto).") DEFINE_bool(prof_browser_mode, true,"Used with --prof, turns on browser-compatible mode for profiling.") DEFINE_bool(log_regexp, false,"Log regular expression execution.") DEFINE_bool(sliding_state_window, false,"Update sliding state window counters.") DEFINE_string(logfile,"v8.log","Specify the name of the log file.") DEFINE_bool(ll_prof, false,"Enable low-level linux profiler.")#define FLAG DEFINE_bool(trace_elements_transitions, false,"trace elements transitions") DEFINE_bool(print_code_stubs, false,"print code stubs") DEFINE_bool(test_secondary_stub_cache, false,"test secondary stub cache by disabling the primary one") DEFINE_bool(test_primary_stub_cache, false,"test primary stub cache by disabling the secondary one") DEFINE_bool(print_code, false,"print generated code") DEFINE_bool(print_opt_code, false,"print optimized code") DEFINE_bool(print_unopt_code, false,"print unoptimized code before ""printing optimized code based on it") DEFINE_bool(print_code_verbose, false,"print more information for code") DEFINE_bool(print_builtin_code, false,"print generated code for builtins")#43"/Users/thlorenz/dev/dx/v8-perf/build/v8/src/flags.cc"2#define FLAG_MODE_DEFINE_DEFAULTS#1"/Users/thlorenz/dev/dx/v8-perf/build/v8/src/flag-definitions.h"1#define FLAG_FULL(ftype, ctype, nam, def, cmt)#define FLAG_READONLY(ftype, ctype, nam, def, cmt)#define DEFINE_implication(whenflag, thenflag)#define DEFINE_bool(nam, def, cmt)#define DEFINE_int(nam, def, cmt)#define DEFINE_float(nam, def, cmt)#define DEFINE_string(nam, def, cmt)#define DEFINE_args(nam, def, cmt)#define FLAG DEFINE_bool(use_strict, false,"enforce strict mode") DEFINE_bool(es5_readonly, false,"activate correct semantics for inheriting readonliness") DEFINE_bool(es52_globals, false,"activate new semantics for global var declarations") DEFINE_bool(harmony_typeof, false,"enable harmony semantics for typeof") DEFINE_bool(harmony_scoping, false,"enable harmony block scoping") DEFINE_bool(harmony_modules, false,"enable harmony modules (implies block scoping)") DEFINE_bool(harmony_proxies, false,"enable harmony proxies") DEFINE_bool(harmony_collections, false,"enable harmony collections (sets, maps, and weak maps)") DEFINE_bool(harmony, false,"enable all harmony features (except typeof)") DEFINE_implication(harmony, harmony_scoping) DEFINE_implication(harmony, harmony_modules) DEFINE_implication(harmony, harmony_proxies) DEFINE_implication(harmony, harmony_collections) DEFINE_implication(harmony_modules, harmony_scoping) DEFINE_bool(packed_arrays, false,"optimizes arrays that have no holes") DEFINE_bool(smi_only_arrays, true,"tracks arrays with only smi values") DEFINE_bool(clever_optimizations, true,"Optimize object size, Array shift, DOM strings and string +") DEFINE_bool(unbox_double_arrays, true,"automatically unbox arrays of doubles") DEFINE_bool(string_slices, true,"use string slices") DEFINE_bool(crankshaft, true,"use crankshaft") DEFINE_string(hydrogen_filter,"","optimization filter") DEFINE_bool(use_range, true,"use hydrogen range analysis") DEFINE_bool(eliminate_dead_phis, true,"eliminate dead phis") DEFINE_bool(use_gvn, true,"use hydrogen global value numbering") DEFINE_bool(use_canonicalizing, true,"use hydrogen instruction canonicalizing") DEFINE_bool(use_inlining, true,"use function inlining") 
DEFINE_int(max_inlined_source_size, 600,"maximum source size in bytes considered for a single inlining") DEFINE_int(max_inlined_nodes, 196,"maximum number of AST nodes considered for a single inlining") DEFINE_int(max_inlined_nodes_cumulative, 196,"maximum cumulative number of AST nodes considered for inlining") DEFINE_bool(loop_invariant_code_motion, true,"loop invariant code motion") DEFINE_bool(collect_megamorphic_maps_from_stub_cache, true,"crankshaft harvests type feedback from stub cache") DEFINE_bool(hydrogen_stats, false,"print statistics for hydrogen") DEFINE_bool(trace_hydrogen, false,"trace generated hydrogen to file") DEFINE_string(trace_phase,"Z","trace generated IR for specified phases") DEFINE_bool(trace_inlining, false,"trace inlining decisions") DEFINE_bool(trace_alloc, false,"trace register allocator") DEFINE_bool(trace_all_uses, false,"trace all use positions") DEFINE_bool(trace_range, false,"trace range analysis") DEFINE_bool(trace_gvn, false,"trace global value numbering") DEFINE_bool(trace_representation, false,"trace representation types") DEFINE_bool(stress_pointer_maps, false,"pointer map for every instruction") DEFINE_bool(stress_environments, false,"environment for every instruction") DEFINE_int(deopt_every_n_times, 0,"deoptimize every n times a deopt point is passed") DEFINE_bool(trap_on_deopt, false,"put a break point before deoptimizing") DEFINE_bool(deoptimize_uncommon_cases, true,"deoptimize uncommon cases") DEFINE_bool(polymorphic_inlining, true,"polymorphic inlining") DEFINE_bool(use_osr, true,"use on-stack replacement") DEFINE_bool(array_bounds_checks_elimination, false,"perform array bounds checks elimination") DEFINE_bool(array_index_dehoisting, false,"perform array index dehoisting") DEFINE_bool(trace_osr, false,"trace on-stack replacement") DEFINE_int(stress_runs, 0,"number of stress runs") DEFINE_bool(optimize_closures, true,"optimize closures") DEFINE_bool(inline_construct, true,"inline constructor calls") DEFINE_bool(inline_arguments, true,"inline functions with arguments object") DEFINE_int(loop_weight, 1,"loop weight for representation inference") DEFINE_bool(optimize_for_in, true,"optimize functions containing for-in loops") DEFINE_bool(experimental_profiler, true,"enable all profiler experiments") DEFINE_bool(watch_ic_patching, false,"profiler considers IC stability") DEFINE_int(frame_count, 1,"number of stack frames inspected by the profiler") DEFINE_bool(self_optimization, false,"primitive functions trigger their own optimization") DEFINE_bool(direct_self_opt, false,"call recompile stub directly when self-optimizing") DEFINE_bool(retry_self_opt, false,"re-try self-optimization if it failed") DEFINE_bool(count_based_interrupts, false,"trigger profiler ticks based on counting instead of timing") DEFINE_bool(interrupt_at_exit, false,"insert an interrupt check at function exit") DEFINE_bool(weighted_back_edges, false,"weight back edges by jump distance for interrupt triggering") DEFINE_int(interrupt_budget, 5900,"execution budget before interrupt is triggered") DEFINE_int(type_info_threshold, 15,"percentage of ICs that must have type info to allow optimization") DEFINE_int(self_opt_count, 130,"call count before self-optimization") DEFINE_implication(experimental_profiler, watch_ic_patching) DEFINE_implication(experimental_profiler, self_optimization) DEFINE_implication(experimental_profiler, retry_self_opt) DEFINE_implication(experimental_profiler, count_based_interrupts) DEFINE_implication(experimental_profiler, interrupt_at_exit) 
DEFINE_implication(experimental_profiler, weighted_back_edges) DEFINE_bool(trace_opt_verbose, false,"extra verbose compilation tracing") DEFINE_implication(trace_opt_verbose, trace_opt) DEFINE_bool(debug_code, false,"generate extra code (assertions) for debugging") DEFINE_bool(code_comments, false,"emit comments in code disassembly") DEFINE_bool(enable_sse2, true,"enable use of SSE2 instructions if available") DEFINE_bool(enable_sse3, true,"enable use of SSE3 instructions if available") DEFINE_bool(enable_sse4_1, true,"enable use of SSE4.1 instructions if available") DEFINE_bool(enable_cmov, true,"enable use of CMOV instruction if available") DEFINE_bool(enable_rdtsc, true,"enable use of RDTSC instruction if available") DEFINE_bool(enable_sahf, true,"enable use of SAHF instruction if available (X64 only)") DEFINE_bool(enable_vfp3, true,"enable use of VFP3 instructions if available - this implies ""enabling ARMv7 instructions (ARM only)") DEFINE_bool(enable_armv7, true,"enable use of ARMv7 instructions if available (ARM only)") DEFINE_bool(enable_fpu, true,"enable use of MIPS FPU instructions if available (MIPS only)") DEFINE_string(expose_natives_as, NULL,"expose natives in global object") DEFINE_string(expose_debug_as, NULL,"expose debug in global object") DEFINE_bool(expose_gc, false,"expose gc extension") DEFINE_bool(expose_externalize_string, false,"expose externalize string extension") DEFINE_int(stack_trace_limit, 10,"number of stack frames to capture") DEFINE_bool(builtins_in_stack_traces, false,"show built-in functions in stack traces") DEFINE_bool(disable_native_files, false,"disable builtin natives files") DEFINE_bool(inline_new, true,"use fast inline allocation") DEFINE_bool(stack_trace_on_abort, true,"print a stack trace if an assertion failure occurs") DEFINE_bool(trace, false,"trace function calls") DEFINE_bool(mask_constants_with_cookie, true,"use random jit cookie to mask large constants") DEFINE_bool(lazy, true,"use lazy compilation") DEFINE_bool(trace_opt, false,"trace lazy optimization") DEFINE_bool(trace_opt_stats, false,"trace lazy optimization statistics") DEFINE_bool(opt, true,"use adaptive optimizations") DEFINE_bool(always_opt, false,"always try to optimize functions") DEFINE_bool(prepare_always_opt, false,"prepare for turning on always opt") DEFINE_bool(trace_deopt, false,"trace deoptimization") DEFINE_int(min_preparse_length, 1024,"minimum length for automatic enable preparsing") DEFINE_bool(always_full_compiler, false,"try to use the dedicated run-once backend for all code") DEFINE_bool(trace_bailout, false,"print reasons for falling back to using the classic V8 backend") DEFINE_bool(compilation_cache, true,"enable compilation cache") DEFINE_bool(cache_prototype_transitions, true,"cache prototype transitions") DEFINE_bool(trace_debug_json, false,"trace debugging JSON request/response") DEFINE_bool(debugger_auto_break, true,"automatically set the debug break flag when debugger commands are ""in the queue") DEFINE_bool(enable_liveedit, true,"enable liveedit experimental feature") DEFINE_bool(break_on_abort, true,"always cause a debug break before aborting") DEFINE_int(stack_size, kPointerSize *123,"default size of stack region v8 is allowed to use (in kBytes)") DEFINE_int(max_stack_trace_source_length, 300,"maximum length of function source code printed in a stack trace.") DEFINE_bool(always_inline_smi_code, false,"always inline smi code in non-opt code") DEFINE_int(max_new_space_size, 0,"max size of the new generation (in kBytes)") DEFINE_int(max_old_space_size, 
0,"max size of the old generation (in Mbytes)") DEFINE_int(max_executable_size, 0,"max size of executable memory (in Mbytes)") DEFINE_bool(gc_global, false,"always perform global GCs") DEFINE_int(gc_interval,-1,"garbage collect after <n> allocations") DEFINE_bool(trace_gc, false,"print one trace line following each garbage collection") DEFINE_bool(trace_gc_nvp, false,"print one detailed trace line in name=value format ""after each garbage collection") DEFINE_bool(print_cumulative_gc_stat, false,"print cumulative GC statistics in name=value format on exit") DEFINE_bool(trace_gc_verbose, false,"print more details following each garbage collection") DEFINE_bool(trace_fragmentation, false,"report fragmentation for old pointer and data pages") DEFINE_bool(collect_maps, true,"garbage collect maps from which no objects can be reached") DEFINE_bool(flush_code, true,"flush code that we expect not to use again before full gc") DEFINE_bool(incremental_marking, true,"use incremental marking") DEFINE_bool(incremental_marking_steps, true,"do incremental marking steps") DEFINE_bool(trace_incremental_marking, false,"trace progress of the incremental marking") DEFINE_bool(use_idle_notification, true,"Use idle notification to reduce memory footprint.") DEFINE_bool(send_idle_notification, false,"Send idle notifcation between stress runs.") DEFINE_bool(use_ic, true,"use inline caching") DEFINE_bool(native_code_counters, false,"generate extra code for manipulating stats counters") DEFINE_bool(always_compact, false,"Perform compaction on every full GC") DEFINE_bool(lazy_sweeping, true,"Use lazy sweeping for old pointer and data spaces") DEFINE_bool(never_compact, false,"Never perform compaction on full GC - testing only") DEFINE_bool(compact_code_space, true,"Compact code space on full non-incremental collections") DEFINE_bool(cleanup_code_caches_at_gc, true,"Flush inline caches prior to mark compact collection and ""flush code caches in maps during mark compact cycle.") DEFINE_int(random_seed, 0,"Default seed for initializing random generator ""(0, the default, means to use system random).") DEFINE_bool(use_verbose_printer, true,"allows verbose printing") DEFINE_bool(allow_natives_syntax, false,"allow natives syntax") DEFINE_bool(trace_sim, false,"Trace simulator execution") DEFINE_bool(check_icache, false,"Check icache flushes in ARM and MIPS simulator") DEFINE_int(stop_sim_at, 0,"Simulator stop after x number of instructions") DEFINE_int(sim_stack_alignment, 8,"Stack alingment in bytes in simulator (4 or 8, 8 is default)") DEFINE_bool(trace_exception, false,"print stack trace when throwing exceptions") DEFINE_bool(preallocate_message_memory, false,"preallocate some memory to build stack traces.") DEFINE_bool(randomize_hashes, true,"randomize hashes to avoid predictable hash collisions ""(with snapshots this option cannot override the baked-in seed)") DEFINE_int(hash_seed, 0,"Fixed seed to use to hash property keys (0 means random)""(with snapshots this option cannot override the baked-in seed)") DEFINE_bool(preemption, false,"activate a 100ms timer that switches between V8 threads") DEFINE_bool(regexp_optimization, true,"generate optimized regexp code") DEFINE_bool(testing_bool_flag, true,"testing_bool_flag") DEFINE_int(testing_int_flag, 13,"testing_int_flag") DEFINE_float(testing_float_flag, 2.5,"float-flag") DEFINE_string(testing_string_flag,"Hello, world!","string-flag") DEFINE_int(testing_prng_seed, 42,"Seed used for threading test randomness") DEFINE_string(testing_serialization_file,"/tmp/serdes","file 
in which to serialize heap") DEFINE_bool(help, false,"Print usage message, including flags, on console") DEFINE_bool(dump_counters, false,"Dump counters on exit") DEFINE_string(map_counters,"","Map counters to a file") DEFINE_args(js_arguments, JSARGUMENTS_INIT,"Pass all remaining arguments to the script. Alias for \"--\".") DEFINE_bool(debug_compile_events, true,"Enable debugger compile events") DEFINE_bool(debug_script_collected_events, true,"Enable debugger script collected events") DEFINE_bool(gdbjit, false,"enable GDBJIT interface (disables compacting GC)") DEFINE_bool(gdbjit_full, false,"enable GDBJIT interface for all code objects") DEFINE_bool(gdbjit_dump, false,"dump elf objects with debug info to disk") DEFINE_string(gdbjit_dump_filter,"","dump only objects containing this substring") DEFINE_bool(force_marking_deque_overflows, false,"force overflows of marking deque by reducing it's size ""to 64 words") DEFINE_bool(stress_compaction, false,"stress the GC compactor to flush out bugs (implies ""--force_marking_deque_overflows)")#define FLAG DEFINE_bool(enable_slow_asserts, false,"enable asserts that are slow to execute") DEFINE_bool(trace_codegen, false,"print name of functions for which code is generated") DEFINE_bool(print_source, false,"pretty print source code") DEFINE_bool(print_builtin_source, false,"pretty print source code for builtins") DEFINE_bool(print_ast, false,"print source AST") DEFINE_bool(print_builtin_ast, false,"print source AST for builtins") DEFINE_string(stop_at,"","function name where to insert a breakpoint") DEFINE_bool(print_builtin_scopes, false,"print scopes for builtins") DEFINE_bool(print_scopes, false,"print scopes") DEFINE_bool(trace_contexts, false,"trace contexts operations") DEFINE_bool(gc_greedy, false,"perform GC prior to some allocations") DEFINE_bool(gc_verbose, false,"print stuff during garbage collection") DEFINE_bool(heap_stats, false,"report heap statistics before and after GC") DEFINE_bool(code_stats, false,"report code statistics after GC") DEFINE_bool(verify_heap, false,"verify heap pointers before and after GC") DEFINE_bool(print_handles, false,"report handles after GC") DEFINE_bool(print_global_handles, false,"report global handles after GC") DEFINE_bool(trace_ic, false,"trace inline cache state transitions") DEFINE_bool(print_interfaces, false,"print interfaces") DEFINE_bool(print_interface_details, false,"print interface inference details") DEFINE_int(print_interface_depth, 5,"depth for printing interfaces") DEFINE_bool(trace_normalization, false,"prints when objects are turned into dictionaries.") DEFINE_bool(trace_lazy, false,"trace lazy compilation") DEFINE_bool(collect_heap_spill_statistics, false,"report heap spill statistics along with heap_stats ""(requires heap_stats)") DEFINE_bool(trace_isolates, false,"trace isolate state changes") DEFINE_bool(log_state_changes, false,"Log state changes.") DEFINE_bool(regexp_possessive_quantifier, false,"enable possessive quantifier syntax for testing") DEFINE_bool(trace_regexp_bytecodes, false,"trace regexp bytecode execution") DEFINE_bool(trace_regexp_assembler, false,"trace regexp macro assembler calls.")#define FLAG DEFINE_bool(log, false,"Minimal logging (no API, code, GC, suspect, or handles samples).") DEFINE_bool(log_all, false,"Log all events to the log file.") DEFINE_bool(log_runtime, false,"Activate runtime system %Log call.") DEFINE_bool(log_api, false,"Log API events to the log file.") DEFINE_bool(log_code, false,"Log code events to the log file without profiling.") 
DEFINE_bool(log_gc, false,"Log heap samples on garbage collection for the hp2ps tool.") DEFINE_bool(log_handles, false,"Log global handle events.") DEFINE_bool(log_snapshot_positions, false,"log positions of (de)serialized objects in the snapshot.") DEFINE_bool(log_suspect, false,"Log suspect operations.") DEFINE_bool(prof, false,"Log statistical profiling information (implies --log-code).") DEFINE_bool(prof_auto, true,"Used with --prof, starts profiling automatically") DEFINE_bool(prof_lazy, false,"Used with --prof, only does sampling and logging"" when profiler is active (implies --noprof_auto).") DEFINE_bool(prof_browser_mode, true,"Used with --prof, turns on browser-compatible mode for profiling.") DEFINE_bool(log_regexp, false,"Log regular expression execution.") DEFINE_bool(sliding_state_window, false,"Update sliding state window counters.") DEFINE_string(logfile,"v8.log","Specify the name of the log file.") DEFINE_bool(ll_prof, false,"Enable low-level linux profiler.")#define FLAG DEFINE_bool(trace_elements_transitions, false,"trace elements transitions") DEFINE_bool(print_code_stubs, false,"print code stubs") DEFINE_bool(test_secondary_stub_cache, false,"test secondary stub cache by disabling the primary one") DEFINE_bool(test_primary_stub_cache, false,"test primary stub cache by disabling the secondary one") DEFINE_bool(print_code, false,"print generated code") DEFINE_bool(print_opt_code, false,"print optimized code") DEFINE_bool(print_unopt_code, false,"print unoptimized code before ""printing optimized code based on it") DEFINE_bool(print_code_verbose, false,"print more information for code") DEFINE_bool(print_builtin_code, false,"print generated code for builtins")#47"/Users/thlorenz/dev/dx/v8-perf/build/v8/src/flags.cc"2 namespace{struct Flag{enum FlagType{TYPE_BOOL, TYPE_INT, TYPE_FLOAT, TYPE_STRING, TYPE_ARGS} name
static HeapObject * cast(Object *obj)
bool SetUp(intptr_t max_capacity, intptr_t capacity_executable)
void TakeControl(VirtualMemory *from)
void ResetAllocationInfo()
void SetNewSpacePageFlags(NewSpacePage *chunk)
intptr_t SizeOfFirstPage()
static const intptr_t kPageAlignmentMask
static FreeSpace * cast(Object *obj)
HeapObjectCallback GcSafeSizeOfOldObjectFunction()
static void Clear(MemoryChunk *chunk)
static Failure * Exception()
PromotionQueue * promotion_queue()
intptr_t inline_allocation_limit_step()
Page * AllocatePage(intptr_t size, PagedSpace *owner, Executability executable)
const char * AllocationSpaceName(AllocationSpace space)
#define ASSERT(condition)
void set_reserved_memory(VirtualMemory *reservation)
v8::Handle< v8::Value > Print(const v8::Arguments &args)
static void IncrementLiveBytesFromGC(Address address, int by)
#define ASSERT_GE(v1, v2)
void Step(intptr_t allocated, CompletionAction action)
LargePage * AllocateLargePage(intptr_t object_size, Space *owner, Executability executable)
static const int kFlagsOffset
const char * comment() const
void RecordAllocation(HeapObject *obj)
NewSpacePage * current_page()
#define INCREMENT(type, size, name, camel_name)
MemoryAllocator(Isolate *isolate)
#define STRING_TYPE_LIST(V)
void FreeUnmarkedObjects()
static const int kPageSize
CodeRange(Isolate *isolate)
bool IsSweepingComplete()
static Code * cast(Object *obj)
Address AllocateAlignedMemory(size_t requested, size_t alignment, Executability executable, VirtualMemory *controller)
const intptr_t kHeapObjectTagMask
void FreeMemory(VirtualMemory *reservation, Executability executable)
static bool IsAtEnd(Address addr)
LargeObjectSpace(Heap *heap, intptr_t max_capacity, AllocationSpace id)
static MarkBit MarkBitFrom(Address addr)
AllocationStats accounting_stats_
void Free(MemoryChunk *chunk)
Address ReserveAlignedMemory(size_t requested, size_t alignment, VirtualMemory *controller)
Executability executable()
NewSpacePage * first_page()
bool was_swept_conservatively_
SlotsBuffer * slots_buffer_
void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback)
bool Guard(void *address)
typedef int (*HeapObjectCallback)(HeapObject *obj)
LargePage * FindPage(Address a)
FreeListNode ** next_address()
bool AdvanceSweeper(intptr_t bytes_to_sweep)
#define OFFSET_OF(type, field)
static NewSpacePage * FromAddress(Address address_in_page)
friend class NewSpacePageIterator
bool UncommitBlock(Address start, size_t size)
static Failure * RetryAfterGC()
static int CodePageAreaStartOffset()
void EvictEvacuationCandidatesFromFreeLists()
virtual int RoundSizeDownToObjectAlignment(int size)
MemoryAllocator * memory_allocator()
static Address & Address_at(Address addr)
void QueueMemoryChunkForFree(MemoryChunk *chunk)
static int CodePageGuardSize()
bool IsAligned(T value, U alignment)
void InitializeReservedMemory()
void decrement_scan_on_scavenge_pages()
virtual bool ReserveSpace(int bytes)
bool Commit(void *address, size_t size, bool is_executable)
void DecreaseUnsweptFreeBytes(Page *p)
Entry * Lookup(void *key, uint32_t hash, bool insert, AllocationPolicy allocator=AllocationPolicy())
void initialize_scan_on_scavenge(bool scan)
T RoundUp(T x, intptr_t m)
#define ASSERT_LE(v1, v2)
void RecordPromotion(HeapObject *obj)
bool contains(Address address)
void set_prev_page(Page *page)
MUST_USE_RESULT Address AllocateRawMemory(const size_t requested, size_t *allocated)
static int CodePageAreaEndOffset()
int Free(Address start, int size_in_bytes)
#define CHECK_NE(unexpected, value)
void ReleasePage(Page *page)
MemoryChunk * prev_chunk_
static Page * Initialize(Heap *heap, MemoryChunk *chunk, Executability executable, PagedSpace *owner)
void set_map_no_write_barrier(Map *value)
static intptr_t SweepConservatively(PagedSpace *space, Page *p)
MaybeObject * FindObject(Address a)
static const int kObjectStartOffset
void set_prev_page(NewSpacePage *page)
bool Contains(HeapObject *obj)
static void Swap(SemiSpace *from, SemiSpace *to)
void InitializeAsAnchor(PagedSpace *owner)
int InitialSemiSpaceSize()
static bool ReleaseRegion(void *base, size_t size)
SemiSpaceIterator(NewSpace *space)
static const intptr_t kLiveBytesOffset
static bool CommitRegion(void *base, size_t size, bool is_executable)
void set_next_page(NewSpacePage *page)
void Sort(int(*cmp)(const T *x, const T *y))
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space)
MemoryChunk * AllocateChunk(intptr_t body_size, Executability executable, Space *space)
static NewSpacePage * FromLimit(Address address_limit)
IncrementalMarking * incremental_marking()
void IncrementLiveBytes(int by)
void SetUp(Address start, int initial_capacity, int maximum_capacity)
virtual bool ReserveSpace(int bytes)
static FreeListNode * FromAddress(Address address)
SemiSpace(Heap *heap, SemiSpaceId semispace)
NewSpacePage * next_page() const
static bool UncommitRegion(void *base, size_t size)
static const intptr_t kAlignment
void set_owner(Space *space)
void * Remove(void *key, uint32_t hash)
void RememberUnmappedPage(Address page, bool compacted)
static void IncrementLiveBytesFromMutator(Address address, int by)
void PerformAllocationCallback(ObjectSpace space, AllocationAction action, size_t size)
LargePage * next_page() const
static const intptr_t kCopyOnFlipFlagsMask
#define ASSERT_EQ(v1, v2)
void set_prev_chunk(MemoryChunk *prev)
InstanceType instance_type()
friend class LargeObjectIterator
bool IsEvacuationCandidate()
static HeapObject * FromAddress(Address address)
PagedSpace(Heap *heap, intptr_t max_capacity, AllocationSpace id, Executability executable)
T RoundDown(T x, intptr_t m)
static FixedArray * cast(Object *obj)
void InsertAfter(MemoryChunk *other)
void set_next_page(LargePage *page)
void Add(const T &element, AllocationPolicy allocator=AllocationPolicy())
static size_t AllocateAlignment()
virtual void PrepareForMarkCompact()
void set_age_mark(Address mark)
static int CodePageGuardStartOffset()
NewSpacePage * prev_page() const
void CreateFillerObjectAt(Address addr, int size)
intptr_t unswept_free_bytes_
bool CommitBlock(Address start, size_t size, Executability executable)
AllocationInfo allocation_info_
static const intptr_t kAllocatedThreshold
void FreeRawMemory(Address buf, size_t length)
Executability executable()
bool SetUp(int reserved_semispace_size_, int max_semispace_size)
static bool IsFreeListNode(HeapObject *object)
MUST_USE_RESULT MaybeObject * FindObject(Address addr)
void DeleteArray(T *array)
static bool IsAtStart(Address addr)
void OldSpaceStep(intptr_t allocated)
bool ShrinkTo(int new_capacity)
HeapObjectIterator(PagedSpace *space)
MUST_USE_RESULT MaybeObject * AllocateRaw(int object_size, Executability executable)
intptr_t OldGenerationSpaceAvailable()
static intptr_t CommitPageSize()
static void FatalProcessOutOfMemory(const char *location, bool take_snapshot=false)
void SetFlags(intptr_t flags, intptr_t mask)
static MUST_USE_RESULT bool CommitCodePage(VirtualMemory *vm, Address start, size_t size)
void set_next_chunk(MemoryChunk *next)
static JSObject * cast(Object *obj)
VirtualMemory * reserved_memory()
SlotsBuffer * slots_buffer()
OldSpace * old_data_space()
static void AssertValidRange(Address from, Address to)
MarkCompactCollector * mark_compact_collector()
void AddAll(const List< T, AllocationPolicy > &other, AllocationPolicy allocator=AllocationPolicy())
void AddMemoryAllocationCallback(MemoryAllocationCallback callback, ObjectSpace space, AllocationAction action)
AllocationSpace identity()
bool Uncommit(void *address, size_t size)
virtual MUST_USE_RESULT HeapObject * SlowAllocateRaw(int size_in_bytes)
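Several of the helpers indexed above (RoundUp, RoundDown, IsAligned, together with constants such as kPageAlignmentMask and CommitPageSize) boil down to power-of-two alignment arithmetic. Below is a standalone sketch of that pattern for intptr_t values, assuming two's-complement arithmetic; the *Sketch names are stand-ins, not the actual V8 templates.

#include <assert.h>
#include <stdint.h>

// Round x down to a multiple of m, where m is a power of two.
static inline intptr_t RoundDownSketch(intptr_t x, intptr_t m) {
  assert(m > 0 && (m & (m - 1)) == 0);  // m must be a power of two
  return x & -m;                        // clears the low log2(m) bits
}

// Round x up to a multiple of m.
static inline intptr_t RoundUpSketch(intptr_t x, intptr_t m) {
  return RoundDownSketch(x + m - 1, m);
}

// True if value is already a multiple of alignment (a power of two).
static inline bool IsAlignedSketch(intptr_t value, intptr_t alignment) {
  return (value & (alignment - 1)) == 0;
}

// Example: rounding a requested chunk size up to whole (hypothetical) 4K pages:
//   intptr_t chunk_size = RoundUpSketch(requested_bytes, 4096);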
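The set_next_chunk, set_prev_chunk and InsertAfter(MemoryChunk *other) entries above belong to the doubly linked, anchor-based chain that threads memory chunks through a space. A stripped-down sketch of that linking pattern, using a hypothetical ChunkSketch type rather than the real MemoryChunk layout:

// Hypothetical stand-in for the chunk chaining implied by InsertAfter():
// a circular doubly linked list threaded through each chunk, where a lone
// chunk points at itself (the same trick an anchor node uses).
struct ChunkSketch {
  ChunkSketch* next;
  ChunkSketch* prev;

  ChunkSketch() : next(this), prev(this) {}

  // Link this chunk immediately after 'other'.
  void InsertAfter(ChunkSketch* other) {
    next = other->next;
    prev = other;
    other->next->prev = this;
    other->next = this;
  }

  // Take this chunk out of whatever list it is currently on.
  void Unlink() {
    next->prev = prev;
    prev->next = next;
    next = this;
    prev = this;
  }
};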
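Finally, the HeapObjectIterator(PagedSpace *space) constructor indexed above walks every live object in a precisely swept paged space. A hypothetical use, assuming this compiles inside v8::internal with spaces.h and objects-inl.h available and that Next() returns NULL at the end of the iteration:

// Hypothetical helper, not part of V8: count the Code objects currently
// living in a paged space. The iterator asserts that each visited page has
// been swept precisely.
static int CountCodeObjects(PagedSpace* space) {
  HeapObjectIterator it(space);
  int count = 0;
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    if (obj->IsCode()) count++;
  }
  return count;
}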