         owner == HEAP->old_data_space() ||
         owner == HEAP->map_space() ||
         owner == HEAP->cell_space() ||
         owner == HEAP->code_space());
  Initialize(reinterpret_cast<PagedSpace*>(owner),


void HeapObjectIterator::Initialize(PagedSpace* space,
                                    HeapObjectIterator::PageMode mode,
bool HeapObjectIterator::AdvanceToNextPage() {
  ASSERT(cur_addr_ == cur_end_);
  if (page_mode_ == kOnePageOnly) return false;
  Page* cur_page;
  if (cur_addr_ == NULL) {
    cur_page = space_->anchor();
  } else {
    cur_page = Page::FromAddress(cur_addr_ - 1);
    ASSERT(cur_addr_ == cur_page->area_end());
  }
  cur_page = cur_page->next_page();
  if (cur_page == space_->anchor()) return false;
  cur_addr_ = cur_page->area_start();
  cur_end_ = cur_page->area_end();
  ASSERT(cur_page->WasSweptPrecisely());
  return true;
}
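// Roughly: iteration relies on the page having been swept precisely, so that
// live objects sit back-to-back and Next() can advance by object->Size()
// alone; that is what the WasSweptPrecisely() assertion above guards.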
      current_allocation_block_index_(0) {

  LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));

  size_t size = code_range_->size() - (aligned_base - base);
  allocation_list_.Add(FreeBlock(aligned_base, size));
  current_allocation_block_index_ = 0;


int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                       const FreeBlock* right) {
  return static_cast<int>(left->start - right->start);
}
void CodeRange::GetNextAllocationBlock(size_t requested) {
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Sort and merge the free blocks on the free list and the allocation list.
  free_list_.AddAll(allocation_list_);
  allocation_list_.Clear();
  free_list_.Sort(&CompareFreeBlockAddress);
  for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
      allocation_list_.Add(merged);
    }
  }

  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }
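// Roughly: allocation first scans forward through allocation_list_ for a block
// of at least `requested` bytes; only when that fails are the blocks dumped
// back onto free_list_, sorted by start address, coalesced with adjacent
// neighbours, and rescanned from index 0.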
Address CodeRange::AllocateRawMemory(const size_t requested,
                                     size_t* allocated) {
  ASSERT(current_allocation_block_index_ < allocation_list_.length());
  if (requested > allocation_list_[current_allocation_block_index_].size) {
    // Find an allocation block large enough.
    GetNextAllocationBlock(requested);
  }
  // Commit the requested memory at the start of the current allocation block.
  FreeBlock current = allocation_list_[current_allocation_block_index_];
  if (aligned_requested >= (current.size - Page::kPageSize)) {
    // Don't leave a small free block, useless for a large object or chunk.
    *allocated = current.size;
  } else {
    *allocated = aligned_requested;
  }
  ASSERT(*allocated <= current.size);

  allocation_list_[current_allocation_block_index_].start += *allocated;
  allocation_list_[current_allocation_block_index_].size -= *allocated;
  if (*allocated == current.size) {
    GetNextAllocationBlock(0);  // This block is used up, get the next one.
  }
  return current.start;
}
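// The current block is consumed from its front: its start and size are bumped
// by the committed amount, and an exactly-exhausted block triggers a move to
// the next block so current_allocation_block_index_ keeps pointing at usable
// space.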
  free_list_.Add(FreeBlock(address, length));
  code_range_->Uncommit(address, length);

  allocation_list_.Free();


      capacity_executable_(0),
      size_executable_(0) {

  ASSERT_GE(capacity_, capacity_executable_);

  size_executable_ = 0;

  capacity_executable_ = 0;
  size_t size = reservation->size();

  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  ASSERT(size_executable_ >= size);
  size_executable_ -= size;

  ASSERT(!isolate_->code_range()->contains(
      static_cast<Address>(reservation->address())));

  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  ASSERT(size_executable_ >= size);
  size_executable_ -= size;
  size_ += reservation.size();

    if (!reservation.Commit(base, size, false)) {


void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
  if (size_executable_ + chunk_size > capacity_executable_) {
    LOG(isolate_,
        StringEvent("MemoryAllocator::AllocateRawMemory",
                    "V8 Executable Allocation capacity exceeded"));
    return NULL;
  }

      size_executable_ += chunk_size;

      size_executable_ += reservation.size();

    area_end = area_start + body_size;

    area_end = base + chunk_size;

  isolate_->counters()->memory_allocated()->
      Increment(static_cast<int>(chunk_size));

  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
  return LargePage::Initialize(isolate_->heap(), chunk);

  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));

  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));

  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    MemoryAllocationCallbackRegistration registration =
        memory_allocation_callbacks_[i];
    if ((registration.space & space) == space &&
        (registration.action & action) == action)
      registration.callback(space, action, static_cast<int>(size));
  }
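// A minimal usage sketch (the observer name is hypothetical; the enum values
// come from the public v8.h ObjectSpace/AllocationAction definitions):
//
//   isolate->memory_allocator()->AddMemoryAllocationCallback(
//       &MyChunkObserver, kObjectSpaceAll, kAllocationActionAll);
//
// After such a registration, PerformAllocationCallback() above invokes the
// callback for every chunk allocation or free whose space and action bits are
// covered by the registration's masks.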
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) return true;
  }

  MemoryAllocationCallbackRegistration registration(callback, space, action);

  return memory_allocation_callbacks_.Add(registration);

  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) {
      memory_allocation_callbacks_.Remove(i);
void MemoryAllocator::ReportStatistics() {
  float pct = static_cast<float>(capacity_ - size_) / capacity_;
  PrintF("  capacity: %" V8_PTR_PREFIX "d"
             ", used: %" V8_PTR_PREFIX "d"
             ", available: %%%d\n\n",
         capacity_, size_, static_cast<int>(pct * 100));
}
  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
    static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
  }


                       intptr_t max_capacity,
    : Space(heap, id, executable),
      was_swept_conservatively_(false),
      first_unswept_page_(Page::FromAddress(NULL)),
      unswept_free_bytes_(0) {


  while (iterator.has_next()) {

  ASSERT(!heap()->mark_compact_collector()->in_use());

    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;

  if (p == NULL) return false;


  while (it.has_next()) {

  intptr_t size = free_list_.EvictFreeListItems(page);

  while (it.has_next()) {
    Page* page = it.next();

    if (obj->IsFreeSpace() &&

    FreeList::SizeStats sizes;
void PagedSpace::Verify(ObjectVisitor* visitor) {

  bool allocation_pointer_found_in_space =
      (allocation_info_.top == allocation_info_.limit);
  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
    CHECK(page->owner() == this);
    if (page == Page::FromAllocationTop(allocation_info_.top)) {
      allocation_pointer_found_in_space = true;
    }
    CHECK(page->WasSweptPrecisely());
    HeapObjectIterator it(page, NULL);
    Address end_of_previous_object = page->area_start();

    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
      CHECK(end_of_previous_object <= object->address());

      // The first word should be a map.
      Map* map = object->map();
      CHECK(map->IsMap());

      // Perform space-specific object verification.
      VerifyObject(object);

      // All the interior pointers should be contained in the heap.
      int size = object->Size();
      object->IterateBody(map->instance_type(), size, visitor);

      CHECK(object->address() + size <= top);
      end_of_previous_object = object->address() + size;
    }
    CHECK_LE(black_size, page->LiveBytes());
  }
  CHECK(allocation_pointer_found_in_space);
}
#endif  // VERIFY_HEAP
                     int maximum_semispace_capacity) {

  size_t size = 2 * reserved_semispace_capacity;
  Address base =
      heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
          size, size, &reservation_);
  if (base == NULL) return false;

  chunk_size_ = static_cast<uintptr_t>(size);
  LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));

  ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);

  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);

#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
                       promoted_histogram_[name].set_name(#name);

  ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
  ASSERT(static_cast<intptr_t>(chunk_size_) >=
         2 * heap()->ReservedSemiSpaceSize());

  to_space_.SetUp(chunk_base_,
                  initial_semispace_capacity,
                  maximum_semispace_capacity);
  from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
                    initial_semispace_capacity,
                    maximum_semispace_capacity);
  if (!to_space_.Commit()) {

  start_ = chunk_base_;
  address_mask_ = ~(2 * reserved_semispace_capacity - 1);
  object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
  if (allocated_histogram_) {
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    promoted_histogram_ = NULL;
  }

  LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));


  if (to_space_.GrowTo(new_capacity)) {
    if (!from_space_.GrowTo(new_capacity)) {


  int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
  if (rounded_new_capacity < Capacity() &&
      to_space_.ShrinkTo(rounded_new_capacity)) {
    from_space_.Reset();
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {


void NewSpace::UpdateAllocationInfo() {

  if (heap()->incremental_marking()->IsMarking() &&

    allocation_info_.limit = Min(new_limit, allocation_info_.limit);

  UpdateAllocationInfo();

  NewSpacePageIterator it(&to_space_);
  while (it.has_next()) {

  int remaining_in_page = static_cast<int>(limit - top);

  UpdateAllocationInfo();
MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
  Address new_top = old_top + size_in_bytes;

  if (allocation_info_.limit < high) {
    // Incremental marking has lowered the limit to get a chance to do a step.
    allocation_info_.limit = Min(
        allocation_info_.limit + inline_allocation_limit_step_,
        high);
    int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
    top_on_previous_step_ = new_top;
    return AllocateRaw(size_in_bytes);
  } else if (AddFreshPage()) {
    // Switched to new page. Try allocating again.
    int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
    top_on_previous_step_ = to_space_.page_low();
    return AllocateRaw(size_in_bytes);
  }
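// Roughly: this slow path is reached either because incremental marking has
// lowered allocation_info_.limit below the real end of the page (the limit is
// then raised by one inline_allocation_limit_step_ and the bytes allocated
// since the previous step are reported to the incremental marker) or because
// the page really is full and a fresh to-space page is needed; in both cases
// the bump-pointer AllocateRaw() is retried afterwards.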
void NewSpace::Verify() {

  while (current != top()) {

      Map* map = object->map();
      CHECK(map->IsMap());

      CHECK(!object->IsMap());
      CHECK(!object->IsCode());

      VerifyPointersVisitor visitor;
      int size = object->Size();
      object->IterateBody(map->instance_type(), size, &visitor);

      CHECK(!page->is_anchor());
      current = page->area_start();

  from_space_.Verify();


                      int initial_capacity,
                      int maximum_capacity) {
  ASSERT(maximum_capacity >= Page::kPageSize);
  initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
  capacity_ = initial_capacity;
  maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
  address_mask_ = ~(maximum_capacity - 1);
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;


  Address end = start_ + maximum_capacity_;
  if (!heap()->isolate()->memory_allocator()->CommitBlock(start,

  for (int i = 1; i <= pages; i++) {
        NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this);


  Address start = start_ + maximum_capacity_ - capacity_;
  if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {

    if (!Commit()) return false;
  ASSERT(new_capacity <= maximum_capacity_);
  ASSERT(new_capacity > capacity_);

  Address end = start_ + maximum_capacity_;
  Address start = end - new_capacity;
  size_t delta = new_capacity - capacity_;

  if (!heap()->isolate()->memory_allocator()->CommitBlock(
          start, delta, executable())) {
    return false;
  }
  capacity_ = new_capacity;
  ASSERT(last_page != anchor());
  for (int i = pages_before + 1; i <= pages_after; i++) {

    last_page = new_page;
  }
  ASSERT(new_capacity >= initial_capacity_);
  ASSERT(new_capacity < capacity_);

    Address old_start = space_end - capacity_;
    size_t delta = capacity_ - new_capacity;

    ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));

  capacity_ = new_capacity;
void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {

  while (page != &anchor_) {
    if (becomes_to_space) {
      page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
      page->SetFlag(MemoryChunk::IN_TO_SPACE);
    } else {
      page->SetFlag(MemoryChunk::IN_FROM_SPACE);
      page->ClearFlag(MemoryChunk::IN_TO_SPACE);
    }
    ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
           page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
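// Roughly: flipping relabels every page of the semispace in place (the
// IN_FROM_SPACE / IN_TO_SPACE flags are swapped page by page), so Swap() can
// exchange the roles of the two semispaces without copying or remapping any
// memory.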
  from->FlipPages(0, 0);

  while (it.has_next()) {


void SemiSpace::Verify() {
  NewSpacePage* page = anchor_.next_page();
  while (page != &anchor_) {
    CHECK(page->semi_space() == this);
    CHECK(page->InNewSpace());
    CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
                                        : MemoryChunk::IN_TO_SPACE));
    CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
                                         : MemoryChunk::IN_FROM_SPACE));
    if (!is_from_space) {
      if (page->heap()->incremental_marking()->IsMarking()) {

        CHECK(!page->IsFlagSet(

    CHECK(page->prev_page()->next_page() == page);
    page = page->next_page();


  CHECK_EQ(space, end_page->semi_space());
  if (page == end_page) {
    CHECK(start <= end);
  } else {
    while (page != end_page) {
      page = page->next_page();


  Initialize(space->bottom(), space->top(), size_func);

  Initialize(start, space->top(), NULL);

  Initialize(from, to, NULL);

void SemiSpaceIterator::Initialize(Address start,

  size_func_ = size_func;
static void ClearHistograms() {
  Isolate* isolate = Isolate::Current();
#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
#undef DEF_TYPE_NAME

#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
#undef CLEAR_HISTOGRAM

  isolate->js_spill_information()->Clear();


static void ClearCodeKindStatistics() {
  Isolate* isolate = Isolate::Current();
    isolate->code_kind_statistics()[i] = 0;


static void ReportCodeKindStatistics() {
  Isolate* isolate = Isolate::Current();

#define CASE(name)                            \
  case Code::name: table[Code::name] = #name; \

    switch (static_cast<Code::Kind>(i)) {
      CASE(OPTIMIZED_FUNCTION);
      CASE(KEYED_LOAD_IC);
      CASE(KEYED_STORE_IC);
      CASE(KEYED_CALL_IC);
      CASE(TO_BOOLEAN_IC);

  PrintF("\n Code kind histograms: \n");
    if (isolate->code_kind_statistics()[i] > 0) {
      PrintF(" %-20s: %10d bytes\n", table[i],
             isolate->code_kind_statistics()[i]);
    }


static int CollectHistogramInfo(HeapObject* obj) {
  Isolate* isolate = Isolate::Current();
  ASSERT(isolate->heap_histograms()[type].name() != NULL);
  isolate->heap_histograms()[type].increment_number(1);
  isolate->heap_histograms()[type].increment_bytes(obj->Size());

  if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
        isolate->js_spill_information());
static void ReportHistogram(bool print_spill) {
  Isolate* isolate = Isolate::Current();
  PrintF("\n Object Histogram:\n");
    if (isolate->heap_histograms()[i].number() > 0) {
      PrintF(" %-34s%10d (%10d bytes)\n",
             isolate->heap_histograms()[i].name(),
             isolate->heap_histograms()[i].number(),
             isolate->heap_histograms()[i].bytes());
    }

  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name)                 \
    string_number += isolate->heap_histograms()[type].number(); \
    string_bytes += isolate->heap_histograms()[type].bytes();

  if (string_number > 0) {
    PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
           string_bytes);
  }

  if (FLAG_collect_heap_spill_statistics && print_spill) {
    isolate->js_spill_information()->Print();
  }
}
    allocated_histogram_[i].clear();
    promoted_histogram_[i].clear();


static void DoReportStatistics(Isolate* isolate,

  LOG(isolate, HeapSampleBeginEvent("NewSpace", description));

  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name) \
    string_number += info[type].number();       \
    string_bytes += info[type].bytes();

  if (string_number > 0) {
    LOG(isolate,
        HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
  }

    if (info[i].number() > 0) {
      LOG(isolate,
          HeapSampleItemEvent(info[i].name(), info[i].number(),

  LOG(isolate, HeapSampleEndEvent("NewSpace", description));


  if (FLAG_heap_stats) {
    PrintF("\n Object Histogram:\n");
      if (allocated_histogram_[i].number() > 0) {
        PrintF(" %-34s%10d (%10d bytes)\n",
               allocated_histogram_[i].name(),
               allocated_histogram_[i].number(),
               allocated_histogram_[i].bytes());
      }

    DoReportStatistics(isolate, allocated_histogram_, "allocated");
    DoReportStatistics(isolate, promoted_histogram_, "promoted");


  allocated_histogram_[type].increment_number(1);
  allocated_histogram_[type].increment_bytes(obj->Size());

  promoted_histogram_[type].increment_number(1);
  promoted_histogram_[type].increment_bytes(obj->Size());
  ASSERT(size_in_bytes > 0);

    this_as_free_space->set_size(size_in_bytes);

  if (map() == HEAP->raw_unchecked_free_space_map()) {

  if (map() == HEAP->raw_unchecked_free_space_map()) {

  if (map() == HEAP->raw_unchecked_free_space_map()) {


    : owner_(owner), heap_(owner->heap()) {


void FreeList::Reset() {
  medium_list_ = NULL;
int FreeList::Free(Address start, int size_in_bytes) {
  if (size_in_bytes == 0) return 0;
  node->set_size(heap_, size_in_bytes);

  // Early return to drop too-small blocks on the floor.
  if (size_in_bytes < kSmallListMin) return size_in_bytes;

  // Insert other blocks at the head of a free list of the appropriate
  // magnitude.
  if (size_in_bytes <= kSmallListMax) {
    node->set_next(small_list_);
    small_list_ = node;
  } else if (size_in_bytes <= kMediumListMax) {
    node->set_next(medium_list_);
    medium_list_ = node;
  } else if (size_in_bytes <= kLargeListMax) {
    node->set_next(large_list_);
    large_list_ = node;
  } else {
    node->set_next(huge_list_);
    huge_list_ = node;
  }
  available_ += size_in_bytes;
  ASSERT(IsVeryLong() || available_ == SumFreeLists());
  return 0;
}
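// In effect the free list is segregated by block size: blocks smaller than
// kSmallListMin are dropped and their size is handed back to the caller (the
// owning space accounts for them as waste), while everything else is pushed
// onto small_list_, medium_list_, large_list_ or huge_list_ according to the
// k*ListMax thresholds, with available_ tracking the bytes kept on the lists.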
FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) {
  FreeListNode* node = *list;

  while (node != NULL &&
         Page::FromAddress(node->address())->IsEvacuationCandidate()) {
    available_ -= node->Size();
    node = node->next();
  }

  *node_size = node->Size();
  *list = node->next();
FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
  FreeListNode* node = NULL;

  if (size_in_bytes <= kSmallAllocationMax) {
    node = PickNodeFromList(&small_list_, node_size);
    if (node != NULL) return node;
  }

  if (size_in_bytes <= kMediumAllocationMax) {
    node = PickNodeFromList(&medium_list_, node_size);
    if (node != NULL) return node;
  }

  if (size_in_bytes <= kLargeAllocationMax) {
    node = PickNodeFromList(&large_list_, node_size);
    if (node != NULL) return node;
  }

  for (FreeListNode** cur = &huge_list_;
       *cur != NULL;
       cur = (*cur)->next_address()) {
    FreeListNode* cur_node = *cur;
    while (cur_node != NULL &&
           Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
      available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
      cur_node = cur_node->next();
    }

    if (cur_node == NULL) break;

    ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
    int size = cur_as_free_space->Size();
    if (size >= size_in_bytes) {
      *cur = node->next();
HeapObject* FreeList::Allocate(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  ASSERT(size_in_bytes <= kMaxBlockSize);
  // Don't free list allocate if there is linear space available.
  ASSERT(owner_->limit() - owner_->top() < size_in_bytes);

  int new_node_size = 0;
  FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);

  available_ -= new_node_size;
  ASSERT(IsVeryLong() || available_ == SumFreeLists());

  int bytes_left = new_node_size - size_in_bytes;

  int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
  // Mark the old linear allocation area with a free space map so it can be
  // skipped when scanning the heap.  This also puts it back in the free list
  // if it is big enough.
  owner_->Free(owner_->top(), old_linear_size);

  owner_->heap()->incremental_marking()->OldSpaceStep(
      size_in_bytes - old_linear_size);

  for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
    reinterpret_cast<Object**>(new_node->address())[i] =

  ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));

  // Memory in the linear allocation area is counted as allocated.
  owner_->Allocate(new_node_size);

  if (bytes_left > kThreshold &&
      owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
      FLAG_incremental_marking_steps) {
    int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
    owner_->Free(new_node->address() + size_in_bytes + linear_size,
                 new_node_size - size_in_bytes - linear_size);
    owner_->SetTop(new_node->address() + size_in_bytes,
                   new_node->address() + size_in_bytes + linear_size);
  } else if (bytes_left > 0) {
    owner_->SetTop(new_node->address() + size_in_bytes,
                   new_node->address() + new_node_size);
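// Roughly: a free-list hit hands back one node of new_node_size bytes. The
// first size_in_bytes of it become the requested object; the tail either
// becomes the owner's new linear allocation area via SetTop(), or, while an
// incremental marking round is still incomplete, is capped at kThreshold
// bytes with the remainder freed straight back onto the list so the marker
// gets another chance to step soon.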
static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) {
      FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n);
      sum += free_space->Size();


void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) {
  sizes->huge_size_ = CountFreeListItemsInList(huge_list_, p);
  if (sizes->huge_size_ < p->area_size()) {
    sizes->small_size_ = CountFreeListItemsInList(small_list_, p);
    sizes->medium_size_ = CountFreeListItemsInList(medium_list_, p);
    sizes->large_size_ = CountFreeListItemsInList(large_list_, p);
  } else {
    sizes->small_size_ = 0;
    sizes->medium_size_ = 0;
    sizes->large_size_ = 0;
  }
}


static intptr_t EvictFreeListItemsInList(FreeListNode** n, Page* p) {
  while (*n != NULL) {
      FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
      sum += free_space->Size();

      n = (*n)->next_address();


intptr_t FreeList::EvictFreeListItems(Page* p) {
  intptr_t sum = EvictFreeListItemsInList(&huge_list_, p);

  if (sum < p->area_size()) {
    sum += EvictFreeListItemsInList(&small_list_, p) +
           EvictFreeListItemsInList(&medium_list_, p) +
           EvictFreeListItemsInList(&large_list_, p);
  }

  available_ -= static_cast<int>(sum);


intptr_t FreeList::SumFreeList(FreeListNode* cur) {
  while (cur != NULL) {
    ASSERT(cur->map() == HEAP->raw_unchecked_free_space_map());
    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
    sum += cur_as_free_space->Size();


static const int kVeryLongFreeList = 500;


int FreeList::FreeListLength(FreeListNode* cur) {
  while (cur != NULL) {
    if (length == kVeryLongFreeList) return length;


bool FreeList::IsVeryLong() {
  if (FreeListLength(small_list_) == kVeryLongFreeList) return true;
  if (FreeListLength(medium_list_) == kVeryLongFreeList) return true;
  if (FreeListLength(large_list_) == kVeryLongFreeList) return true;
  if (FreeListLength(huge_list_) == kVeryLongFreeList) return true;


intptr_t FreeList::SumFreeLists() {
  intptr_t sum = SumFreeList(small_list_);
  sum += SumFreeList(medium_list_);
  sum += SumFreeList(large_list_);
  sum += SumFreeList(huge_list_);
  MaybeObject* maybe = AllocateRaw(bytes);
  if (!maybe->ToObject(&object)) return false;

  if ((top - bytes) == allocation->address()) {
    allocation_info_.top = allocation->address();

  int old_linear_size = static_cast<int>(limit() - top());


  if (FLAG_gc_verbose) {
           reinterpret_cast<intptr_t>(p));


  Address new_top = current_top + size_in_bytes;

  if (new_area == NULL) return false;

  int old_linear_size = static_cast<int>(limit() - top());

    if (*map_location == NULL) {
      *map_location = heap->free_space_map();
    } else {
      ASSERT(*map_location == heap->free_space_map());
    }


void FreeList::RepairLists(Heap* heap) {
  RepairFreeList(heap, small_list_);
  RepairFreeList(heap, medium_list_);
  RepairFreeList(heap, large_list_);
  RepairFreeList(heap, huge_list_);
}


  intptr_t freed_bytes = 0;

      if (FLAG_gc_verbose) {
               reinterpret_cast<intptr_t>(p));

  } while (p != anchor() && freed_bytes < bytes_to_sweep);


    if (object != NULL) return object;

  if (!heap()->always_allocate() &&
      heap()->OldGenerationAllocationLimitReached()) {

    if (object != NULL) return object;
void PagedSpace::ReportCodeStatistics() {
  Isolate* isolate = Isolate::Current();
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  ReportCodeKindStatistics();
  PrintF("Code comment statistics (\" [ comment-txt : size/ "
         "count (average)\"):\n");
  for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
    const CommentStatistic& cs = comments_statistics[i];
      PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,


void PagedSpace::ResetCodeStatistics() {
  Isolate* isolate = Isolate::Current();
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  ClearCodeKindStatistics();
  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
    comments_statistics[i].Clear();
  }
  comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
  comments_statistics[CommentStatistic::kMaxComments].size = 0;
  comments_statistics[CommentStatistic::kMaxComments].count = 0;
}
// Adds a comment to the 'comments_statistics' table.
static void EnterComment(Isolate* isolate, const char* comment, int delta) {
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  // Do not count empty comments.
  if (delta <= 0) return;
  CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
  // Search for a free or matching entry in 'comments_statistics'.
  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
    if (comments_statistics[i].comment == NULL) {
      cs = &comments_statistics[i];
      cs->comment = comment;
      break;
    } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
      cs = &comments_statistics[i];
      break;
    }
  }
static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
  ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
  const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
  if (tmp[0] != '[') {
    // Not a nested comment; skip.
    return;
  }

  // Search for the end of the nested comment or a new nested comment.
  const char* const comment_txt =
      reinterpret_cast<const char*>(it->rinfo()->data());
  const byte* prev_pc = it->rinfo()->pc();

    if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
      const char* const txt =
          reinterpret_cast<const char*>(it->rinfo()->data());
      flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
      if (txt[0] == ']') break;  // End of nested comment.
      CollectCommentStatistics(isolate, it);
      prev_pc = it->rinfo()->pc();
    }

  EnterComment(isolate, comment_txt, flat_delta);
void PagedSpace::CollectCodeStatistics() {
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
    if (obj->IsCode()) {
      isolate->code_kind_statistics()[code->kind()] += code->Size();
      RelocIterator it(code);
      const byte* prev_pc = code->instruction_start();
      while (!it.done()) {
        if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
          delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
          CollectCommentStatistics(isolate, &it);
          prev_pc = it.rinfo()->pc();
        }
      }

      ASSERT(code->instruction_start() <= prev_pc &&
             prev_pc <= code->instruction_end());
      delta += static_cast<int>(code->instruction_end() - prev_pc);
      EnterComment(isolate, "NoComment", delta);
    }
  }
}


void PagedSpace::ReportStatistics() {

  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
    CollectHistogramInfo(obj);
  ReportHistogram(true);
}
  CHECK(object->IsMap() || object->IsFreeSpace());

  CHECK(object->IsJSGlobalPropertyCell() ||
        object->map() == heap()->two_pointer_filler_map());


  current_ = space->first_page_;

  current_ = space->first_page_;
  size_func_ = size_func;


static bool ComparePointers(void* key1, void* key2) {
  return key1 == key2;
}


                                   intptr_t max_capacity,
      max_capacity_(max_capacity),
      chunk_map_(ComparePointers, 1024) {}


  while (first_page_ != NULL) {
    LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
  if (!heap()->always_allocate() &&
      heap()->OldGenerationAllocationLimitReached()) {

  if (Size() + object_size > max_capacity_) {

  LargePage* page = heap()->isolate()->memory_allocator()->
      AllocateLargePage(object_size, this, executable);

  size_ += static_cast<int>(page->size());
  objects_size_ += object_size;

  for (uintptr_t key = base; key <= limit; key++) {
    HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
                                              static_cast<uint32_t>(key),
                                              true);
    entry->value = page;
  }

    reinterpret_cast<Object**>(object->address())[0] =
        heap()->fixed_array_map();
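// chunk_map_ is keyed by MemoryChunk::kAlignment-sized slots: every aligned
// slot covered by a large page is mapped back to that page, which is what
// lets FindPage() (and FindObject() on top of it) translate an arbitrary
// interior address into its LargePage with a single hash lookup.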
  HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
                                        static_cast<uint32_t>(key),
  while (current != NULL) {
    bool is_pointer_object = object->IsFixedArray();
    if (mark_bit.Get()) {

      if (previous == NULL) {
        first_page_ = current;

      heap()->mark_compact_collector()->ReportDeleteIfNeeded(
          object, heap()->isolate());
      size_ -= static_cast<int>(page->size());
      objects_size_ -= object->Size();

      uintptr_t base = reinterpret_cast<uintptr_t>(page)/alignment;
      uintptr_t limit = base + (page->size()-1)/alignment;
      for (uintptr_t key = base; key <= limit; key++) {
        chunk_map_.Remove(reinterpret_cast<void*>(key),
                          static_cast<uint32_t>(key));
      }

      if (is_pointer_object) {
  Address address = object->address();

  bool owned = (chunk->owner() == this);
void LargeObjectSpace::Verify() {
  for (LargePage* chunk = first_page_;
       chunk != NULL;
       chunk = chunk->next_page()) {

    Map* map = object->map();
    CHECK(map->IsMap());

    CHECK(object->IsCode() || object->IsSeqString() ||
          object->IsExternalString() || object->IsFixedArray() ||
          object->IsFixedDoubleArray() || object->IsByteArray());

    if (object->IsCode()) {
      object->IterateBody(map->instance_type(),
    } else if (object->IsFixedArray()) {
      for (int j = 0; j < array->length(); j++) {
        Object* element = array->get(j);
        if (element->IsHeapObject()) {
          CHECK(element_object->map()->IsMap());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {


void LargeObjectSpace::ReportStatistics() {
  PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
  int num_objects = 0;
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    CollectHistogramInfo(obj);
  }

  PrintF(" number of objects %d, "
         "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
  if (num_objects > 0) ReportHistogram(false);


void LargeObjectSpace::CollectCodeStatistics() {
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
    if (obj->IsCode()) {
      isolate->code_kind_statistics()[code->kind()] += code->Size();
  PrintF("Page@%p in %s\n",

  printf(" --------------------------------------\n");
  HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
  unsigned mark_size = 0;
  for (HeapObject* object = objects.Next();
       object != NULL;
       object = objects.Next()) {
    PrintF(" %c ", (is_marked ? '!' : ' '));
    object->ShortPrint();
  }
  printf(" --------------------------------------\n");
  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());