int size() const { return size_; }
: allocation_size_(0),
  segment_bytes_allocated_(0),
ASSERT(segment_bytes_allocated_ == 0);
// Constant byte value used for zapping dead memory in debug mode.
static const unsigned char kZapDeadByte = 0xcd;
Segment* keep = NULL;
for (Segment* current = segment_head_; current != NULL; ) {
  Segment* next = current->next();
  if (keep == NULL && current->size() <= kMaximumKeptSegmentSize) {
    // Remember one small segment for reuse and unlink it from the chain.
    keep = current;
    keep->clear_next();
  } else {
    int size = current->size();
    // Zap the whole segment, including its header, before freeing it.
    memset(current, kZapDeadByte, size);
    DeleteSegment(current, size);
  }
  current = next;
}
// If a segment was kept, recompute position_ and limit_ so future
// allocations reuse it; otherwise clear them to force a new segment
// to be allocated on demand.
if (keep != NULL) {
  Address start = keep->start();
  position_ = RoundUp(start, kAlignment);
  limit_ = keep->end();
  // Zap the kept segment's contents (but not its header).
  memset(start, kZapDeadByte, keep->capacity());
} else {
  position_ = limit_ = 0;
}

segment_head_ = keep;
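DeleteAll resets position_ and limit_, the two pointers behind the Zone's bump-pointer fast path, but the fast path itself is not part of this excerpt. The sketch below illustrates the pattern those fields support using a much-simplified standalone arena; ToyZone, Expand, and the fixed 8-byte alignment are illustrative placeholders, not V8's actual implementation.

#include <cstddef>
#include <cstdlib>

// Toy bump-pointer arena illustrating the role of position_/limit_.
// This is a sketch, not V8's Zone::New.
class ToyZone {
 public:
  void* New(size_t size) {
    // Round the request up to the arena's alignment.
    size = (size + kAlignment - 1) & ~(kAlignment - 1);
    if (limit_ - position_ < static_cast<ptrdiff_t>(size)) {
      Expand(size);  // not enough room left in the current block
    }
    char* result = position_;
    position_ += size;  // bump the allocation pointer
    return result;
  }

 private:
  static const size_t kAlignment = 8;

  void Expand(size_t size) {
    // The real Zone chains a properly sized, reusable segment here (see
    // NewSegment/NewExpand below); the toy just grabs a fresh block and
    // leaks the old one.
    size_t block = size < 8192 ? 8192 : size;
    position_ = static_cast<char*>(std::malloc(block));
    limit_ = position_ + block;
  }

  char* position_ = nullptr;
  char* limit_ = nullptr;
};

Allocation is just a pointer bump plus an occasional expansion, which is why the teardown code above can afford to free everything in one pass rather than tracking individual objects.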
static const unsigned char kZapDeadByte = 0xcd;
if (segment_head_ != NULL) {
  int size = segment_head_->size();
  // Zap and free the single kept segment.
  memset(segment_head_, kZapDeadByte, size);
  DeleteSegment(segment_head_, size);
  segment_head_ = NULL;
}

ASSERT(segment_bytes_allocated_ == 0);
if (result != NULL) {
  // Link the new segment in at the head of the chain.
  result->Initialize(segment_head_, size);
  segment_head_ = result;
}
void Zone::DeleteSegment(Segment* segment, int size) {
  adjust_segment_bytes_allocated(-size);
  Malloced::Delete(segment);
}
Address Zone::NewExpand(int size) {
  // Make sure the requested size is already properly aligned and that
  // there isn't enough room left in the Zone to satisfy it.
  ASSERT(size == RoundDown(size, kAlignment));
  ASSERT(size > limit_ - position_);

  // Compute the new segment size. Each expansion roughly doubles the
  // previous segment, up to a maximum segment size, to keep the number
  // of malloc() and free() calls low.
  Segment* head = segment_head_;
  const size_t old_size = (head == NULL) ? 0 : head->size();
  static const size_t kSegmentOverhead = sizeof(Segment) + kAlignment;
  const size_t new_size_no_overhead = size + (old_size << 1);
  size_t new_size = kSegmentOverhead + new_size_no_overhead;
  const size_t min_new_size = kSegmentOverhead + static_cast<size_t>(size);
  // Guard against integer overflow in the size computation.
  if (new_size_no_overhead < static_cast<size_t>(size) ||
      new_size < static_cast<size_t>(kSegmentOverhead)) {
    V8::FatalProcessOutOfMemory("Zone");
    return NULL;
  }
  if (new_size < static_cast<size_t>(kMinimumSegmentSize)) {
    new_size = kMinimumSegmentSize;
  } else if (new_size > static_cast<size_t>(kMaximumSegmentSize)) {
    // Cap the segment size, but still allocate enough to hold the
    // requested size plus overhead.
    new_size = Max(min_new_size, static_cast<size_t>(kMaximumSegmentSize));
  }
  if (new_size > INT_MAX) {
    V8::FatalProcessOutOfMemory("Zone");
    return NULL;
  }
  Segment* segment = NewSegment(static_cast<int>(new_size));
  if (segment == NULL) {
    V8::FatalProcessOutOfMemory("Zone");
    return NULL;
  }

  // Recompute position_ and limit_ based on the new segment.
  Address result = RoundUp(segment->start(), kAlignment);
  position_ = result + size;
  // Check for address overflow.
  if (reinterpret_cast<uintptr_t>(position_)
      < reinterpret_cast<uintptr_t>(result)) {
    V8::FatalProcessOutOfMemory("Zone");
    return NULL;
  }
  limit_ = segment->end();
  ASSERT(position_ <= limit_);
  return result;
}
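NewExpand doubles the previous segment's size, adds the requested size plus a fixed overhead, and then clamps the result between kMinimumSegmentSize and kMaximumSegmentSize while still guaranteeing room for the request. The standalone sketch below replays that arithmetic with concrete numbers; the constant values are illustrative assumptions, not necessarily the ones V8 uses.

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Standalone sketch of NewExpand's segment-size computation.
// The constants are assumed values for illustration only.
static const size_t kSegmentOverhead = 32;            // sizeof(Segment) + alignment
static const size_t kMinimumSegmentSize = 8 * 1024;
static const size_t kMaximumSegmentSize = 1024 * 1024;

size_t NextSegmentSize(size_t requested, size_t old_size) {
  // Double the previous segment, then add the request and the overhead...
  size_t new_size = kSegmentOverhead + requested + (old_size << 1);
  const size_t min_new_size = kSegmentOverhead + requested;
  // ...and clamp into [kMinimumSegmentSize, kMaximumSegmentSize] without
  // ever dropping below what the request itself needs.
  if (new_size < kMinimumSegmentSize) {
    new_size = kMinimumSegmentSize;
  } else if (new_size > kMaximumSegmentSize) {
    new_size = std::max(min_new_size, kMaximumSegmentSize);
  }
  return new_size;
}

int main() {
  size_t old_size = 0;
  for (int i = 0; i < 6; ++i) {
    old_size = NextSegmentSize(256, old_size);  // repeated 256-byte expansions
    std::printf("segment %d: %zu bytes\n", i, old_size);
  }
  return 0;
}

Feeding each result back in as old_size shows the growth policy in action: a small request jumps straight to the minimum segment size and then roughly doubles on every expansion until the maximum is reached.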
Referenced helpers:
static void Delete(void *p)
void Initialize(Segment *next, int size)
static void * New(size_t size)
#define ASAN_UNPOISON_MEMORY_REGION(start, size)
#define ASSERT(condition)
T RoundUp(T x, intptr_t m)
void adjust_segment_bytes_allocated(int delta)
T RoundDown(T x, intptr_t m)
static void FatalProcessOutOfMemory(const char *location, bool take_snapshot=false)