// Accessor on the Segment header class (see the sketch below): the total
// size of the segment in bytes, including the header itself.
int size() const { return size_; }
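// The accessor above belongs to the Segment header class. A minimal sketch
// of that class, reconstructed from the accessors this file relies on
// (Address is V8's byte* typedef; treat exact layout as an assumption):
class Segment {
 public:
  void Initialize(Segment* next, int size) {
    next_ = next;
    size_ = size;
  }

  Segment* next() const { return next_; }
  void clear_next() { next_ = NULL; }

  int size() const { return size_; }                          // total bytes, header included
  int capacity() const { return size_ - sizeof(Segment); }    // usable payload bytes

  Address start() const { return address(sizeof(Segment)); }  // first payload byte
  Address end() const { return address(size_); }              // one past the last byte

 private:
  // Computes the address of the nth byte in this segment.
  Address address(int n) const { return Address(this) + n; }

  Segment* next_;  // next segment in the zone's chain
  int size_;       // size of this segment in bytes
};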
Zone::Zone()
    : zone_excess_limit_(256 * MB),
      segment_bytes_allocated_(0),
      position_(0),
      limit_(0),
      scope_nesting_(0),
      segment_head_(NULL) {
}
ZoneScope::~ZoneScope() {
  ASSERT_EQ(Isolate::Current(), isolate_);
  if (ShouldDeleteOnExit()) isolate_->zone()->DeleteAll();
  isolate_->zone()->scope_nesting_--;
}
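// Hypothetical usage of the destructor above, following the ZoneScope API of
// this era (the DELETE_ON_EXIT mode and the isolate variable are assumptions
// of this sketch): ZoneScope is an RAII guard, so zone memory is released in
// a single DeleteAll() sweep when the outermost deleting scope dies.
//
//   {
//     ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
//     // ... allocate freely from isolate->zone(); no per-object frees ...
//   }  // ~ZoneScope() runs here; scope_nesting_ drops and DeleteAll() fires.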
// Creates a new segment, initializes it, and pushes it to the front of
// the segment chain.
Segment* Zone::NewSegment(int size) {
  Segment* result = reinterpret_cast<Segment*>(Malloced::New(size));
  adjust_segment_bytes_allocated(size);
  if (result != NULL) {
    result->Initialize(segment_head_, size);
    segment_head_ = result;
  }
  return result;
}
void Zone::DeleteSegment(Segment* segment, int size) {
  adjust_segment_bytes_allocated(-size);
  Malloced::Delete(segment);
}
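// The -size call above and the +size call in NewSegment() keep a running
// total of live segment bytes. A sketch of that helper, assuming the
// zone-inl.h definition (the counter plumbing is an assumption here):
void Zone::adjust_segment_bytes_allocated(int delta) {
  segment_bytes_allocated_ += delta;
  isolate_->counters()->zone_segment_bytes()->Set(segment_bytes_allocated_);
}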
void Zone::DeleteAll() {
#ifdef DEBUG
  // Constant byte value used for zapping dead memory in debug mode.
  static const unsigned char kZapDeadByte = 0xcd;
#endif

  // Find a reasonably sized segment to keep around for future allocations.
  Segment* keep = segment_head_;
  while (keep != NULL && keep->size() > kMaximumKeptSegmentSize) {
    keep = keep->next();
  }

  // Free every segment except the one we wish to keep.
  Segment* current = segment_head_;
  while (current != NULL) {
    Segment* next = current->next();
    if (current == keep) {
      current->clear_next();  // Unlink the kept segment from the chain.
    } else {
      int size = current->size();
#ifdef DEBUG
      memset(current, kZapDeadByte, size);  // Zap the whole dead segment.
#endif
      DeleteSegment(current, size);
    }
    current = next;
  }

  // If we kept a segment, recompute 'position_' and 'limit_' from it;
  // otherwise clear them to force a new segment on the next allocation.
  if (keep != NULL) {
    Address start = keep->start();
    position_ = RoundUp(start, kAlignment);
    limit_ = keep->end();
#ifdef DEBUG
    memset(start, kZapDeadByte, keep->capacity());  // Zap kept contents only.
#endif
  } else {
    position_ = limit_ = 0;
  }

  segment_head_ = keep;
}
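// Illustrative consequence of keeping one small segment (Zone::New() and its
// bump-pointer fast path are assumed from zone-inl.h, not shown above):
// after DeleteAll(), the next allocation that fits between position_ and
// limit_ is served by bumping position_, with no call into malloc().
//
//   zone->DeleteAll();
//   void* p = zone->New(32);  // reuses the kept segment; no NewSegment()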
void Zone::DeleteKeptSegment() {
  if (segment_head_ != NULL) {
    DeleteSegment(segment_head_, segment_head_->size());
    segment_head_ = NULL;
  }
}
// Expands the Zone to hold an allocation of 'size' bytes that did not fit
// in the current segment.
Address Zone::NewExpand(int size) {
  // Make sure the requested size is already properly aligned and that
  // there isn't enough room in the Zone to satisfy the request.
  ASSERT(size == RoundDown(size, kAlignment));
  ASSERT(size > limit_ - position_);

  // Compute the new segment size. We use a 'high water mark' strategy,
  // where we increase the segment size every time we expand, to amortize
  // the malloc() and free() overhead over many allocations.
  Segment* head = segment_head_;
  int old_size = (head == NULL) ? 0 : head->size();
  static const int kSegmentOverhead = sizeof(Segment) + kAlignment;
  int new_size_no_overhead = size + (old_size << 1);
  int new_size = kSegmentOverhead + new_size_no_overhead;
  // Guard against integer overflow.
  if (new_size_no_overhead < size || new_size < kSegmentOverhead) {
    V8::FatalProcessOutOfMemory("Zone");
    return NULL;
  }
  if (new_size < kMinimumSegmentSize) {
    new_size = kMinimumSegmentSize;
  } else if (new_size > kMaximumSegmentSize) {
    // Cap the growth of individual segments, while still allocating a
    // segment large enough to hold the requested size.
    new_size = Max(kSegmentOverhead + size, kMaximumSegmentSize);
  }
  Segment* segment = NewSegment(new_size);
  if (segment == NULL) {
    V8::FatalProcessOutOfMemory("Zone");
    return NULL;
  }

  // Recompute 'position_' and 'limit_' based on the new segment.
  Address result = RoundUp(segment->start(), kAlignment);
  position_ = result + size;
  // Check for address overflow.
  if (position_ < result) {
    V8::FatalProcessOutOfMemory("Zone");
    return NULL;
  }
  limit_ = segment->end();
  ASSERT(position_ <= limit_);
  return result;
}
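// Standalone illustration (not part of zone.cc) of the 'high water mark'
// growth strategy above: each expansion requests the new allocation plus
// twice the previous segment's size, so segments grow geometrically until
// the maximum cap, keeping malloc() call counts logarithmic in total zone
// size. The 8 KB / 1 MB bounds mirror zone.h; the overhead constant is a
// placeholder.
#include <algorithm>
#include <cstdio>

int main() {
  const int kSegmentOverhead = 16;  // stand-in for sizeof(Segment) + kAlignment
  const int kMinimumSegmentSize = 8 * 1024;
  const int kMaximumSegmentSize = 1024 * 1024;
  int old_size = 0;
  for (int i = 0; i < 10; i++) {
    int request = 64;  // a small allocation that misses the current limit
    int new_size = kSegmentOverhead + request + (old_size << 1);
    if (new_size < kMinimumSegmentSize) {
      new_size = kMinimumSegmentSize;
    } else if (new_size > kMaximumSegmentSize) {
      new_size = std::max(kSegmentOverhead + request, kMaximumSegmentSize);
    }
    std::printf("expand %d -> segment of %d bytes\n", i, new_size);
    old_size = new_size;
  }
  return 0;
}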