v8 3.11.10 (node 0.8.26)
V8 is Google's open source JavaScript engine
zone.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include <string.h>
29 
30 #include "v8.h"
31 #include "zone-inl.h"
32 
33 namespace v8 {
34 namespace internal {
35 
36 
37 // Segments represent chunks of memory: They have a starting address
38 // (encoded in the this pointer) and a size in bytes. Segments are
39 // chained together forming a LIFO structure with the newest segment
40 // available as segment_head_. Segments are allocated using malloc()
41 // and de-allocated using free().
42 
43 class Segment {
44  public:
45  void Initialize(Segment* next, int size) {
46  next_ = next;
47  size_ = size;
48  }
49 
50  Segment* next() const { return next_; }
51  void clear_next() { next_ = NULL; }
52 
53  int size() const { return size_; }
54  int capacity() const { return size_ - sizeof(Segment); }
55 
56  Address start() const { return address(sizeof(Segment)); }
57  Address end() const { return address(size_); }
58 
59  private:
60  // Computes the address of the nth byte in this segment.
61  Address address(int n) const {
62  return Address(this) + n;
63  }
64 
65  Segment* next_;
66  int size_;
67 };
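
The Segment header is stored at the front of the raw malloc()'d block, which is why capacity() subtracts sizeof(Segment) and start() skips past the header. Below is a minimal, self-contained sketch of the same layout idea; the names Header, block and payload are invented for illustration and are not V8 code.

#include <cstdio>
#include <cstdlib>

// Illustrative header mirroring Segment's shape: a next pointer plus a size.
struct Header {
  Header* next;
  int size;  // total block size in bytes, including this header
};

int main() {
  const int total = 4096;
  char* block = static_cast<char*>(std::malloc(total));
  if (block == NULL) return 1;
  Header* header = reinterpret_cast<Header*>(block);   // header at the front of the block
  header->next = NULL;
  header->size = total;
  char* payload = block + sizeof(Header);              // plays the role of Segment::start()
  char* end = block + total;                           // plays the role of Segment::end()
  std::printf("capacity = %d bytes\n", static_cast<int>(end - payload));
  std::free(block);
  return 0;
}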
68 
69 
70 Zone::Zone()
71  : zone_excess_limit_(256 * MB),
72  segment_bytes_allocated_(0),
73  position_(0),
74  limit_(0),
75  scope_nesting_(0),
76  segment_head_(NULL) {
77 }
78 unsigned Zone::allocation_size_ = 0;
79 
80 ZoneScope::~ZoneScope() {
81  ASSERT_EQ(Isolate::Current(), isolate_);
82  if (ShouldDeleteOnExit()) isolate_->zone()->DeleteAll();
83  isolate_->zone()->scope_nesting_--;
84 }
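
For context: zone memory is not freed object by object. Callers open a ZoneScope around a burst of allocations, and when the last scope opened with DELETE_ON_EXIT unwinds, the destructor above releases everything at once via DeleteAll(). A hedged usage sketch, assuming the V8 3.x internal ZoneScope API; the function name and body are placeholders, not V8 code:

// Sketch only: assumes ZoneScope(Isolate*, ZoneScopeMode) with
// DELETE_ON_EXIT / DONT_DELETE_ON_EXIT, as in this V8 version's zone.h.
void CompileSomething(v8::internal::Isolate* isolate) {
  v8::internal::ZoneScope zone_scope(isolate, v8::internal::DELETE_ON_EXIT);
  // ... allocate AST nodes and other temporaries from isolate->zone() ...
}   // ~ZoneScope() runs here and calls DeleteAll() if this was the outermost such scope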
85 
86 
87 // Creates a new segment, sets its size, and pushes it to the front
88 // of the segment chain. Returns the new segment.
89 Segment* Zone::NewSegment(int size) {
90  Segment* result = reinterpret_cast<Segment*>(Malloced::New(size));
91  adjust_segment_bytes_allocated(size);
92  if (result != NULL) {
93  result->Initialize(segment_head_, size);
94  segment_head_ = result;
95  }
96  return result;
97 }
98 
99 
100 // Deletes the given segment. Does not touch the segment chain.
101 void Zone::DeleteSegment(Segment* segment, int size) {
102  adjust_segment_bytes_allocated(-size);
103  Malloced::Delete(segment);
104 }
105 
106 
107 void Zone::DeleteAll() {
108 #ifdef DEBUG
109  // Constant byte value used for zapping dead memory in debug mode.
110  static const unsigned char kZapDeadByte = 0xcd;
111 #endif
112 
113  // Find a segment with a suitable size to keep around.
114  Segment* keep = segment_head_;
115  while (keep != NULL && keep->size() > kMaximumKeptSegmentSize) {
116  keep = keep->next();
117  }
118 
119  // Traverse the chained list of segments, zapping (in debug mode)
120  // and freeing every segment except the one we wish to keep.
121  Segment* current = segment_head_;
122  while (current != NULL) {
123  Segment* next = current->next();
124  if (current == keep) {
125  // Unlink the segment we wish to keep from the list.
126  current->clear_next();
127  } else {
128  int size = current->size();
129 #ifdef DEBUG
130  // Zap the entire current segment (including the header).
131  memset(current, kZapDeadByte, size);
132 #endif
133  DeleteSegment(current, size);
134  }
135  current = next;
136  }
137 
138  // If we have found a segment we want to keep, we must recompute the
139  // variables 'position' and 'limit' to prepare for future allocate
140  // attempts. Otherwise, we must clear the position and limit to
141  // force a new segment to be allocated on demand.
142  if (keep != NULL) {
143  Address start = keep->start();
144  position_ = RoundUp(start, kAlignment);
145  limit_ = keep->end();
146 #ifdef DEBUG
147  // Zap the contents of the kept segment (but not the header).
148  memset(start, kZapDeadByte, keep->capacity());
149 #endif
150  } else {
151  position_ = limit_ = 0;
152  }
153 
154  // Update the head segment to be the kept segment (if any).
155  segment_head_ = keep;
156 }
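
Two things are worth spelling out about DeleteAll(): it deliberately keeps one segment of at most kMaximumKeptSegmentSize so the next round of allocations avoids an immediate malloc(), and in DEBUG builds it fills dead memory with 0xcd so reads through stale pointers are easy to recognize. A small standalone illustration of that zap pattern (not V8 code):

#include <stdint.h>
#include <cstdio>
#include <cstring>

int main() {
  unsigned char buffer[16];
  std::memset(buffer, 0xcd, sizeof(buffer));   // what DeleteAll() does to dead segments in DEBUG
  uint32_t stale;
  std::memcpy(&stale, buffer, sizeof(stale));  // simulate a read through a stale pointer
  std::printf("stale read: 0x%08x\n", static_cast<unsigned>(stale));  // prints 0xcdcdcdcd
  return 0;
}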
157 
158 
159 void Zone::DeleteKeptSegment() {
160  if (segment_head_ != NULL) {
161  DeleteSegment(segment_head_, segment_head_->size());
162  segment_head_ = NULL;
163  }
164 }
165 
166 
167 Address Zone::NewExpand(int size) {
168  // Make sure the requested size is already properly aligned and that
169  // there isn't enough room in the Zone to satisfy the request.
170  ASSERT(size == RoundDown(size, kAlignment));
171  ASSERT(size > limit_ - position_);
172 
173  // Compute the new segment size. We use a 'high water mark'
174  // strategy, where we increase the segment size every time we expand
175  // except that we employ a maximum segment size when we delete. This
176  // is to avoid excessive malloc() and free() overhead.
177  Segment* head = segment_head_;
178  int old_size = (head == NULL) ? 0 : head->size();
179  static const int kSegmentOverhead = sizeof(Segment) + kAlignment;
180  int new_size_no_overhead = size + (old_size << 1);
181  int new_size = kSegmentOverhead + new_size_no_overhead;
182  // Guard against integer overflow.
183  if (new_size_no_overhead < size || new_size < kSegmentOverhead) {
184  V8::FatalProcessOutOfMemory("Zone");
185  return NULL;
186  }
187  if (new_size < kMinimumSegmentSize) {
188  new_size = kMinimumSegmentSize;
189  } else if (new_size > kMaximumSegmentSize) {
190  // Limit the size of new segments to avoid growing the segment size
191  // exponentially, thus putting pressure on contiguous virtual address space.
192  // All the while making sure to allocate a segment large enough to hold the
193  // requested size.
194  new_size = Max(kSegmentOverhead + size, kMaximumSegmentSize);
195  }
196  Segment* segment = NewSegment(new_size);
197  if (segment == NULL) {
198  V8::FatalProcessOutOfMemory("Zone");
199  return NULL;
200  }
201 
202  // Recompute 'top' and 'limit' based on the new segment.
203  Address result = RoundUp(segment->start(), kAlignment);
204  position_ = result + size;
205  // Check for address overflow.
206  if (position_ < result) {
207  V8::FatalProcessOutOfMemory("Zone");
208  return NULL;
209  }
210  limit_ = segment->end();
211  ASSERT(position_ <= limit_);
212  return result;
213 }
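
The arithmetic above implements a simple doubling policy: each new segment is the requested size plus twice the size of the current head segment (plus header and alignment overhead), clamped between kMinimumSegmentSize and kMaximumSegmentSize. A standalone sketch of that policy follows; the constant values are assumptions chosen for illustration, not the ones defined in zone.h.

#include <algorithm>

// Assumed placeholder values; the real constants live in zone.h.
static const int kFakeAlignment = 8;
static const int kFakeHeaderSize = 16;               // stands in for sizeof(Segment)
static const int kFakeMinimumSegmentSize = 8 * 1024;
static const int kFakeMaximumSegmentSize = 1024 * 1024;

// Mirrors the high-water-mark growth policy in Zone::NewExpand().
int NextSegmentSize(int requested, int old_size) {
  const int overhead = kFakeHeaderSize + kFakeAlignment;
  int new_size = overhead + requested + (old_size << 1);   // roughly double each expansion
  if (new_size < kFakeMinimumSegmentSize) return kFakeMinimumSegmentSize;
  if (new_size > kFakeMaximumSegmentSize)
    return std::max(overhead + requested, kFakeMaximumSegmentSize);  // cap, but always fit the request
  return new_size;
}
// Example: NextSegmentSize(32, 8 * 1024) == 24 + 32 + 16384 == 16440 bytes.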
214 
215 
216 } } // namespace v8::internal
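
Zone::New() itself lives in zone-inl.h (included at the top of this file): it rounds the request up to kAlignment, bumps position_, and only falls back to NewExpand() above when the current segment is exhausted. The following is a simplified, self-contained sketch of that bump-pointer fast path; BumpZone and its members are illustrative names, not the real zone-inl.h code.

#include <cstdlib>
#include <stdint.h>

class BumpZone {
 public:
  BumpZone() : position_(0), limit_(0) {}

  // Fast path: align, bump the position pointer, and only call Expand()
  // (the stand-in for Zone::NewExpand) when the current block runs out.
  void* New(int size) {
    size = RoundUp(size);
    if (size > static_cast<int>(limit_ - position_)) {
      if (!Expand(size)) return NULL;
    }
    uintptr_t result = position_;
    position_ += size;                       // the common, malloc-free case
    return reinterpret_cast<void*>(result);
  }

 private:
  static int RoundUp(int n) { return (n + kAlign - 1) & ~(kAlign - 1); }

  bool Expand(int size) {
    int block = size < kBlockSize ? kBlockSize : size;
    char* memory = static_cast<char*>(std::malloc(block));  // never freed: sketch only
    if (memory == NULL) return false;
    position_ = reinterpret_cast<uintptr_t>(memory);
    limit_ = position_ + block;
    return true;
  }

  static const int kAlign = 8;
  static const int kBlockSize = 8 * 1024;
  uintptr_t position_;
  uintptr_t limit_;
};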