blob: 2d14d137efddf6119c8dd24daf7b5d5b4490375a [file] [log] [blame]
Ben Murdoch85b71792012-04-11 18:30:58 +01001// Copyright 2011 the V8 project authors. All rights reserved.
Steve Blocka7e24c12009-10-30 11:49:00 +00002// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
Ben Murdoch5d4cdbf2012-04-11 10:23:59 +010028#include "v8.h"
Ben Murdoch85b71792012-04-11 18:30:58 +010029
Steve Blocka7e24c12009-10-30 11:49:00 +000030#include "zone-inl.h"
Ben Murdoch85b71792012-04-11 18:30:58 +010031#include "splay-tree-inl.h"
Steve Blocka7e24c12009-10-30 11:49:00 +000032
33namespace v8 {
34namespace internal {
35
36
Steve Blocka7e24c12009-10-30 11:49:00 +000037// Segments represent chunks of memory: They have starting address
38// (encoded in the this pointer) and a size in bytes. Segments are
39// chained together forming a LIFO structure with the newest segment
Steve Block44f0eee2011-05-26 01:26:41 +010040// available as segment_head_. Segments are allocated using malloc()
Steve Blocka7e24c12009-10-30 11:49:00 +000041// and de-allocated using free().
42
class Segment {
 public:
  // Stores the chain link and total size.  Called once, right after the
  // raw block has been malloc()'ed: Segment has no constructor because
  // it is overlaid on raw memory at the start of the block.
  void Initialize(Segment* next, int size) {
    next_ = next;
    size_ = size;
  }

  Segment* next() const { return next_; }
  void clear_next() { next_ = NULL; }

  // Total size of the underlying malloc()'ed block, header included.
  int size() const { return size_; }
  // Bytes usable for zone allocation: total size minus this header.
  int capacity() const { return size_ - sizeof(Segment); }

  // First usable byte (just past the header) and one past the last byte.
  Address start() const { return address(sizeof(Segment)); }
  Address end() const { return address(size_); }

 private:
  // Computes the address of the nth byte in this segment.
  Address address(int n) const {
    return Address(this) + n;
  }

  Segment* next_;
  int size_;
};
68
69
// Constructs an empty zone: no segments yet, position/limit cleared so
// the first allocation forces a segment via NewExpand().
Zone::Zone()
    : zone_excess_limit_(256 * MB),
      segment_bytes_allocated_(0),
      position_(0),
      limit_(0),
      scope_nesting_(0),
      segment_head_(NULL) {
}
// Static allocation counter shared by all zones (see zone-inl.h).
unsigned Zone::allocation_size_ = 0;
79
// Leaving the outermost scope that requested deletion frees all zone
// memory; the nesting counter is decremented unconditionally.
ZoneScope::~ZoneScope() {
  ASSERT_EQ(Isolate::Current(), isolate_);
  if (ShouldDeleteOnExit()) isolate_->zone()->DeleteAll();
  isolate_->zone()->scope_nesting_--;
}
85
86
Steve Block44f0eee2011-05-26 01:26:41 +010087// Creates a new segment, sets it size, and pushes it to the front
88// of the segment chain. Returns the new segment.
89Segment* Zone::NewSegment(int size) {
90 Segment* result = reinterpret_cast<Segment*>(Malloced::New(size));
91 adjust_segment_bytes_allocated(size);
92 if (result != NULL) {
Ben Murdoch69a99ed2011-11-30 16:03:39 +000093 result->Initialize(segment_head_, size);
Steve Block44f0eee2011-05-26 01:26:41 +010094 segment_head_ = result;
95 }
96 return result;
97}
98
99
100// Deletes the given segment. Does not touch the segment chain.
101void Zone::DeleteSegment(Segment* segment, int size) {
102 adjust_segment_bytes_allocated(-size);
103 Malloced::Delete(segment);
104}
Steve Blocka7e24c12009-10-30 11:49:00 +0000105
106
// Frees every segment in the zone except (at most) one small one, which
// is kept and reset so it can serve the next allocations cheaply.
void Zone::DeleteAll() {
#ifdef DEBUG
  // Constant byte value used for zapping dead memory in debug mode.
  static const unsigned char kZapDeadByte = 0xcd;
#endif

  // Find a segment with a suitable size to keep around: the first one
  // in the chain no larger than kMaximumKeptSegmentSize, if any.
  Segment* keep = segment_head_;
  while (keep != NULL && keep->size() > kMaximumKeptSegmentSize) {
    keep = keep->next();
  }

  // Traverse the chained list of segments, zapping (in debug mode)
  // and freeing every segment except the one we wish to keep.
  Segment* current = segment_head_;
  while (current != NULL) {
    // Read the link before the segment is zapped/freed below.
    Segment* next = current->next();
    if (current == keep) {
      // Unlink the segment we wish to keep from the list.
      current->clear_next();
    } else {
      int size = current->size();
#ifdef DEBUG
      // Zap the entire current segment (including the header).
      memset(current, kZapDeadByte, size);
#endif
      DeleteSegment(current, size);
    }
    current = next;
  }

  // If we have found a segment we want to keep, we must recompute the
  // variables 'position' and 'limit' to prepare for future allocate
  // attempts. Otherwise, we must clear the position and limit to
  // force a new segment to be allocated on demand.
  if (keep != NULL) {
    Address start = keep->start();
    position_ = RoundUp(start, kAlignment);
    limit_ = keep->end();
#ifdef DEBUG
    // Zap the contents of the kept segment (but not the header).
    memset(start, kZapDeadByte, keep->capacity());
#endif
  } else {
    position_ = limit_ = 0;
  }

  // Update the head segment to be the kept segment (if any).
  segment_head_ = keep;
}
157
158
Ben Murdoch69a99ed2011-11-30 16:03:39 +0000159void Zone::DeleteKeptSegment() {
160 if (segment_head_ != NULL) {
161 DeleteSegment(segment_head_, segment_head_->size());
162 segment_head_ = NULL;
163 }
164}
165
166
// Slow path of Zone::New(): called when the current segment cannot
// satisfy an allocation of 'size' bytes.  Grows the zone by one segment
// and returns the newly reserved, aligned chunk.  On any failure it
// calls V8::FatalProcessOutOfMemory, which does not return.
Address Zone::NewExpand(int size) {
  // Make sure the requested size is already properly aligned and that
  // there isn't enough room in the Zone to satisfy the request.
  ASSERT(size == RoundDown(size, kAlignment));
  ASSERT(size > limit_ - position_);

  // Compute the new segment size. We use a 'high water mark'
  // strategy, where we increase the segment size every time we expand
  // except that we employ a maximum segment size when we delete. This
  // is to avoid excessive malloc() and free() overhead.
  Segment* head = segment_head_;
  int old_size = (head == NULL) ? 0 : head->size();
  static const int kSegmentOverhead = sizeof(Segment) + kAlignment;
  // Double the previous segment size (old_size << 1) and add headroom
  // for the request plus the segment header and alignment slack.
  int new_size_no_overhead = size + (old_size << 1);
  int new_size = kSegmentOverhead + new_size_no_overhead;
  // Guard against integer overflow in the two additions above.
  if (new_size_no_overhead < size || new_size < kSegmentOverhead) {
    V8::FatalProcessOutOfMemory("Zone");
    return NULL;
  }
  if (new_size < kMinimumSegmentSize) {
    new_size = kMinimumSegmentSize;
  } else if (new_size > kMaximumSegmentSize) {
    // Limit the size of new segments to avoid growing the segment size
    // exponentially, thus putting pressure on contiguous virtual address space.
    // All the while making sure to allocate a segment large enough to hold the
    // requested size.
    new_size = Max(kSegmentOverhead + size, kMaximumSegmentSize);
  }
  Segment* segment = NewSegment(new_size);
  if (segment == NULL) {
    V8::FatalProcessOutOfMemory("Zone");
    return NULL;
  }

  // Recompute 'top' and 'limit' based on the new segment.
  Address result = RoundUp(segment->start(), kAlignment);
  position_ = result + size;
  // Check for address overflow (wrap past the top of the address space).
  if (position_ < result) {
    V8::FatalProcessOutOfMemory("Zone");
    return NULL;
  }
  limit_ = segment->end();
  ASSERT(position_ <= limit_);
  return result;
}
214
215
216} } // namespace v8::internal