// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "zone-inl.h"
#include "splay-tree-inl.h"

namespace v8 {
namespace internal {


// Static zone state shared by all allocations: the bump-allocation
// cursor (position_) and the end of the current segment (limit_), plus
// accounting counters used to decide when to release excess memory.
Address Zone::position_ = 0;
Address Zone::limit_ = 0;
int Zone::zone_excess_limit_ = 256 * MB;
int Zone::segment_bytes_allocated_ = 0;
unsigned Zone::allocation_size_ = 0;

// When false, zone allocation triggers an assertion failure (debug aid).
bool AssertNoZoneAllocation::allow_allocation_ = true;

// Depth of nested ZoneScopes; the zone is torn down when it returns to 0.
int ZoneScope::nesting_ = 0;
47// Segments represent chunks of memory: They have starting address
48// (encoded in the this pointer) and a size in bytes. Segments are
49// chained together forming a LIFO structure with the newest segment
50// available as Segment::head(). Segments are allocated using malloc()
51// and de-allocated using free().
52
53class Segment {
54 public:
55 Segment* next() const { return next_; }
56 void clear_next() { next_ = NULL; }
57
58 int size() const { return size_; }
59 int capacity() const { return size_ - sizeof(Segment); }
60
61 Address start() const { return address(sizeof(Segment)); }
62 Address end() const { return address(size_); }
63
64 static Segment* head() { return head_; }
65 static void set_head(Segment* head) { head_ = head; }
66
67 // Creates a new segment, sets it size, and pushes it to the front
68 // of the segment chain. Returns the new segment.
69 static Segment* New(int size) {
70 Segment* result = reinterpret_cast<Segment*>(Malloced::New(size));
71 Zone::adjust_segment_bytes_allocated(size);
72 if (result != NULL) {
73 result->next_ = head_;
74 result->size_ = size;
75 head_ = result;
76 }
77 return result;
78 }
79
80 // Deletes the given segment. Does not touch the segment chain.
81 static void Delete(Segment* segment, int size) {
82 Zone::adjust_segment_bytes_allocated(-size);
83 Malloced::Delete(segment);
84 }
85
86 static int bytes_allocated() { return bytes_allocated_; }
87
88 private:
89 // Computes the address of the nth byte in this segment.
90 Address address(int n) const {
91 return Address(this) + n;
92 }
93
94 static Segment* head_;
95 static int bytes_allocated_;
96 Segment* next_;
97 int size_;
98};
99
100
101Segment* Segment::head_ = NULL;
102int Segment::bytes_allocated_ = 0;
103
104
105void Zone::DeleteAll() {
106#ifdef DEBUG
107 // Constant byte value used for zapping dead memory in debug mode.
108 static const unsigned char kZapDeadByte = 0xcd;
109#endif
110
111 // Find a segment with a suitable size to keep around.
112 Segment* keep = Segment::head();
113 while (keep != NULL && keep->size() > kMaximumKeptSegmentSize) {
114 keep = keep->next();
115 }
116
117 // Traverse the chained list of segments, zapping (in debug mode)
118 // and freeing every segment except the one we wish to keep.
119 Segment* current = Segment::head();
120 while (current != NULL) {
121 Segment* next = current->next();
122 if (current == keep) {
123 // Unlink the segment we wish to keep from the list.
124 current->clear_next();
125 } else {
126 int size = current->size();
127#ifdef DEBUG
128 // Zap the entire current segment (including the header).
129 memset(current, kZapDeadByte, size);
130#endif
131 Segment::Delete(current, size);
132 }
133 current = next;
134 }
135
136 // If we have found a segment we want to keep, we must recompute the
137 // variables 'position' and 'limit' to prepare for future allocate
138 // attempts. Otherwise, we must clear the position and limit to
139 // force a new segment to be allocated on demand.
140 if (keep != NULL) {
141 Address start = keep->start();
142 position_ = RoundUp(start, kAlignment);
143 limit_ = keep->end();
144#ifdef DEBUG
145 // Zap the contents of the kept segment (but not the header).
146 memset(start, kZapDeadByte, keep->capacity());
147#endif
148 } else {
149 position_ = limit_ = 0;
150 }
151
152 // Update the head segment to be the kept segment (if any).
153 Segment::set_head(keep);
154}
155
156
157Address Zone::NewExpand(int size) {
158 // Make sure the requested size is already properly aligned and that
159 // there isn't enough room in the Zone to satisfy the request.
160 ASSERT(size == RoundDown(size, kAlignment));
161 ASSERT(position_ + size > limit_);
162
163 // Compute the new segment size. We use a 'high water mark'
164 // strategy, where we increase the segment size every time we expand
165 // except that we employ a maximum segment size when we delete. This
166 // is to avoid excessive malloc() and free() overhead.
167 Segment* head = Segment::head();
168 int old_size = (head == NULL) ? 0 : head->size();
169 static const int kSegmentOverhead = sizeof(Segment) + kAlignment;
170 int new_size = kSegmentOverhead + size + (old_size << 1);
171 if (new_size < kMinimumSegmentSize) {
172 new_size = kMinimumSegmentSize;
173 } else if (new_size > kMaximumSegmentSize) {
174 // Limit the size of new segments to avoid growing the segment size
175 // exponentially, thus putting pressure on contiguous virtual address space.
176 // All the while making sure to allocate a segment large enough to hold the
177 // requested size.
178 new_size = Max(kSegmentOverhead + size, kMaximumSegmentSize);
179 }
180 Segment* segment = Segment::New(new_size);
181 if (segment == NULL) {
182 V8::FatalProcessOutOfMemory("Zone");
183 return NULL;
184 }
185
186 // Recompute 'top' and 'limit' based on the new segment.
187 Address result = RoundUp(segment->start(), kAlignment);
188 position_ = result + size;
189 limit_ = segment->end();
190 ASSERT(position_ <= limit_);
191 return result;
192}
193
194
195} } // namespace v8::internal