// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <string.h>

#include "v8.h"
#include "zone-inl.h"

namespace v8 {
namespace internal {


// Segments represent chunks of memory: they have a starting address
// (encoded in the this pointer) and a size in bytes. Segments are
// chained together to form a LIFO structure, with the newest segment
// available as segment_head_. Segments are allocated using malloc()
// and de-allocated using free().

class Segment {
 public:
  void Initialize(Segment* next, int size) {
    next_ = next;
    size_ = size;
  }

  Segment* next() const { return next_; }
  void clear_next() { next_ = NULL; }

  int size() const { return size_; }
  int capacity() const { return size_ - sizeof(Segment); }

  Address start() const { return address(sizeof(Segment)); }
  Address end() const { return address(size_); }

 private:
  // Computes the address of the nth byte in this segment.
  Address address(int n) const {
    return Address(this) + n;
  }

  Segment* next_;
  int size_;
};


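// Illustrative layout sketch (derived from the accessors above): the
// Segment header lives at the start of the malloc'ed block, and the
// usable payload follows it directly, which is why capacity() equals
// size() minus sizeof(Segment).
//
//   this                start()                              end()
//    |                     |                                   |
//    v                     v                                   v
//    +---------------------+-----------------------------------+
//    | header: next_,size_ |     payload (capacity() bytes)    |
//    +---------------------+-----------------------------------+
//    <-- sizeof(Segment) --><---------- capacity() ----------->
//    <------------------------ size() ----------------------->

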
Zone::Zone()
    : zone_excess_limit_(256 * MB),
      segment_bytes_allocated_(0),
      position_(0),
      limit_(0),
      scope_nesting_(0),
      segment_head_(NULL) {
}
unsigned Zone::allocation_size_ = 0;

ZoneScope::~ZoneScope() {
  ASSERT_EQ(Isolate::Current(), isolate_);
  if (ShouldDeleteOnExit()) isolate_->zone()->DeleteAll();
  isolate_->zone()->scope_nesting_--;
}


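// Usage sketch (hedged; see zone.h for the actual ZoneScope interface):
// scopes nest, and the destructor above only releases zone memory when
// the exiting scope asked for deletion, e.g.:
//
//   {
//     ZoneScope scope(isolate, DELETE_ON_EXIT);
//     // ... allocate AST nodes etc. in the zone ...
//   }  // DeleteAll() runs here, keeping at most one small segment.

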
// Creates a new segment, sets its size, and pushes it to the front
// of the segment chain. Returns the new segment.
Segment* Zone::NewSegment(int size) {
  Segment* result = reinterpret_cast<Segment*>(Malloced::New(size));
  adjust_segment_bytes_allocated(size);
  if (result != NULL) {
    result->Initialize(segment_head_, size);
    segment_head_ = result;
  }
  return result;
}


// Deletes the given segment. Does not touch the segment chain.
void Zone::DeleteSegment(Segment* segment, int size) {
  adjust_segment_bytes_allocated(-size);
  Malloced::Delete(segment);
}


void Zone::DeleteAll() {
#ifdef DEBUG
  // Constant byte value used for zapping dead memory in debug mode.
  static const unsigned char kZapDeadByte = 0xcd;
#endif

  // Find a segment with a suitable size to keep around.
  Segment* keep = segment_head_;
  while (keep != NULL && keep->size() > kMaximumKeptSegmentSize) {
    keep = keep->next();
  }

  // Traverse the chained list of segments, zapping (in debug mode)
  // and freeing every segment except the one we wish to keep.
  Segment* current = segment_head_;
  while (current != NULL) {
    Segment* next = current->next();
    if (current == keep) {
      // Unlink the segment we wish to keep from the list.
      current->clear_next();
    } else {
      int size = current->size();
#ifdef DEBUG
      // Zap the entire current segment (including the header).
      memset(current, kZapDeadByte, size);
#endif
      DeleteSegment(current, size);
    }
    current = next;
  }

  // If we have found a segment we want to keep, we must recompute the
  // variables 'position' and 'limit' to prepare for future allocate
  // attempts. Otherwise, we must clear the position and limit to
  // force a new segment to be allocated on demand.
  if (keep != NULL) {
    Address start = keep->start();
    position_ = RoundUp(start, kAlignment);
    limit_ = keep->end();
#ifdef DEBUG
    // Zap the contents of the kept segment (but not the header).
    memset(start, kZapDeadByte, keep->capacity());
#endif
  } else {
    position_ = limit_ = 0;
  }

  // Update the head segment to be the kept segment (if any).
  segment_head_ = keep;
}


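// Releases the single segment (if any) that DeleteAll() kept around
// for reuse, returning the zone to a completely empty state.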
void Zone::DeleteKeptSegment() {
  if (segment_head_ != NULL) {
    DeleteSegment(segment_head_, segment_head_->size());
    segment_head_ = NULL;
  }
}


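// For context, NewExpand() below is the slow path of zone allocation.
// The fast path (a hedged sketch; the real inline lives in zone-inl.h)
// simply bumps position_ and falls back to NewExpand() when the current
// segment runs out of room:
//
//   inline void* Zone::New(int size) {
//     size = RoundUp(size, kAlignment);
//     Address result = position_;
//     if (size > limit_ - position_) {
//       result = NewExpand(size);  // Allocate a fresh segment.
//     } else {
//       position_ += size;
//     }
//     return reinterpret_cast<void*>(result);
//   }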
Address Zone::NewExpand(int size) {
  // Make sure the requested size is already properly aligned and that
  // there isn't enough room in the Zone to satisfy the request.
  ASSERT(size == RoundDown(size, kAlignment));
  ASSERT(size > limit_ - position_);

  // Compute the new segment size. We use a 'high water mark'
  // strategy, where we increase the segment size every time we expand
  // except that we employ a maximum segment size when we delete. This
  // is to avoid excessive malloc() and free() overhead.
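  // Worked example (derived from the arithmetic below): if the current
  // head segment is 8KB and the caller asks for 256 bytes, the next
  // segment requests kSegmentOverhead + 256 + 2 * 8KB bytes, so the
  // doubling term dominates; the result is then clamped to the range
  // [kMinimumSegmentSize, kMaximumSegmentSize], except that an
  // oversized request may still exceed the maximum.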
  Segment* head = segment_head_;
  int old_size = (head == NULL) ? 0 : head->size();
  static const int kSegmentOverhead = sizeof(Segment) + kAlignment;
  int new_size_no_overhead = size + (old_size << 1);
  int new_size = kSegmentOverhead + new_size_no_overhead;
  // Guard against integer overflow.
  if (new_size_no_overhead < size || new_size < kSegmentOverhead) {
    V8::FatalProcessOutOfMemory("Zone");
    return NULL;
  }
  if (new_size < kMinimumSegmentSize) {
    new_size = kMinimumSegmentSize;
  } else if (new_size > kMaximumSegmentSize) {
    // Limit the size of new segments to avoid growing the segment size
    // exponentially, which puts pressure on contiguous virtual address
    // space, while still making sure to allocate a segment large enough
    // to hold the requested size.
    new_size = Max(kSegmentOverhead + size, kMaximumSegmentSize);
  }
  Segment* segment = NewSegment(new_size);
  if (segment == NULL) {
    V8::FatalProcessOutOfMemory("Zone");
    return NULL;
  }

  // Recompute 'position' and 'limit' based on the new segment.
  Address result = RoundUp(segment->start(), kAlignment);
  position_ = result + size;
  // Check for address overflow.
  if (position_ < result) {
    V8::FatalProcessOutOfMemory("Zone");
    return NULL;
  }
  limit_ = segment->end();
  ASSERT(position_ <= limit_);
  return result;
}


} }  // namespace v8::internal