// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/zone.h"

#include <cstring>

#include "src/v8.h"

#ifdef V8_USE_ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif  // V8_USE_ADDRESS_SANITIZER

namespace v8 {
namespace internal {

namespace {

#ifdef V8_USE_ADDRESS_SANITIZER

const size_t kASanRedzoneBytes = 24;  // Must be a multiple of 8.

#else

#define ASAN_POISON_MEMORY_REGION(start, size) \
  do {                                         \
    USE(start);                                \
    USE(size);                                 \
  } while (false)

#define ASAN_UNPOISON_MEMORY_REGION(start, size) \
  do {                                           \
    USE(start);                                  \
    USE(size);                                   \
  } while (false)

const size_t kASanRedzoneBytes = 0;

#endif  // V8_USE_ADDRESS_SANITIZER

}  // namespace


// Segments represent chunks of memory: They have a starting address
// (encoded in the this pointer) and a size in bytes. Segments are
// chained together to form a LIFO structure, with the newest segment
// available as segment_head_. Segments are allocated and freed through
// the Zone's AccountingAllocator.
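//
// Rough layout, as implied by the accessors below (start() skips the
// header; end() points one past the last usable byte):
//
//   +----------------+----------------------------+
//   | Segment header | usable payload             |
//   +----------------+----------------------------+
//   ^ this           ^ start()                    ^ end()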

class Segment {
 public:
  void Initialize(Segment* next, size_t size) {
    next_ = next;
    size_ = size;
  }

  Segment* next() const { return next_; }
  void clear_next() { next_ = nullptr; }

  size_t size() const { return size_; }
  size_t capacity() const { return size_ - sizeof(Segment); }

  Address start() const { return address(sizeof(Segment)); }
  Address end() const { return address(size_); }

 private:
  // Computes the address of the nth byte in this segment.
  Address address(size_t n) const { return Address(this) + n; }

  Segment* next_;
  size_t size_;
};

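// A minimal usage sketch (assuming base::AccountingAllocator can be
// constructed directly here): pointers returned by New() stay valid until
// the Zone dies, and are all released at once.
//
//   base::AccountingAllocator allocator;
//   {
//     Zone zone(&allocator);
//     void* block = zone.New(64);  // 64 zone-allocated bytes.
//     // ... use block; no per-allocation free() ...
//   }  // ~Zone returns every segment to the allocator.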
Zone::Zone(base::AccountingAllocator* allocator)
    : allocation_size_(0),
      segment_bytes_allocated_(0),
      position_(0),
      limit_(0),
      allocator_(allocator),
      segment_head_(nullptr) {}

Zone::~Zone() {
  DeleteAll();
  DeleteKeptSegment();

  DCHECK(segment_bytes_allocated_ == 0);
}


void* Zone::New(size_t size) {
  // Round up the requested size to fit the alignment.
  size = RoundUp(size, kAlignment);

  // If the allocation size is divisible by 8 then we return an 8-byte aligned
  // address.
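  // Bit trick, relying on size already being a multiple of 4: ((~size) & 4)
  // is 4 exactly when size is a multiple of 8, and (position_ & 4) is 4
  // exactly when position_ is 4-aligned but not 8-aligned. ANDing the two
  // bumps position_ by 4 only when both hold, making the result 8-aligned.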
  if (kPointerSize == 4 && kAlignment == 4) {
    position_ += ((~size) & 4) & (reinterpret_cast<intptr_t>(position_) & 4);
  } else {
    DCHECK(kAlignment >= kPointerSize);
  }

  // Check if the requested size is available without expanding.
  Address result = position_;

  const size_t size_with_redzone = size + kASanRedzoneBytes;
  const uintptr_t limit = reinterpret_cast<uintptr_t>(limit_);
  const uintptr_t position = reinterpret_cast<uintptr_t>(position_);
  // position_ > limit_ can be true after the alignment correction above.
  if (limit < position || size_with_redzone > limit - position) {
    result = NewExpand(size_with_redzone);
  } else {
    position_ += size_with_redzone;
  }

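  // The redzone lies immediately after the user-visible allocation; under
  // ASan any access into it is reported. In non-ASan builds the poison
  // macros are no-ops and kASanRedzoneBytes is 0, so this costs nothing.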
  Address redzone_position = result + size;
  DCHECK(redzone_position + kASanRedzoneBytes == position_);
  ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes);

  // Check that the result has the proper alignment and return it.
  DCHECK(IsAddressAligned(result, kAlignment, 0));
  allocation_size_ += size;
  return reinterpret_cast<void*>(result);
}


void Zone::DeleteAll() {
#ifdef DEBUG
  // Constant byte value used for zapping dead memory in debug mode.
  static const unsigned char kZapDeadByte = 0xcd;
#endif

  // Find a segment with a suitable size to keep around.
  Segment* keep = nullptr;
  // Traverse the chained list of segments, zapping (in debug mode)
  // and freeing every segment except the one we wish to keep.
  for (Segment* current = segment_head_; current;) {
    Segment* next = current->next();
    if (!keep && current->size() <= kMaximumKeptSegmentSize) {
      // Unlink the segment we wish to keep from the list.
      keep = current;
      keep->clear_next();
    } else {
      size_t size = current->size();
#ifdef DEBUG
      // Un-poison first so the zapping doesn't trigger ASan complaints.
      ASAN_UNPOISON_MEMORY_REGION(current, size);
      // Zap the entire current segment (including the header).
      memset(current, kZapDeadByte, size);
#endif
      DeleteSegment(current, size);
    }
    current = next;
  }

  // If we have found a segment we want to keep, we must recompute the
  // variables 'position' and 'limit' to prepare for future allocation
  // attempts. Otherwise, we must clear the position and limit to
  // force a new segment to be allocated on demand.
  if (keep) {
    Address start = keep->start();
    position_ = RoundUp(start, kAlignment);
    limit_ = keep->end();
    // Un-poison so we can re-use the segment later.
    ASAN_UNPOISON_MEMORY_REGION(start, keep->capacity());
#ifdef DEBUG
    // Zap the contents of the kept segment (but not the header).
    memset(start, kZapDeadByte, keep->capacity());
#endif
  } else {
    position_ = limit_ = 0;
  }

  allocation_size_ = 0;
  // Update the head segment to be the kept segment (if any).
  segment_head_ = keep;
}


void Zone::DeleteKeptSegment() {
#ifdef DEBUG
  // Constant byte value used for zapping dead memory in debug mode.
  static const unsigned char kZapDeadByte = 0xcd;
#endif

  DCHECK(segment_head_ == nullptr || segment_head_->next() == nullptr);
  if (segment_head_ != nullptr) {
    size_t size = segment_head_->size();
#ifdef DEBUG
    // Un-poison first so the zapping doesn't trigger ASan complaints.
    ASAN_UNPOISON_MEMORY_REGION(segment_head_, size);
    // Zap the entire kept segment (including the header).
    memset(segment_head_, kZapDeadByte, size);
#endif
    DeleteSegment(segment_head_, size);
    segment_head_ = nullptr;
  }

  DCHECK(segment_bytes_allocated_ == 0);
}


// Creates a new segment, sets its size, and pushes it to the front
// of the segment chain. Returns the new segment.
Segment* Zone::NewSegment(size_t size) {
  Segment* result = reinterpret_cast<Segment*>(allocator_->Allocate(size));
  segment_bytes_allocated_ += size;
  if (result != nullptr) {
    result->Initialize(segment_head_, size);
    segment_head_ = result;
  }
  return result;
}


// Deletes the given segment. Does not touch the segment chain.
void Zone::DeleteSegment(Segment* segment, size_t size) {
  segment_bytes_allocated_ -= size;
  allocator_->Free(segment, size);
}


Address Zone::NewExpand(size_t size) {
  // Make sure the requested size is already properly aligned and that
  // there isn't enough room in the Zone to satisfy the request.
  DCHECK_EQ(size, RoundDown(size, kAlignment));
  DCHECK(limit_ < position_ ||
         reinterpret_cast<uintptr_t>(limit_) -
                 reinterpret_cast<uintptr_t>(position_) <
             size);

  // Compute the new segment size. We use a 'high water mark'
  // strategy, where we increase the segment size every time we expand
  // except that we employ a maximum segment size when we delete. This
  // is to avoid excessive malloc() and free() overhead.
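  // For example: with a 32KB head segment, an 8-byte request computes
  // new_size = kSegmentOverhead + 8 + 64KB below, so segments roughly
  // double in size until kMaximumSegmentSize caps the growth.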
  Segment* head = segment_head_;
  const size_t old_size = (head == nullptr) ? 0 : head->size();
  static const size_t kSegmentOverhead = sizeof(Segment) + kAlignment;
  const size_t new_size_no_overhead = size + (old_size << 1);
  size_t new_size = kSegmentOverhead + new_size_no_overhead;
  const size_t min_new_size = kSegmentOverhead + size;
  // Guard against integer overflow.
  if (new_size_no_overhead < size || new_size < kSegmentOverhead) {
    V8::FatalProcessOutOfMemory("Zone");
    return nullptr;
  }
  if (new_size < kMinimumSegmentSize) {
    new_size = kMinimumSegmentSize;
  } else if (new_size > kMaximumSegmentSize) {
    // Limit the size of new segments to avoid growing the segment size
    // exponentially, thus putting pressure on contiguous virtual address space.
    // All the while making sure to allocate a segment large enough to hold the
    // requested size.
    new_size = Max(min_new_size, kMaximumSegmentSize);
  }
  if (new_size > INT_MAX) {
    V8::FatalProcessOutOfMemory("Zone");
    return nullptr;
  }
  Segment* segment = NewSegment(new_size);
  if (segment == nullptr) {
    V8::FatalProcessOutOfMemory("Zone");
    return nullptr;
  }

  // Recompute 'position' and 'limit' based on the new segment.
  Address result = RoundUp(segment->start(), kAlignment);
  position_ = result + size;
  // Check for address overflow.
  // (Should not happen since the segment is guaranteed to accommodate
  // size bytes + header and alignment padding.)
  DCHECK(reinterpret_cast<uintptr_t>(position_) >=
         reinterpret_cast<uintptr_t>(result));
  limit_ = segment->end();
  DCHECK(position_ <= limit_);
  return result;
}

}  // namespace internal
}  // namespace v8