//===-- sanitizer_allocator64.h ---------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// Specialized allocator which works only in 64-bit address space.
// To be used by ThreadSanitizer, MemorySanitizer and possibly other tools.
// The main feature of this allocator is that the header is located far away
// from the user memory region, so that the tool does not use extra shadow
// for the header.
//
// Status: not yet ready.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#define SANITIZER_ALLOCATOR_H

#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"

namespace __sanitizer {

// Maps size class id to size and back.
class DefaultSizeClassMap {
 private:
  // Here we use a spline composed of 5 polynomials of order 1.
  // The first size class is l0, then the classes go with step s0
  // until they reach l1, after which they go with step s1 and so on.
  // Steps should be powers of two for cheap division.
  // The size of the last size class should be a power of two.
  // There should be at most 256 size classes.
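  //
  // A worked example, derived from the constants below (not part of the
  // original comment): with l0 = 16 and s0 = 16, class 0 serves size 16 and
  // class 1 serves size 32; ClassID(100) = (100 - 16 + 16 - 1) / 16 = 6 and
  // Size(6) = 16 + 16 * 6 = 112 >= 100. Summing the classes of all five
  // segments gives u4 = 255, so kNumClasses = 256, which is exactly what the
  // COMPILER_CHECK(kNumClasses <= 256) below allows.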
  static const uptr l0 = 1 << 4;
  static const uptr l1 = 1 << 9;
  static const uptr l2 = 1 << 12;
  static const uptr l3 = 1 << 15;
  static const uptr l4 = 1 << 18;
  static const uptr l5 = 1 << 21;

  static const uptr s0 = 1 << 4;
  static const uptr s1 = 1 << 6;
  static const uptr s2 = 1 << 9;
  static const uptr s3 = 1 << 12;
  static const uptr s4 = 1 << 15;

  static const uptr u0 = 0  + (l1 - l0) / s0;
  static const uptr u1 = u0 + (l2 - l1) / s1;
  static const uptr u2 = u1 + (l3 - l2) / s2;
  static const uptr u3 = u2 + (l4 - l3) / s3;
  static const uptr u4 = u3 + (l5 - l4) / s4;

 public:
  static const uptr kNumClasses = u4 + 1;
  static const uptr kMaxSize = l5;
  static const uptr kMinSize = l0;

  COMPILER_CHECK(kNumClasses <= 256);
  COMPILER_CHECK((kMaxSize & (kMaxSize - 1)) == 0);

  static uptr Size(uptr class_id) {
    if (class_id <= u0) return l0 + s0 * (class_id - 0);
    if (class_id <= u1) return l1 + s1 * (class_id - u0);
    if (class_id <= u2) return l2 + s2 * (class_id - u1);
    if (class_id <= u3) return l3 + s3 * (class_id - u2);
    if (class_id <= u4) return l4 + s4 * (class_id - u3);
    return 0;
  }
  static uptr ClassID(uptr size) {
    if (size <= l1) return 0  + (size - l0 + s0 - 1) / s0;
    if (size <= l2) return u0 + (size - l1 + s1 - 1) / s1;
    if (size <= l3) return u1 + (size - l2 + s2 - 1) / s2;
    if (size <= l4) return u2 + (size - l3 + s3 - 1) / s3;
    if (size <= l5) return u3 + (size - l4 + s4 - 1) / s4;
    return 0;
  }
};

// Space: a portion of address space of kSpaceSize bytes starting at
// a fixed address (kSpaceBeg). Both constants are powers of two and
// kSpaceBeg is kSpaceSize-aligned.
//
// Region: a part of Space dedicated to a single size class.
// There are kNumClasses Regions of equal size.
//
// UserChunk: a piece of memory returned to user.
// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
//
// A Region looks like this:
// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
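//
// For example (illustrative numbers, not mandated by this header): with
// kSpaceSize = 2^40 and 256 size classes, kRegionSize = 2^32, so a pointer p
// inside Space belongs to class (p / kRegionSize) % 256, and the metadata of
// the chunk_idx-th UserChunk of class c lives at
// kSpaceBeg + (c + 1) * kRegionSize - (chunk_idx + 1) * kMetadataSize,
// exactly as GetSizeClass() and GetMetaData() below compute.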
template <const uptr kSpaceBeg, const uptr kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap>
class SizeClassAllocator64 {
 public:
  void Init() {
    CHECK_EQ(AllocBeg(), reinterpret_cast<uptr>(MmapFixedNoReserve(
             AllocBeg(), AllocSize())));
  }
  NOINLINE
  void *Allocate(uptr size) {
    CHECK_LE(size, SizeClassMap::kMaxSize);
    return AllocateBySizeClass(SizeClassMap::ClassID(size));
  }
  NOINLINE
  void Deallocate(void *p) {
    CHECK(PointerIsMine(p));
    DeallocateBySizeClass(p, GetSizeClass(p));
  }
  bool PointerIsMine(void *p) {
    return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
  }
  uptr GetSizeClass(void *p) {
    return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClasses;
  }

  void *GetMetaData(void *p) {
    uptr class_id = GetSizeClass(p);
    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), class_id);
    return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
                                   (1 + chunk_idx) * kMetadataSize);
  }

  uptr TotalMemoryUsed() {
    uptr res = 0;
    for (uptr i = 0; i < kNumClasses; i++)
      res += GetRegionInfo(i)->allocated_user;
    return res;
  }

  // Test-only.
  void TestOnlyUnmap() {
    UnmapOrDie(reinterpret_cast<void*>(AllocBeg()), AllocSize());
  }

 private:
  static const uptr kNumClasses = 256;  // Power of two <= 256
  COMPILER_CHECK(kNumClasses <= SizeClassMap::kNumClasses);
  static const uptr kRegionSize = kSpaceSize / kNumClasses;
  COMPILER_CHECK((kRegionSize >> 32) > 0);  // kRegionSize must be >= 2^32.
  // When refilling a free list, populate it with at most this many bytes at
  // once, or with a single chunk if the chunk size is greater.
  static const uptr kPopulateSize = 1 << 18;

  struct LifoListNode {
    LifoListNode *next;
  };

  struct RegionInfo {
    uptr mutex;  // FIXME
    LifoListNode *free_list;
    uptr allocated_user;  // Bytes allocated for user memory.
    uptr allocated_meta;  // Bytes allocated for metadata.
    char padding[kCacheLineSize - 4 * sizeof(uptr)];
  };
  COMPILER_CHECK(sizeof(RegionInfo) == kCacheLineSize);

  uptr AdditionalSize() {
    uptr res = sizeof(RegionInfo) * kNumClasses;
    CHECK_EQ(res % kPageSize, 0);
    return res;
  }
  uptr AllocBeg() { return kSpaceBeg - AdditionalSize(); }
  uptr AllocSize() { return kSpaceSize + AdditionalSize(); }

  RegionInfo *GetRegionInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg);
    return &regions[-1 - class_id];
  }

  void PushLifoList(LifoListNode **list, LifoListNode *node) {
    node->next = *list;
    *list = node;
  }

  LifoListNode *PopLifoList(LifoListNode **list) {
    LifoListNode *res = *list;
    *list = res->next;
    return res;
  }

  uptr GetChunkIdx(uptr chunk, uptr class_id) {
    u32 offset = chunk % kRegionSize;
    // Here we divide by a non-constant. This is costly.
    // We require that kRegionSize is at least 2^32 so that offset is 32-bit.
    // We save 2x by using 32-bit div, but may need to use a 256-way switch.
    return offset / (u32)SizeClassMap::Size(class_id);
  }

  LifoListNode *PopulateFreeList(uptr class_id, RegionInfo *region) {
    uptr size = SizeClassMap::Size(class_id);
    uptr beg_idx = region->allocated_user;
    uptr end_idx = beg_idx + kPopulateSize;
    LifoListNode *res = 0;
    uptr region_beg = kSpaceBeg + kRegionSize * class_id;
    uptr idx = beg_idx;
    uptr i = 0;
    do {  // do-while loop because we need to put at least one item.
      uptr p = region_beg + idx;
      PushLifoList(&res, reinterpret_cast<LifoListNode*>(p));
      idx += size;
      i++;
    } while (idx < end_idx);
    region->allocated_user += idx - beg_idx;
    region->allocated_meta += i * kMetadataSize;
    CHECK_LT(region->allocated_user + region->allocated_meta, kRegionSize);
    return res;
  }

  void *AllocateBySizeClass(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    // FIXME: Lock region->mutex;
    if (!region->free_list) {
      region->free_list = PopulateFreeList(class_id, region);
    }
    CHECK_NE(region->free_list, 0);
    LifoListNode *node = PopLifoList(&region->free_list);
    return reinterpret_cast<void*>(node);
  }

  void DeallocateBySizeClass(void *p, uptr class_id) {
    RegionInfo *region = GetRegionInfo(class_id);
    // FIXME: Lock region->mutex;
    PushLifoList(&region->free_list, reinterpret_cast<LifoListNode*>(p));
  }
};
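
// A minimal usage sketch (assumed, for illustration only; the template
// arguments below are example values, not the ones the tools actually use):
//
//   typedef SizeClassAllocator64<0x700000000000ULL, 0x010000000000ULL, 16,
//                                DefaultSizeClassMap> Allocator64;
//   Allocator64 a;
//   a.Init();
//   void *p = a.Allocate(100);      // served from size class ClassID(100)
//   void *m = a.GetMetaData(p);     // 16 metadata bytes near the region end
//   a.Deallocate(p);
//   a.TestOnlyUnmap();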

// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
// The result is always page-aligned.
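//
// Layout (as implemented below): each Allocate(size) maps
// RoundUpTo(size, kPageSize) + kPageSize bytes; the extra leading page holds
// the Header (size plus list links) and the metadata returned by
// GetMetaData(), and the pointer handed to the user is map + kPageSize.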
class LargeMmapAllocator {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }
  void *Allocate(uptr size) {
    uptr map_size = RoundUpMapSize(size);
    void *map = MmapOrDie(map_size, "LargeMmapAllocator");
    void *res = reinterpret_cast<void*>(reinterpret_cast<uptr>(map)
                                        + kPageSize);
    Header *h = GetHeader(res);
    h->size = size;
    {
      // FIXME: lock
      h->next = list_;
      h->prev = 0;
      if (list_)
        list_->prev = h;
      list_ = h;
    }
    return res;
  }

  void Deallocate(void *p) {
    Header *h = GetHeader(p);
    uptr map_size = RoundUpMapSize(h->size);
    {
      // FIXME: lock
      Header *prev = h->prev;
      Header *next = h->next;
      if (prev)
        prev->next = next;
      if (next)
        next->prev = prev;
      if (h == list_)
        list_ = next;
    }
    UnmapOrDie(h, map_size);
  }

  uptr TotalMemoryUsed() {
    // FIXME: lock
    uptr res = 0;
    for (Header *l = list_; l; l = l->next) {
      res += RoundUpMapSize(l->size);
    }
    return res;
  }

  bool PointerIsMine(void *p) {
    // Fast check.
    if ((reinterpret_cast<uptr>(p) % kPageSize) != 0) return false;
    // FIXME: lock
    for (Header *l = list_; l; l = l->next) {
      if (GetUser(l) == p) return true;
    }
    return false;
  }

  // At least kPageSize/2 metadata bytes are available.
  void *GetMetaData(void *p) {
    return GetHeader(p) + 1;
  }

 private:
  struct Header {
    uptr size;
    Header *next;
    Header *prev;
  };

  Header *GetHeader(void *p) {
    return reinterpret_cast<Header*>(reinterpret_cast<uptr>(p) - kPageSize);
  }

  void *GetUser(Header *h) {
    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + kPageSize);
  }

  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, kPageSize) + kPageSize;
  }

  Header *list_;
  uptr lock_;  // FIXME
};
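
// How the two allocators are expected to fit together (an assumption based on
// the comments above, not something this header defines): requests up to
// SizeClassMap::kMaxSize go to SizeClassAllocator64, everything larger goes
// to LargeMmapAllocator. A hypothetical dispatcher could look like this:
//
//   void *Allocate(uptr size) {
//     if (size <= DefaultSizeClassMap::kMaxSize)
//       return primary_.Allocate(size);    // SizeClassAllocator64<...>
//     return secondary_.Allocate(size);    // LargeMmapAllocator
//   }
//   void Deallocate(void *p) {
//     if (primary_.PointerIsMine(p))
//       primary_.Deallocate(p);
//     else
//       secondary_.Deallocate(p);
//   }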

}  // namespace __sanitizer

#endif  // SANITIZER_ALLOCATOR_H