//===-- sanitizer_allocator64.h ---------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// Specialized allocator which works only in 64-bit address space.
// To be used by ThreadSanitizer, MemorySanitizer and possibly other tools.
// The main feature of this allocator is that the header is located far away
// from the user memory region, so that the tool does not use extra shadow
// for the header.
//
// Status: not yet ready.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#define SANITIZER_ALLOCATOR_H

#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"

namespace __sanitizer {

// Maps size class id to size and back.
class DefaultSizeClassMap {
 private:
  // Here we use a spline composed of 5 polynomials of order 1.
  // The first size class is l0, then the classes go with step s0
  // until they reach l1, after which they go with step s1 and so on.
  // Steps should be powers of two for cheap division.
  // The size of the last size class should be a power of two.
  // There should be at most 256 size classes.
  static const uptr l0 = 1 << 4;
  static const uptr l1 = 1 << 9;
  static const uptr l2 = 1 << 12;
  static const uptr l3 = 1 << 15;
  static const uptr l4 = 1 << 18;
  static const uptr l5 = 1 << 21;

  static const uptr s0 = 1 << 4;
  static const uptr s1 = 1 << 6;
  static const uptr s2 = 1 << 9;
  static const uptr s3 = 1 << 12;
  static const uptr s4 = 1 << 15;

  static const uptr u0 = 0 + (l1 - l0) / s0;
  static const uptr u1 = u0 + (l2 - l1) / s1;
  static const uptr u2 = u1 + (l3 - l2) / s2;
  static const uptr u3 = u2 + (l4 - l3) / s3;
  static const uptr u4 = u3 + (l5 - l4) / s4;

 public:
  static const uptr kNumClasses = u4 + 1;
  static const uptr kMaxSize = l5;
  static const uptr kMinSize = l0;

  COMPILER_CHECK(kNumClasses <= 256);
  COMPILER_CHECK((kMaxSize & (kMaxSize - 1)) == 0);

  static uptr Size(uptr class_id) {
    if (class_id <= u0) return l0 + s0 * (class_id - 0);
    if (class_id <= u1) return l1 + s1 * (class_id - u0);
    if (class_id <= u2) return l2 + s2 * (class_id - u1);
    if (class_id <= u3) return l3 + s3 * (class_id - u2);
    if (class_id <= u4) return l4 + s4 * (class_id - u3);
    return 0;
  }
  static uptr ClassID(uptr size) {
    if (size <= l1) return 0 + (size - l0 + s0 - 1) / s0;
    if (size <= l2) return u0 + (size - l1 + s1 - 1) / s1;
    if (size <= l3) return u1 + (size - l2 + s2 - 1) / s2;
    if (size <= l4) return u2 + (size - l3 + s3 - 1) / s3;
    if (size <= l5) return u3 + (size - l4 + s4 - 1) / s4;
    return 0;
  }
};
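
// With the constants above, DefaultSizeClassMap covers sizes from
// kMinSize == 16 up to kMaxSize == 1 << 21 (2 MB) with kNumClasses == 256.
// For example, ClassID(100) == 6 and Size(6) == 112, i.e. a 100-byte
// request is rounded up to the 112-byte size class.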

// Space: a portion of address space of kSpaceSize bytes starting at
// a fixed address (kSpaceBeg). Both constants are powers of two and
// kSpaceBeg is kSpaceSize-aligned.
//
// Region: a part of Space dedicated to a single size class.
// There are kNumClasses Regions of equal size.
//
// UserChunk: a piece of memory returned to the user.
// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
//
// A Region looks like this:
// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
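//
// Equivalently, within the Region of a given class_id with chunk size S
// (S == SizeClassMap::Size(class_id)), UserChunk i starts at
// RegionBeg + i * S and its MetaChunk starts at
// RegionEnd - (i + 1) * kMetadataSize, where
// RegionBeg = kSpaceBeg + kRegionSize * class_id and
// RegionEnd = RegionBeg + kRegionSize (see GetMetaData below).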
template <const uptr kSpaceBeg, const uptr kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap>
class SizeClassAllocator64 {
 public:
  void Init() {
    CHECK_EQ(AllocBeg(), reinterpret_cast<uptr>(MmapFixedNoReserve(
        AllocBeg(), AllocSize())));
  }

  bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
           alignment <= SizeClassMap::kMaxSize;
  }

  void *Allocate(uptr size, uptr alignment) {
    CHECK(CanAllocate(size, alignment));
    return AllocateBySizeClass(SizeClassMap::ClassID(size));
  }

  void Deallocate(void *p) {
    CHECK(PointerIsMine(p));
    DeallocateBySizeClass(p, GetSizeClass(p));
  }
  bool PointerIsMine(void *p) {
    return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
  }
  uptr GetSizeClass(void *p) {
    return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClasses;
  }

  void *GetMetaData(void *p) {
    uptr class_id = GetSizeClass(p);
    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), class_id);
    return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
                                   (1 + chunk_idx) * kMetadataSize);
  }

  uptr TotalMemoryUsed() {
    uptr res = 0;
    for (uptr i = 0; i < kNumClasses; i++)
      res += GetRegionInfo(i)->allocated_user;
    return res;
  }

  // Test-only.
  void TestOnlyUnmap() {
    UnmapOrDie(reinterpret_cast<void*>(AllocBeg()), AllocSize());
  }

 private:
  static const uptr kNumClasses = 256;  // Power of two <= 256
  COMPILER_CHECK(kNumClasses <= SizeClassMap::kNumClasses);
  static const uptr kRegionSize = kSpaceSize / kNumClasses;
  COMPILER_CHECK((kRegionSize >> 32) > 0);  // kRegionSize must be >= 2^32.
  // Populate the free list with at most this number of bytes at once
  // or with one element if its size is greater.
  static const uptr kPopulateSize = 1 << 18;

  struct LifoListNode {
    LifoListNode *next;
  };

  struct RegionInfo {
    SpinMutex mutex;
    LifoListNode *free_list;
    uptr allocated_user;  // Bytes allocated for user memory.
    uptr allocated_meta;  // Bytes allocated for metadata.
    char padding[kCacheLineSize - 4 * sizeof(uptr)];
  };
  COMPILER_CHECK(sizeof(RegionInfo) == kCacheLineSize);

  uptr AdditionalSize() {
    uptr res = sizeof(RegionInfo) * kNumClasses;
    CHECK_EQ(res % kPageSize, 0);
    return res;
  }
  uptr AllocBeg()  { return kSpaceBeg - AdditionalSize(); }
  uptr AllocSize() { return kSpaceSize + AdditionalSize(); }

  RegionInfo *GetRegionInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg);
    return &regions[-1 - class_id];
  }

  void PushLifoList(LifoListNode **list, LifoListNode *node) {
    node->next = *list;
    *list = node;
  }

  LifoListNode *PopLifoList(LifoListNode **list) {
    LifoListNode *res = *list;
    *list = res->next;
    return res;
  }

  uptr GetChunkIdx(uptr chunk, uptr class_id) {
    u32 offset = chunk % kRegionSize;
    // Here we divide by a non-constant. This is costly.
    // We require that kRegionSize is at least 2^32 so that offset is 32-bit.
    // We save 2x by using 32-bit div, but may need to use a 256-way switch.
    return offset / (u32)SizeClassMap::Size(class_id);
  }

  LifoListNode *PopulateFreeList(uptr class_id, RegionInfo *region) {
    uptr size = SizeClassMap::Size(class_id);
    uptr beg_idx = region->allocated_user;
    uptr end_idx = beg_idx + kPopulateSize;
    LifoListNode *res = 0;
    uptr region_beg = kSpaceBeg + kRegionSize * class_id;
    uptr idx = beg_idx;
    uptr i = 0;
    do {  // do-while loop because we need to put at least one item.
      uptr p = region_beg + idx;
      PushLifoList(&res, reinterpret_cast<LifoListNode*>(p));
      idx += size;
      i++;
    } while (idx < end_idx);
    region->allocated_user += idx - beg_idx;
    region->allocated_meta += i * kMetadataSize;
    CHECK_LT(region->allocated_user + region->allocated_meta, kRegionSize);
    return res;
  }

  void *AllocateBySizeClass(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    if (!region->free_list) {
      region->free_list = PopulateFreeList(class_id, region);
    }
    CHECK_NE(region->free_list, 0);
    LifoListNode *node = PopLifoList(&region->free_list);
    return reinterpret_cast<void*>(node);
  }

  void DeallocateBySizeClass(void *p, uptr class_id) {
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    PushLifoList(&region->free_list, reinterpret_cast<LifoListNode*>(p));
  }
};
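
// Example (illustrative sketch only; the constants below are assumptions
// chosen to satisfy the constraints above, not values mandated by this file):
//
//   typedef SizeClassAllocator64<
//       0x100000000000ULL /*kSpaceBeg, 2^44*/,
//       0x010000000000ULL /*kSpaceSize, 2^40*/,
//       16 /*kMetadataSize*/, DefaultSizeClassMap> PrimaryAllocator;
//
//   PrimaryAllocator a;
//   a.Init();
//   void *p = a.Allocate(100, 8);  // Served from the 112-byte size class.
//   void *m = a.GetMetaData(p);    // 16 bytes of metadata for this chunk.
//   a.Deallocate(p);
//   a.TestOnlyUnmap();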

// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
// The result is always page-aligned.
class LargeMmapAllocator {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }
  void *Allocate(uptr size, uptr alignment) {
    CHECK_LE(alignment, kPageSize);  // Not implemented. Do we need it?
    uptr map_size = RoundUpMapSize(size);
    void *map = MmapOrDie(map_size, "LargeMmapAllocator");
    void *res = reinterpret_cast<void*>(reinterpret_cast<uptr>(map)
                                        + kPageSize);
    Header *h = GetHeader(res);
    h->size = size;
    {
      SpinMutexLock l(&mutex_);
      h->next = list_;
      h->prev = 0;
      if (list_)
        list_->prev = h;
      list_ = h;
    }
    return res;
  }

  void Deallocate(void *p) {
    Header *h = GetHeader(p);
    uptr map_size = RoundUpMapSize(h->size);
    {
      SpinMutexLock l(&mutex_);
      Header *prev = h->prev;
      Header *next = h->next;
      if (prev)
        prev->next = next;
      if (next)
        next->prev = prev;
      if (h == list_)
        list_ = next;
    }
    UnmapOrDie(h, map_size);
  }

  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (Header *l = list_; l; l = l->next) {
      res += RoundUpMapSize(l->size);
    }
    return res;
  }

  bool PointerIsMine(void *p) {
    // Fast check.
    if ((reinterpret_cast<uptr>(p) % kPageSize) != 0) return false;
    SpinMutexLock l(&mutex_);
    for (Header *l = list_; l; l = l->next) {
      if (GetUser(l) == p) return true;
    }
    return false;
  }

  // At least kPageSize/2 metadata bytes are available.
  void *GetMetaData(void *p) {
    return GetHeader(p) + 1;
  }

 private:
  struct Header {
    uptr size;
    Header *next;
    Header *prev;
  };

  Header *GetHeader(void *p) {
    return reinterpret_cast<Header*>(reinterpret_cast<uptr>(p) - kPageSize);
  }

  void *GetUser(Header *h) {
    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + kPageSize);
  }

  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, kPageSize) + kPageSize;
  }

  Header *list_;
  SpinMutex mutex_;
};
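
// Cost note (illustrative; the concrete numbers assume kPageSize == 4096):
// each allocation maps RoundUpTo(size, kPageSize) + kPageSize bytes, i.e.
// one extra page holding the Header in front of the page-aligned user data,
// so even a tiny request consumes two pages. A sketch of the intended use:
//
//   LargeMmapAllocator a;
//   a.Init();
//   void *p = a.Allocate(1 << 22, kPageSize);  // 4 MB of user data + 1 page.
//   CHECK(a.PointerIsMine(p));
//   a.Deallocate(p);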

// This class implements a complete memory allocator by using two
// internal allocators:
// PrimaryAllocator is efficient, but may be unable to allocate some sizes
// or alignments. When allocating 2^x bytes it should return a 2^x-aligned
// chunk.
// SecondaryAllocator can allocate anything, but is not efficient.
template <class PrimaryAllocator, class SecondaryAllocator>
class CombinedAllocator {
 public:
  void Init() {
    primary_.Init();
    secondary_.Init();
  }

  void *Allocate(uptr size, uptr alignment) {
    CHECK_GT(size, 0);
    if (alignment > 8)
      size = RoundUpTo(size, alignment);
    void *res;
    if (primary_.CanAllocate(size, alignment))
      res = primary_.Allocate(size, alignment);
    else
      res = secondary_.Allocate(size, alignment);
    if (alignment > 8)
      CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
    return res;
  }

  void Deallocate(void *p) {
    if (primary_.PointerIsMine(p))
      primary_.Deallocate(p);
    else
      secondary_.Deallocate(p);
  }

  bool PointerIsMine(void *p) {
    if (primary_.PointerIsMine(p))
      return true;
    return secondary_.PointerIsMine(p);
  }

  void *GetMetaData(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetMetaData(p);
    return secondary_.GetMetaData(p);
  }

  uptr TotalMemoryUsed() {
    return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
  }

  void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }

 private:
  PrimaryAllocator primary_;
  SecondaryAllocator secondary_;
};
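
// Example (illustrative sketch only; PrimaryAllocator stands for a
// SizeClassAllocator64 instantiation such as the one sketched above):
//
//   typedef CombinedAllocator<PrimaryAllocator, LargeMmapAllocator> Allocator;
//
//   Allocator a;
//   a.Init();
//   void *small = a.Allocate(100, 8);       // Fits a size class: primary.
//   void *large = a.Allocate(10 << 20, 8);  // > kMaxSize: secondary (mmap).
//   a.Deallocate(small);
//   a.Deallocate(large);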

}  // namespace __sanitizer

#endif  // SANITIZER_ALLOCATOR_H