//===-- sanitizer_allocator64.h ---------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// Specialized allocator which works only in 64-bit address space.
// To be used by ThreadSanitizer, MemorySanitizer and possibly other tools.
// The main feature of this allocator is that the header is located far away
// from the user memory region, so that the tool does not use extra shadow
// for the header.
//
// Status: not yet ready.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#define SANITIZER_ALLOCATOR_H

#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

namespace __sanitizer {

// Maps size class id to size and back.
class DefaultSizeClassMap {
 private:
  // Here we use a spline composed of 5 polynomials of order 1.
  // The first size class is l0, then the classes go with step s0
  // until they reach l1, after which they go with step s1 and so on.
  // Steps should be powers of two for cheap division.
  // The size of the last size class should be a power of two.
  // There should be at most 256 size classes.
  static const uptr l0 = 1 << 4;
  static const uptr l1 = 1 << 9;
  static const uptr l2 = 1 << 12;
  static const uptr l3 = 1 << 15;
  static const uptr l4 = 1 << 18;
  static const uptr l5 = 1 << 21;

  static const uptr s0 = 1 << 4;
  static const uptr s1 = 1 << 6;
  static const uptr s2 = 1 << 9;
  static const uptr s3 = 1 << 12;
  static const uptr s4 = 1 << 15;

  static const uptr u0 = 0 + (l1 - l0) / s0;
  static const uptr u1 = u0 + (l2 - l1) / s1;
  static const uptr u2 = u1 + (l3 - l2) / s2;
  static const uptr u3 = u2 + (l4 - l3) / s3;
  static const uptr u4 = u3 + (l5 - l4) / s4;

 public:
  static const uptr kNumClasses = u4 + 1;
  static const uptr kMaxSize = l5;
  static const uptr kMinSize = l0;

  COMPILER_CHECK(kNumClasses <= 256);
  COMPILER_CHECK((kMaxSize & (kMaxSize - 1)) == 0);

  static uptr Size(uptr class_id) {
    if (class_id <= u0) return l0 + s0 * (class_id - 0);
    if (class_id <= u1) return l1 + s1 * (class_id - u0);
    if (class_id <= u2) return l2 + s2 * (class_id - u1);
    if (class_id <= u3) return l3 + s3 * (class_id - u2);
    if (class_id <= u4) return l4 + s4 * (class_id - u3);
    return 0;
  }
  static uptr ClassID(uptr size) {
    if (size <= l1) return 0 + (size - l0 + s0 - 1) / s0;
    if (size <= l2) return u0 + (size - l1 + s1 - 1) / s1;
    if (size <= l3) return u1 + (size - l2 + s2 - 1) / s2;
    if (size <= l4) return u2 + (size - l3 + s3 - 1) / s3;
    if (size <= l5) return u3 + (size - l4 + s4 - 1) / s4;
    return 0;
  }
};
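
// For illustration, with the constants above (l0 = 16, s0 = 16,
// u0 = (l1 - l0) / s0 = 31): a request of 100 bytes gives
// ClassID(100) = (100 - 16 + 15) / 16 = 6 and Size(6) = 16 + 16 * 6 = 112,
// i.e. the request is rounded up to the 112-byte size class.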

// Space: a portion of address space of kSpaceSize bytes starting at
// a fixed address (kSpaceBeg). Both constants are powers of two and
// kSpaceBeg is kSpaceSize-aligned.
//
// Region: a part of Space dedicated to a single size class.
// There are kNumClasses Regions of equal size.
//
// UserChunk: a piece of memory returned to user.
// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
//
// A Region looks like this:
// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
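//
// The MetaChunk for the UserChunk with index i (counting from the beginning
// of the Region) is stored at RegionEnd - (i + 1) * kMetadataSize, so user
// memory grows up from the start of the Region while metadata grows down
// from its end (see GetMetaData() below).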
template <const uptr kSpaceBeg, const uptr kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap>
class SizeClassAllocator64 {
 public:
  void Init() {
    CHECK_EQ(AllocBeg(), reinterpret_cast<uptr>(MmapFixedNoReserve(
        AllocBeg(), AllocSize())));
  }

  bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
           alignment <= SizeClassMap::kMaxSize;
  }

  void *Allocate(uptr size, uptr alignment) {
    CHECK(CanAllocate(size, alignment));
    return AllocateBySizeClass(SizeClassMap::ClassID(size));
  }

  void Deallocate(void *p) {
    CHECK(PointerIsMine(p));
    DeallocateBySizeClass(p, GetSizeClass(p));
  }
  bool PointerIsMine(void *p) {
    return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
  }
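  // Maps a pointer inside the Space back to its size class. Because kSpaceBeg
  // is kSpaceSize-aligned and kNumClasses * kRegionSize == kSpaceSize, the
  // Region index is simply (p / kRegionSize) % kNumClasses.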
  uptr GetSizeClass(void *p) {
    return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClasses;
  }

  void *GetMetaData(void *p) {
    uptr class_id = GetSizeClass(p);
    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), class_id);
    return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
                                   (1 + chunk_idx) * kMetadataSize);
  }

  uptr TotalMemoryUsed() {
    uptr res = 0;
    for (uptr i = 0; i < kNumClasses; i++)
      res += GetRegionInfo(i)->allocated_user;
    return res;
  }

  // Test-only.
  void TestOnlyUnmap() {
    UnmapOrDie(reinterpret_cast<void*>(AllocBeg()), AllocSize());
  }

 private:
  static const uptr kNumClasses = 256;  // Power of two <= 256
  COMPILER_CHECK(kNumClasses <= SizeClassMap::kNumClasses);
  static const uptr kRegionSize = kSpaceSize / kNumClasses;
  COMPILER_CHECK((kRegionSize >> 32) > 0);  // kRegionSize must be >= 2^32.
  // Populate the free list with at most this number of bytes at once
  // or with one element if its size is greater.
  static const uptr kPopulateSize = 1 << 18;

  struct ListNode {
    ListNode *next;
  };

  typedef IntrusiveList<ListNode> FreeList;

  struct RegionInfo {
    SpinMutex mutex;
    FreeList free_list;
    uptr allocated_user;  // Bytes allocated for user memory.
    uptr allocated_meta;  // Bytes allocated for metadata.
    char padding[kCacheLineSize - 3 * sizeof(uptr) - sizeof(FreeList)];
  };
  COMPILER_CHECK(sizeof(RegionInfo) == kCacheLineSize);

  uptr AdditionalSize() {
    uptr res = sizeof(RegionInfo) * kNumClasses;
    CHECK_EQ(res % kPageSize, 0);
    return res;
  }
  uptr AllocBeg()  { return kSpaceBeg - AdditionalSize(); }
  uptr AllocSize() { return kSpaceSize + AdditionalSize(); }

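  // The RegionInfo array lives in the AdditionalSize() area mapped by Init()
  // just below kSpaceBeg: the entry for class_id is at
  // kSpaceBeg - (class_id + 1) * sizeof(RegionInfo).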
  RegionInfo *GetRegionInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg);
    return &regions[-1 - class_id];
  }

  uptr GetChunkIdx(uptr chunk, uptr class_id) {
    u32 offset = chunk % kRegionSize;
    // Here we divide by a non-constant. This is costly.
    // We require that kRegionSize is at least 2^32 so that offset is 32-bit.
    // We save 2x by using 32-bit div, but may need to use a 256-way switch.
    return offset / (u32)SizeClassMap::Size(class_id);
  }

  void PopulateFreeList(uptr class_id, RegionInfo *region) {
    uptr size = SizeClassMap::Size(class_id);
    uptr beg_idx = region->allocated_user;
    uptr end_idx = beg_idx + kPopulateSize;
    region->free_list.clear();
    uptr region_beg = kSpaceBeg + kRegionSize * class_id;
    uptr idx = beg_idx;
    uptr i = 0;
    do {  // do-while loop because we need to put at least one item.
      uptr p = region_beg + idx;
      region->free_list.push_front(reinterpret_cast<ListNode*>(p));
      idx += size;
      i++;
    } while (idx < end_idx);
    region->allocated_user += idx - beg_idx;
    region->allocated_meta += i * kMetadataSize;
    CHECK_LT(region->allocated_user + region->allocated_meta, kRegionSize);
  }

  void *AllocateBySizeClass(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    if (region->free_list.empty()) {
      PopulateFreeList(class_id, region);
    }
    CHECK(!region->free_list.empty());
    ListNode *node = region->free_list.front();
    region->free_list.pop_front();
    return reinterpret_cast<void*>(node);
  }

  void DeallocateBySizeClass(void *p, uptr class_id) {
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    region->free_list.push_front(reinterpret_cast<ListNode*>(p));
  }
};

// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
// The result is always page-aligned.
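//
// For illustration: an allocation of size bytes maps
// RoundUpTo(size, kPageSize) + kPageSize bytes. The first page of the mapping
// holds the Header (followed by the metadata area returned by GetMetaData());
// the user pointer is the start of the second page, which keeps the result
// page-aligned.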
class LargeMmapAllocator {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }
  void *Allocate(uptr size, uptr alignment) {
    CHECK_LE(alignment, kPageSize);  // Not implemented. Do we need it?
    uptr map_size = RoundUpMapSize(size);
    void *map = MmapOrDie(map_size, "LargeMmapAllocator");
    void *res = reinterpret_cast<void*>(reinterpret_cast<uptr>(map)
                                        + kPageSize);
    Header *h = GetHeader(res);
    h->size = size;
    {
      SpinMutexLock l(&mutex_);
      h->next = list_;
      h->prev = 0;
      if (list_)
        list_->prev = h;
      list_ = h;
    }
    return res;
  }

  void Deallocate(void *p) {
    Header *h = GetHeader(p);
    uptr map_size = RoundUpMapSize(h->size);
    {
      SpinMutexLock l(&mutex_);
      Header *prev = h->prev;
      Header *next = h->next;
      if (prev)
        prev->next = next;
      if (next)
        next->prev = prev;
      if (h == list_)
        list_ = next;
    }
    UnmapOrDie(h, map_size);
  }

  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (Header *l = list_; l; l = l->next) {
      res += RoundUpMapSize(l->size);
    }
    return res;
  }

  bool PointerIsMine(void *p) {
    // Fast check.
    if ((reinterpret_cast<uptr>(p) % kPageSize) != 0) return false;
    SpinMutexLock l(&mutex_);
    for (Header *l = list_; l; l = l->next) {
      if (GetUser(l) == p) return true;
    }
    return false;
  }

  // At least kPageSize/2 metadata bytes are available.
  void *GetMetaData(void *p) {
    return GetHeader(p) + 1;
  }

 private:
  struct Header {
    uptr size;
    Header *next;
    Header *prev;
  };

  Header *GetHeader(void *p) {
    return reinterpret_cast<Header*>(reinterpret_cast<uptr>(p) - kPageSize);
  }

  void *GetUser(Header *h) {
    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + kPageSize);
  }

  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, kPageSize) + kPageSize;
  }

  Header *list_;
  SpinMutex mutex_;
};

// This class implements a complete memory allocator by using two
// internal allocators:
// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
// When allocating 2^x bytes it should return a 2^x-aligned chunk.
// SecondaryAllocator can allocate anything, but is not efficient.
template <class PrimaryAllocator, class SecondaryAllocator>
class CombinedAllocator {
 public:
  void Init() {
    primary_.Init();
    secondary_.Init();
  }

  void *Allocate(uptr size, uptr alignment) {
    CHECK_GT(size, 0);
    if (alignment > 8)
      size = RoundUpTo(size, alignment);
    void *res;
    if (primary_.CanAllocate(size, alignment))
      res = primary_.Allocate(size, alignment);
    else
      res = secondary_.Allocate(size, alignment);
    if (alignment > 8)
      CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
    return res;
  }

  void Deallocate(void *p) {
    if (primary_.PointerIsMine(p))
      primary_.Deallocate(p);
    else
      secondary_.Deallocate(p);
  }

  bool PointerIsMine(void *p) {
    if (primary_.PointerIsMine(p))
      return true;
    return secondary_.PointerIsMine(p);
  }

  void *GetMetaData(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetMetaData(p);
    return secondary_.GetMetaData(p);
  }

  uptr TotalMemoryUsed() {
    return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
  }

  void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }

 private:
  PrimaryAllocator primary_;
  SecondaryAllocator secondary_;
};
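
// A hypothetical instantiation (for illustration only; the concrete address
// range and metadata size are chosen by the tool that uses this allocator):
//
//   typedef SizeClassAllocator64<0x400000000000ULL /*kSpaceBeg*/,
//                                0x010000000000ULL /*kSpaceSize*/,
//                                16 /*kMetadataSize*/,
//                                DefaultSizeClassMap> PrimaryAllocator;
//   typedef CombinedAllocator<PrimaryAllocator, LargeMmapAllocator> Allocator;
//
//   Allocator a;
//   a.Init();
//   void *p = a.Allocate(100, 8);      // Served by the primary allocator.
//   void *q = a.Allocate(1 << 22, 8);  // Larger than kMaxSize; served by
//                                      // LargeMmapAllocator.
//   a.Deallocate(q);
//   a.Deallocate(p);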

}  // namespace __sanitizer

#endif  // SANITIZER_ALLOCATOR_H