//===-- sanitizer_allocator64.h ---------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// Specialized allocator which works only in 64-bit address space.
// To be used by ThreadSanitizer, MemorySanitizer and possibly other tools.
// The main feature of this allocator is that the header is located far away
// from the user memory region, so that the tool does not use extra shadow
// for the header.
//
// Status: not yet ready.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#define SANITIZER_ALLOCATOR_H

#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

namespace __sanitizer {

// Maps size class id to size and back.
class DefaultSizeClassMap {
 private:
  // Here we use a spline composed of 5 polynomials of order 1.
  // The first size class is l0, then the classes go with step s0
  // until they reach l1, after which they go with step s1 and so on.
  // Steps should be powers of two for cheap division.
  // The size of the last size class should be a power of two.
  // There should be at most 256 size classes.
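  //
  // For example, with l0 = 16 and s0 = 16, ClassID(17) = (17 - 16 + 15) / 16
  // = 1 and Size(1) = 16 + 16 * 1 = 32, i.e. a 17-byte request is served
  // from the 32-byte size class.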
  static const uptr l0 = 1 << 4;
  static const uptr l1 = 1 << 9;
  static const uptr l2 = 1 << 12;
  static const uptr l3 = 1 << 15;
  static const uptr l4 = 1 << 18;
  static const uptr l5 = 1 << 21;

  static const uptr s0 = 1 << 4;
  static const uptr s1 = 1 << 6;
  static const uptr s2 = 1 << 9;
  static const uptr s3 = 1 << 12;
  static const uptr s4 = 1 << 15;

  static const uptr u0 = 0 + (l1 - l0) / s0;
  static const uptr u1 = u0 + (l2 - l1) / s1;
  static const uptr u2 = u1 + (l3 - l2) / s2;
  static const uptr u3 = u2 + (l4 - l3) / s3;
  static const uptr u4 = u3 + (l5 - l4) / s4;

  // Maximum number of blocks the local cache may hold, per size class.
  static const uptr c0 = 256;
  static const uptr c1 = 64;
  static const uptr c2 = 16;
  static const uptr c3 = 4;
  static const uptr c4 = 1;

 public:
  static const uptr kNumClasses = u4 + 1;
  static const uptr kMaxSize = l5;
  static const uptr kMinSize = l0;

  COMPILER_CHECK(kNumClasses <= 256);
  COMPILER_CHECK((kMaxSize & (kMaxSize - 1)) == 0);

  static uptr Size(uptr class_id) {
    if (class_id <= u0) return l0 + s0 * (class_id - 0);
    if (class_id <= u1) return l1 + s1 * (class_id - u0);
    if (class_id <= u2) return l2 + s2 * (class_id - u1);
    if (class_id <= u3) return l3 + s3 * (class_id - u2);
    if (class_id <= u4) return l4 + s4 * (class_id - u3);
    return 0;
  }
  static uptr ClassID(uptr size) {
    if (size <= l1) return 0 + (size - l0 + s0 - 1) / s0;
    if (size <= l2) return u0 + (size - l1 + s1 - 1) / s1;
    if (size <= l3) return u1 + (size - l2 + s2 - 1) / s2;
    if (size <= l4) return u2 + (size - l3 + s3 - 1) / s3;
    if (size <= l5) return u3 + (size - l4 + s4 - 1) / s4;
    return 0;
  }

  static uptr MaxCached(uptr class_id) {
    if (class_id <= u0) return c0;
    if (class_id <= u1) return c1;
    if (class_id <= u2) return c2;
    if (class_id <= u3) return c3;
    if (class_id <= u4) return c4;
    return 0;
  }
};

struct AllocatorListNode {
  AllocatorListNode *next;
};

typedef IntrusiveList<AllocatorListNode> AllocatorFreeList;
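// The free list is intrusive: a free chunk's first word stores the pointer
// to the next free chunk, so the list itself needs no extra memory.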

// Space: a portion of address space of kSpaceSize bytes starting at
// a fixed address (kSpaceBeg). Both constants are powers of two and
// kSpaceBeg is kSpaceSize-aligned.
//
// Region: a part of Space dedicated to a single size class.
// There are kNumClasses Regions of equal size.
//
// UserChunk: a piece of memory returned to user.
// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
//
// A Region looks like this:
// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
template <const uptr kSpaceBeg, const uptr kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap>
class SizeClassAllocator64 {
 public:
  void Init() {
    CHECK_EQ(AllocBeg(), reinterpret_cast<uptr>(MmapFixedNoReserve(
             AllocBeg(), AllocSize())));
  }

  bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
           alignment <= SizeClassMap::kMaxSize;
  }

  void *Allocate(uptr size, uptr alignment) {
    CHECK(CanAllocate(size, alignment));
    return AllocateBySizeClass(SizeClassMap::ClassID(size));
  }

  void Deallocate(void *p) {
    CHECK(PointerIsMine(p));
    DeallocateBySizeClass(p, GetSizeClass(p));
  }

  // Allocate several chunks of the given class_id.
  void BulkAllocate(uptr class_id, AllocatorFreeList *free_list) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    if (region->free_list.empty()) {
      PopulateFreeList(class_id, region);
    }
    CHECK(!region->free_list.empty());
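    // Move up to MaxCached(class_id) chunks into the caller's free list;
    // if the region's list is short enough, splice it in wholesale.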
    uptr count = SizeClassMap::MaxCached(class_id);
    if (region->free_list.size() <= count) {
      free_list->append_front(&region->free_list);
    } else {
      for (uptr i = 0; i < count; i++) {
        AllocatorListNode *node = region->free_list.front();
        region->free_list.pop_front();
        free_list->push_front(node);
      }
    }
    CHECK(!free_list->empty());
  }

  // Swallow the entire free_list for the given class_id.
  void BulkDeallocate(uptr class_id, AllocatorFreeList *free_list) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    region->free_list.append_front(free_list);
  }

  static bool PointerIsMine(void *p) {
    return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
  }

  static uptr GetSizeClass(void *p) {
    return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClasses;
  }

  static void *GetBlockBegin(void *p) {
    uptr u = (uptr)p;
    uptr s = GetActuallyAllocatedSize(p);
    uptr regBeg = u & ~(kRegionSize - 1);
    uptr regOff = u - regBeg;
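    // Round the offset down to a multiple of the chunk size to find the
    // beginning of the chunk that contains p.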
    uptr begin = regBeg + regOff / s * s;
    return (void*)begin;
  }

  static uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return SizeClassMap::Size(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

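  // Metadata for the chunk with index i is stored at the end of the chunk's
  // region, growing downwards: Meta(i) = RegionEnd - (i + 1) * kMetadataSize.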
  void *GetMetaData(void *p) {
    uptr class_id = GetSizeClass(p);
    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), class_id);
    return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
                                   (1 + chunk_idx) * kMetadataSize);
  }

  uptr TotalMemoryUsed() {
    uptr res = 0;
    for (uptr i = 0; i < kNumClasses; i++)
      res += GetRegionInfo(i)->allocated_user;
    return res;
  }

  // Test-only.
  void TestOnlyUnmap() {
    UnmapOrDie(reinterpret_cast<void*>(AllocBeg()), AllocSize());
  }

  static uptr AllocBeg()  { return kSpaceBeg - AdditionalSize(); }
  static uptr AllocEnd()  { return kSpaceBeg + kSpaceSize; }
  static uptr AllocSize() { return kSpaceSize + AdditionalSize(); }

  static const uptr kNumClasses = 256;  // Power of two <= 256.
  typedef SizeClassMap SizeClassMapT;

 private:
  COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
  COMPILER_CHECK(kNumClasses <= SizeClassMap::kNumClasses);
  static const uptr kRegionSize = kSpaceSize / kNumClasses;
  COMPILER_CHECK((kRegionSize >> 32) > 0);  // kRegionSize must be >= 2^32.
  // Populate a free list with at most this many bytes at once, or with a
  // single chunk if the class size is larger.
  static const uptr kPopulateSize = 1 << 18;

  struct RegionInfo {
    SpinMutex mutex;
    AllocatorFreeList free_list;
    uptr allocated_user;  // Bytes allocated for user memory.
    uptr allocated_meta;  // Bytes allocated for metadata.
    char padding[kCacheLineSize - 3 * sizeof(uptr) - sizeof(AllocatorFreeList)];
  };
  COMPILER_CHECK(sizeof(RegionInfo) == kCacheLineSize);

  static uptr AdditionalSize() {
    uptr res = sizeof(RegionInfo) * kNumClasses;
    CHECK_EQ(res % kPageSize, 0);
    return res;
  }

  RegionInfo *GetRegionInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
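    // RegionInfo objects occupy the AdditionalSize() bytes immediately below
    // kSpaceBeg, so index backwards from there.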
    RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg);
    return &regions[-1 - class_id];
  }

  uptr GetChunkIdx(uptr chunk, uptr class_id) {
    u32 offset = chunk % kRegionSize;
    // Here we divide by a non-constant. This is costly.
    // We require that kRegionSize is at least 2^32 so that offset is 32-bit.
    // We save 2x by using 32-bit div, but may need to use a 256-way switch.
    return offset / (u32)SizeClassMap::Size(class_id);
  }

  void PopulateFreeList(uptr class_id, RegionInfo *region) {
    uptr size = SizeClassMap::Size(class_id);
    uptr beg_idx = region->allocated_user;
    uptr end_idx = beg_idx + kPopulateSize;
    region->free_list.clear();
    uptr region_beg = kSpaceBeg + kRegionSize * class_id;
    uptr idx = beg_idx;
    uptr i = 0;
    do {  // do-while loop because we need to put at least one item.
      uptr p = region_beg + idx;
      region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
      idx += size;
      i++;
    } while (idx < end_idx);
    region->allocated_user += idx - beg_idx;
    region->allocated_meta += i * kMetadataSize;
    CHECK_LT(region->allocated_user + region->allocated_meta, kRegionSize);
  }

  void *AllocateBySizeClass(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    if (region->free_list.empty()) {
      PopulateFreeList(class_id, region);
    }
    CHECK(!region->free_list.empty());
    AllocatorListNode *node = region->free_list.front();
    region->free_list.pop_front();
    return reinterpret_cast<void*>(node);
  }

  void DeallocateBySizeClass(void *p, uptr class_id) {
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
  }
};

// Objects of this type should be used as local caches for
// SizeClassAllocator64. Since the typical use of this class is to have one
// object per thread in TLS, it has to be POD.
template<const uptr kNumClasses, class SizeClassAllocator>
struct SizeClassAllocatorLocalCache {
  // Don't need to call Init if the object is a global (i.e. zero-initialized).
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    AllocatorFreeList *free_list = &free_lists_[class_id];
    if (free_list->empty())
      allocator->BulkAllocate(class_id, free_list);
    CHECK(!free_list->empty());
    void *res = free_list->front();
    free_list->pop_front();
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_LT(class_id, kNumClasses);
    AllocatorFreeList *free_list = &free_lists_[class_id];
    free_list->push_front(reinterpret_cast<AllocatorListNode*>(p));
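    // Keep the per-thread cache bounded: once it holds twice the per-class
    // limit, return half of it to the central allocator.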
    if (free_list->size() >= 2 * SizeClassMap::MaxCached(class_id))
      DrainHalf(allocator, class_id);
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 0; i < kNumClasses; i++) {
      allocator->BulkDeallocate(i, &free_lists_[i]);
      CHECK(free_lists_[i].empty());
    }
  }

  // private:
  typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
  AllocatorFreeList free_lists_[kNumClasses];

  void DrainHalf(SizeClassAllocator *allocator, uptr class_id) {
    AllocatorFreeList *free_list = &free_lists_[class_id];
    AllocatorFreeList half;
    half.clear();
    const uptr count = free_list->size() / 2;
    for (uptr i = 0; i < count; i++) {
      AllocatorListNode *node = free_list->front();
      free_list->pop_front();
      half.push_front(node);
    }
    allocator->BulkDeallocate(class_id, &half);
  }
};

// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
// The result is always page-aligned.
class LargeMmapAllocator {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }
  void *Allocate(uptr size, uptr alignment) {
    CHECK_LE(alignment, kPageSize);  // Not implemented. Do we need it?
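    // Bail out if rounding up to whole pages would overflow.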
    if (size + alignment + 2 * kPageSize < size)
      return 0;
    uptr map_size = RoundUpMapSize(size);
    void *map = MmapOrDie(map_size, "LargeMmapAllocator");
    void *res = reinterpret_cast<void*>(reinterpret_cast<uptr>(map)
                                        + kPageSize);
    Header *h = GetHeader(res);
    h->size = size;
    {
      SpinMutexLock l(&mutex_);
      h->next = list_;
      h->prev = 0;
      if (list_)
        list_->prev = h;
      list_ = h;
    }
    return res;
  }

  void Deallocate(void *p) {
    Header *h = GetHeader(p);
    uptr map_size = RoundUpMapSize(h->size);
    {
      SpinMutexLock l(&mutex_);
      Header *prev = h->prev;
      Header *next = h->next;
      if (prev)
        prev->next = next;
      if (next)
        next->prev = prev;
      if (h == list_)
        list_ = next;
    }
    UnmapOrDie(h, map_size);
  }

  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (Header *l = list_; l; l = l->next) {
      res += RoundUpMapSize(l->size);
    }
    return res;
  }

  bool PointerIsMine(void *p) {
    // Fast check.
    if ((reinterpret_cast<uptr>(p) % kPageSize) != 0) return false;
    SpinMutexLock l(&mutex_);
    for (Header *l = list_; l; l = l->next) {
      if (GetUser(l) == p) return true;
    }
    return false;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    return RoundUpMapSize(GetHeader(p)->size) - kPageSize;
  }

  // At least kPageSize/2 metadata bytes are available.
  void *GetMetaData(void *p) {
    return GetHeader(p) + 1;
  }

  void *GetBlockBegin(void *p) {
    SpinMutexLock l(&mutex_);
    for (Header *l = list_; l; l = l->next) {
      void *b = GetUser(l);
      if (p >= b && p < (u8*)b + l->size)
        return b;
    }
    return 0;
  }

 private:
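  // Each allocation is preceded by one whole page holding the Header (the
  // rest of that page serves as metadata); GetHeader/GetUser convert between
  // the header and the user pointer.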
  struct Header {
    uptr size;
    Header *next;
    Header *prev;
  };

  Header *GetHeader(void *p) {
    return reinterpret_cast<Header*>(reinterpret_cast<uptr>(p) - kPageSize);
  }

  void *GetUser(Header *h) {
    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + kPageSize);
  }

  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, kPageSize) + kPageSize;
  }

  Header *list_;
  SpinMutex mutex_;
};

// This class implements a complete memory allocator by using two
// internal allocators:
// PrimaryAllocator is efficient, but may not allocate some sizes
// (alignments). When allocating 2^x bytes it should return a 2^x-aligned
// chunk. PrimaryAllocator is used via a local AllocatorCache.
// SecondaryAllocator can allocate anything, but is not efficient.
template <class PrimaryAllocator, class AllocatorCache,
          class SecondaryAllocator>  // NOLINT
class CombinedAllocator {
 public:
  void Init() {
    primary_.Init();
    secondary_.Init();
  }

  void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
                 bool cleared = false) {
    // Returning 0 on malloc(0) may break a lot of code.
    if (size == 0)
      size = 1;
    if (size + alignment < size)
      return 0;
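    // Rounding the size up to the alignment makes the primary return a
    // sufficiently aligned chunk; the CHECK below verifies this.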
    if (alignment > 8)
      size = RoundUpTo(size, alignment);
    void *res;
    if (primary_.CanAllocate(size, alignment))
      res = cache->Allocate(&primary_, primary_.ClassID(size));
    else
      res = secondary_.Allocate(size, alignment);
    if (alignment > 8)
      CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
    if (cleared && res)
      internal_memset(res, 0, size);
    return res;
  }

  void Deallocate(AllocatorCache *cache, void *p) {
    if (!p) return;
    if (primary_.PointerIsMine(p))
      cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
    else
      secondary_.Deallocate(p);
  }

  void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
                   uptr alignment) {
    if (!p)
      return Allocate(cache, new_size, alignment);
    if (!new_size) {
      Deallocate(cache, p);
      return 0;
    }
    CHECK(PointerIsMine(p));
    uptr old_size = GetActuallyAllocatedSize(p);
    uptr memcpy_size = Min(new_size, old_size);
    void *new_p = Allocate(cache, new_size, alignment);
    if (new_p)
      internal_memcpy(new_p, p, memcpy_size);
    Deallocate(cache, p);
    return new_p;
  }

  bool PointerIsMine(void *p) {
    if (primary_.PointerIsMine(p))
      return true;
    return secondary_.PointerIsMine(p);
  }

  void *GetMetaData(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetMetaData(p);
    return secondary_.GetMetaData(p);
  }

  void *GetBlockBegin(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBegin(p);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetActuallyAllocatedSize(p);
    return secondary_.GetActuallyAllocatedSize(p);
  }

  uptr TotalMemoryUsed() {
    return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
  }

  void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }

  void SwallowCache(AllocatorCache *cache) {
    cache->Drain(&primary_);
  }

 private:
  PrimaryAllocator primary_;
  SecondaryAllocator secondary_;
};
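
// A minimal usage sketch. The space address, space size, and metadata size
// below are illustrative assumptions, not values defined by this header:
//   typedef SizeClassAllocator64<0x700000000000ULL, 0x010000000000ULL,
//                                16, DefaultSizeClassMap> Primary;
//   typedef SizeClassAllocatorLocalCache<Primary::kNumClasses,
//                                        Primary> Cache;
//   typedef CombinedAllocator<Primary, Cache, LargeMmapAllocator> Allocator;
//
//   static Allocator allocator;  // Zero-initialized as a global.
//   static THREADLOCAL Cache cache;  // One per thread; no Init() needed.
//   allocator.Init();
//   void *p = allocator.Allocate(&cache, 100, 8);
//   allocator.Deallocate(&cache, p);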

}  // namespace __sanitizer

#endif  // SANITIZER_ALLOCATOR_H