//===-- sanitizer_allocator64.h ---------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// Specialized allocator which works only in 64-bit address space.
// To be used by ThreadSanitizer, MemorySanitizer and possibly other tools.
// The main feature of this allocator is that the header is located far away
// from the user memory region, so that the tool does not use extra shadow
// for the header.
//
// Status: not yet ready.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#define SANITIZER_ALLOCATOR_H

#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

namespace __sanitizer {

// Maps size class id to size and back.
class DefaultSizeClassMap {
 private:
  // Here we use a spline composed of 5 polynomials of order 1.
  // The first size class is l0, then the classes go with step s0
  // until they reach l1, after which they go with step s1 and so on.
  // Steps should be powers of two for cheap division.
  // The size of the last size class should be a power of two.
  // There should be at most 256 size classes.
  static const uptr l0 = 1 << 4;
  static const uptr l1 = 1 << 9;
  static const uptr l2 = 1 << 12;
  static const uptr l3 = 1 << 15;
  static const uptr l4 = 1 << 18;
  static const uptr l5 = 1 << 21;

  static const uptr s0 = 1 << 4;
  static const uptr s1 = 1 << 6;
  static const uptr s2 = 1 << 9;
  static const uptr s3 = 1 << 12;
  static const uptr s4 = 1 << 15;

  static const uptr u0 = 0 + (l1 - l0) / s0;
  static const uptr u1 = u0 + (l2 - l1) / s1;
  static const uptr u2 = u1 + (l3 - l2) / s2;
  static const uptr u3 = u2 + (l4 - l3) / s3;
  static const uptr u4 = u3 + (l5 - l4) / s4;

  // Maximum number of cached blocks per size class in the local cache.
  static const uptr c0 = 256;
  static const uptr c1 = 64;
  static const uptr c2 = 16;
  static const uptr c3 = 4;
  static const uptr c4 = 1;

 public:
  static const uptr kNumClasses = u4 + 1;
  static const uptr kMaxSize = l5;
  static const uptr kMinSize = l0;

  COMPILER_CHECK(kNumClasses <= 256);
  COMPILER_CHECK((kMaxSize & (kMaxSize - 1)) == 0);

  static uptr Size(uptr class_id) {
    if (class_id <= u0) return l0 + s0 * (class_id - 0);
    if (class_id <= u1) return l1 + s1 * (class_id - u0);
    if (class_id <= u2) return l2 + s2 * (class_id - u1);
    if (class_id <= u3) return l3 + s3 * (class_id - u2);
    if (class_id <= u4) return l4 + s4 * (class_id - u3);
    return 0;
  }
  static uptr ClassID(uptr size) {
    if (size <= l1) return 0 + (size - l0 + s0 - 1) / s0;
    if (size <= l2) return u0 + (size - l1 + s1 - 1) / s1;
    if (size <= l3) return u1 + (size - l2 + s2 - 1) / s2;
    if (size <= l4) return u2 + (size - l3 + s3 - 1) / s3;
    if (size <= l5) return u3 + (size - l4 + s4 - 1) / s4;
    return 0;
  }

  static uptr MaxCached(uptr class_id) {
    if (class_id <= u0) return c0;
    if (class_id <= u1) return c1;
    if (class_id <= u2) return c2;
    if (class_id <= u3) return c3;
    if (class_id <= u4) return c4;
    return 0;
  }
};
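
// An illustrative round trip through the map above (not part of the
// interface; the concrete numbers follow from the default parameters
// l0 = 16, s0 = 16 and are only an example):
//   uptr c = DefaultSizeClassMap::ClassID(100);  // (100 - 16 + 15) / 16 == 6
//   uptr s = DefaultSizeClassMap::Size(c);       // 16 + 16 * 6 == 112 >= 100
//   CHECK_LE(100, s);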

struct AllocatorListNode {
  AllocatorListNode *next;
};

typedef IntrusiveList<AllocatorListNode> AllocatorFreeList;


// Space: a portion of address space of kSpaceSize bytes starting at
// a fixed address (kSpaceBeg). Both constants are powers of two and
// kSpaceBeg is kSpaceSize-aligned.
//
// Region: a part of Space dedicated to a single size class.
// There are kNumClasses Regions of equal size.
//
// UserChunk: a piece of memory returned to user.
// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
//
// A Region looks like this:
// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
template <const uptr kSpaceBeg, const uptr kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap>
class SizeClassAllocator64 {
 public:
  void Init() {
    CHECK_EQ(AllocBeg(), reinterpret_cast<uptr>(MmapFixedNoReserve(
             AllocBeg(), AllocSize())));
  }

  bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
      alignment <= SizeClassMap::kMaxSize;
  }

  void *Allocate(uptr size, uptr alignment) {
    CHECK(CanAllocate(size, alignment));
    return AllocateBySizeClass(SizeClassMap::ClassID(size));
  }

  void Deallocate(void *p) {
    CHECK(PointerIsMine(p));
    DeallocateBySizeClass(p, GetSizeClass(p));
  }

  // Allocate several chunks of the given class_id.
  void BulkAllocate(uptr class_id, AllocatorFreeList *free_list) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    if (region->free_list.empty()) {
      PopulateFreeList(class_id, region);
    }
    CHECK(!region->free_list.empty());
    const uptr count = SizeClassMap::MaxCached(class_id);
    for (uptr i = 0; i < count && !region->free_list.empty(); i++) {
      AllocatorListNode *node = region->free_list.front();
      region->free_list.pop_front();
      free_list->push_front(node);
    }
    CHECK(!free_list->empty());
  }

  // Swallow the entire free_list for the given class_id.
  void BulkDeallocate(uptr class_id, AllocatorFreeList *free_list) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    region->free_list.append_front(free_list);
  }

  static bool PointerIsMine(void *p) {
    return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
  }

  static uptr GetSizeClass(void *p) {
    return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClasses;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return SizeClassMap::Size(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  void *GetMetaData(void *p) {
    uptr class_id = GetSizeClass(p);
    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), class_id);
    return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
                                   (1 + chunk_idx) * kMetadataSize);
  }
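
  // Illustrative use of the metadata block (a sketch; `a` stands for some
  // instantiation of this class and the metadata layout is up to the tool).
  // The returned address grows downward from the region end, mirroring the
  // UserChunk index (see the Region diagram above):
  //   void *p = a.Allocate(100, 8);
  //   void *meta = a.GetMetaData(p);            // kMetadataSize usable bytes.
  //   internal_memset(meta, 0, kMetadataSize);  // E.g. clear the MetaChunk.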

  uptr TotalMemoryUsed() {
    uptr res = 0;
    for (uptr i = 0; i < kNumClasses; i++)
      res += GetRegionInfo(i)->allocated_user;
    return res;
  }

  // Test-only.
  void TestOnlyUnmap() {
    UnmapOrDie(reinterpret_cast<void*>(AllocBeg()), AllocSize());
  }

  static uptr AllocBeg() { return kSpaceBeg - AdditionalSize(); }
  static uptr AllocEnd() { return kSpaceBeg + kSpaceSize; }
  static uptr AllocSize() { return kSpaceSize + AdditionalSize(); }
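
  // A sketch of the address layout implied by the three functions above:
  // the RegionInfo array occupies AdditionalSize() bytes just below kSpaceBeg,
  // and the kNumClasses equally sized regions follow it.
  //
  //   AllocBeg()                kSpaceBeg                        AllocEnd()
  //   | RegionInfo[kNumClasses] | Region 0 | Region 1 | ... | Region 255 |
  //
  // Init() maps this entire range with a single MmapFixedNoReserve() call.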

  static const uptr kNumClasses = 256;  // Power of two <= 256
  typedef SizeClassMap SizeClassMapT;

 private:
  COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
  COMPILER_CHECK(kNumClasses <= SizeClassMap::kNumClasses);
  static const uptr kRegionSize = kSpaceSize / kNumClasses;
  COMPILER_CHECK((kRegionSize >> 32) > 0);  // kRegionSize must be >= 2^32.
  // Populate the free list with at most this number of bytes at once
  // or with one element if its size is greater.
  static const uptr kPopulateSize = 1 << 18;

  struct RegionInfo {
    SpinMutex mutex;
    AllocatorFreeList free_list;
    uptr allocated_user;  // Bytes allocated for user memory.
    uptr allocated_meta;  // Bytes allocated for metadata.
    char padding[kCacheLineSize - 3 * sizeof(uptr) - sizeof(AllocatorFreeList)];
  };
  COMPILER_CHECK(sizeof(RegionInfo) == kCacheLineSize);

  static uptr AdditionalSize() {
    uptr res = sizeof(RegionInfo) * kNumClasses;
    CHECK_EQ(res % kPageSize, 0);
    return res;
  }

  RegionInfo *GetRegionInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg);
    return &regions[-1 - class_id];
  }

  uptr GetChunkIdx(uptr chunk, uptr class_id) {
    u32 offset = chunk % kRegionSize;
    // Here we divide by a non-constant. This is costly.
    // We require that kRegionSize is at least 2^32 so that offset is 32-bit.
    // We save 2x by using 32-bit div, but may need to use a 256-way switch.
    return offset / (u32)SizeClassMap::Size(class_id);
  }
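
  // For example (purely illustrative): if Size(class_id) == 112 and the chunk
  // starts 336 bytes into its region, the division above yields 336 / 112 == 3,
  // the same index GetMetaData() uses to locate the chunk's MetaChunk.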

  void PopulateFreeList(uptr class_id, RegionInfo *region) {
    uptr size = SizeClassMap::Size(class_id);
    uptr beg_idx = region->allocated_user;
    uptr end_idx = beg_idx + kPopulateSize;
    region->free_list.clear();
    uptr region_beg = kSpaceBeg + kRegionSize * class_id;
    uptr idx = beg_idx;
    uptr i = 0;
    do {  // do-while loop because we need to put at least one item.
      uptr p = region_beg + idx;
      region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
      idx += size;
      i++;
    } while (idx < end_idx);
    region->allocated_user += idx - beg_idx;
    region->allocated_meta += i * kMetadataSize;
    CHECK_LT(region->allocated_user + region->allocated_meta, kRegionSize);
  }

  void *AllocateBySizeClass(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    if (region->free_list.empty()) {
      PopulateFreeList(class_id, region);
    }
    CHECK(!region->free_list.empty());
    AllocatorListNode *node = region->free_list.front();
    region->free_list.pop_front();
    return reinterpret_cast<void*>(node);
  }

  void DeallocateBySizeClass(void *p, uptr class_id) {
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
  }
};
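
// A hypothetical instantiation (the constants below are illustrative only;
// each tool chooses its own space placement, space size and metadata size):
//   typedef SizeClassAllocator64<0x600000000000ULL /*kSpaceBeg*/,
//                                0x40000000000ULL  /*kSpaceSize*/,
//                                16                /*kMetadataSize*/,
//                                DefaultSizeClassMap> PrimaryAllocator;
//   PrimaryAllocator a;
//   a.Init();
//   void *p = a.Allocate(100, 8);
//   CHECK(a.PointerIsMine(p));
//   a.Deallocate(p);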

// Objects of this type should be used as local caches for SizeClassAllocator64.
// Since the typical use of this class is to have one object per thread in TLS,
// it has to be POD.
template<const uptr kNumClasses, class SizeClassAllocator>
struct SizeClassAllocatorLocalCache {
  // Don't need to call Init if the object is a global (i.e. zero-initialized).
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    AllocatorFreeList *free_list = &free_lists_[class_id];
    if (free_list->empty())
      allocator->BulkAllocate(class_id, free_list);
    CHECK(!free_list->empty());
    void *res = free_list->front();
    free_list->pop_front();
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_LT(class_id, kNumClasses);
    AllocatorFreeList *free_list = &free_lists_[class_id];
    free_list->push_front(reinterpret_cast<AllocatorListNode*>(p));
    if (free_list->size() >= 2 * SizeClassMap::MaxCached(class_id))
      DrainHalf(allocator, class_id);
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 0; i < kNumClasses; i++) {
      allocator->BulkDeallocate(i, &free_lists_[i]);
      CHECK(free_lists_[i].empty());
    }
  }

  // private:
  typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
  AllocatorFreeList free_lists_[kNumClasses];

  void DrainHalf(SizeClassAllocator *allocator, uptr class_id) {
    AllocatorFreeList *free_list = &free_lists_[class_id];
    AllocatorFreeList half;
    half.clear();
    const uptr count = free_list->size() / 2;
    for (uptr i = 0; i < count; i++) {
      AllocatorListNode *node = free_list->front();
      free_list->pop_front();
      half.push_front(node);
    }
    allocator->BulkDeallocate(class_id, &half);
  }
};
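
// Illustrative per-thread usage (a sketch reusing the hypothetical
// PrimaryAllocator typedef above; `allocator` stands for the shared
// SizeClassAllocator64 instance, `cache` for this thread's TLS object, which
// relies on zero-initialization instead of Init()):
//   static THREADLOCAL SizeClassAllocatorLocalCache<
//       PrimaryAllocator::kNumClasses, PrimaryAllocator> cache;
//   void *p = cache.Allocate(&allocator, allocator.ClassID(100));
//   cache.Deallocate(&allocator, allocator.GetSizeClass(p), p);
//   cache.Drain(&allocator);  // E.g. on thread exit.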

// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
// The result is always page-aligned.
class LargeMmapAllocator {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }
  void *Allocate(uptr size, uptr alignment) {
    CHECK_LE(alignment, kPageSize);  // Not implemented. Do we need it?
    if (size + alignment + 2 * kPageSize < size)
      return 0;
    uptr map_size = RoundUpMapSize(size);
    void *map = MmapOrDie(map_size, "LargeMmapAllocator");
    void *res = reinterpret_cast<void*>(reinterpret_cast<uptr>(map)
                                        + kPageSize);
    Header *h = GetHeader(res);
    h->size = size;
    {
      SpinMutexLock l(&mutex_);
      h->next = list_;
      h->prev = 0;
      if (list_)
        list_->prev = h;
      list_ = h;
    }
    return res;
  }

  void Deallocate(void *p) {
    Header *h = GetHeader(p);
    uptr map_size = RoundUpMapSize(h->size);
    {
      SpinMutexLock l(&mutex_);
      Header *prev = h->prev;
      Header *next = h->next;
      if (prev)
        prev->next = next;
      if (next)
        next->prev = prev;
      if (h == list_)
        list_ = next;
    }
    UnmapOrDie(h, map_size);
  }

  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (Header *l = list_; l; l = l->next) {
      res += RoundUpMapSize(l->size);
    }
    return res;
  }

  bool PointerIsMine(void *p) {
    // Fast check.
    if ((reinterpret_cast<uptr>(p) % kPageSize) != 0) return false;
    SpinMutexLock l(&mutex_);
    for (Header *l = list_; l; l = l->next) {
      if (GetUser(l) == p) return true;
    }
    return false;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    return RoundUpMapSize(GetHeader(p)->size) - kPageSize;
  }

  // At least kPageSize/2 metadata bytes are available.
  void *GetMetaData(void *p) {
    return GetHeader(p) + 1;
  }

 private:
  struct Header {
    uptr size;
    Header *next;
    Header *prev;
  };

  Header *GetHeader(void *p) {
    return reinterpret_cast<Header*>(reinterpret_cast<uptr>(p) - kPageSize);
  }

  void *GetUser(Header *h) {
    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + kPageSize);
  }

  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, kPageSize) + kPageSize;
  }

  Header *list_;
  SpinMutex mutex_;
};
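
// Illustrative use (a sketch): every result is page-aligned and is preceded
// by a one-page Header within the same mapping, so even a tiny request
// consumes at least two pages.
//   LargeMmapAllocator a;
//   a.Init();
//   void *p = a.Allocate(1 << 22, kPageSize);
//   CHECK_EQ(reinterpret_cast<uptr>(p) % kPageSize, 0);
//   CHECK(a.PointerIsMine(p));
//   a.Deallocate(p);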

// This class implements a complete memory allocator by using two
// internal allocators:
// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
// When allocating 2^x bytes it should return a 2^x-aligned chunk.
// PrimaryAllocator is used via a local AllocatorCache.
// SecondaryAllocator can allocate anything, but is not efficient.
template <class PrimaryAllocator, class AllocatorCache,
          class SecondaryAllocator>  // NOLINT
class CombinedAllocator {
 public:
  void Init() {
    primary_.Init();
    secondary_.Init();
  }

  void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
                 bool cleared = false) {
    // Returning 0 on malloc(0) may break a lot of code.
    if (size == 0)
      size = 1;
    if (size + alignment < size)
      return 0;
    if (alignment > 8)
      size = RoundUpTo(size, alignment);
    void *res;
    if (primary_.CanAllocate(size, alignment))
      res = cache->Allocate(&primary_, primary_.ClassID(size));
    else
      res = secondary_.Allocate(size, alignment);
    if (alignment > 8)
      CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
    if (cleared && res)
      internal_memset(res, 0, size);
    return res;
  }

  void Deallocate(AllocatorCache *cache, void *p) {
    if (!p) return;
    if (primary_.PointerIsMine(p))
      cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
    else
      secondary_.Deallocate(p);
  }

  void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
                   uptr alignment) {
    if (!p)
      return Allocate(cache, new_size, alignment);
    if (!new_size) {
      Deallocate(cache, p);
      return 0;
    }
    CHECK(PointerIsMine(p));
    uptr old_size = GetActuallyAllocatedSize(p);
    uptr memcpy_size = Min(new_size, old_size);
    void *new_p = Allocate(cache, new_size, alignment);
    if (new_p)
      internal_memcpy(new_p, p, memcpy_size);
    Deallocate(cache, p);
    return new_p;
  }

  bool PointerIsMine(void *p) {
    if (primary_.PointerIsMine(p))
      return true;
    return secondary_.PointerIsMine(p);
  }

  void *GetMetaData(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetMetaData(p);
    return secondary_.GetMetaData(p);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetActuallyAllocatedSize(p);
    return secondary_.GetActuallyAllocatedSize(p);
  }

  uptr TotalMemoryUsed() {
    return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
  }

  void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }

  void SwallowCache(AllocatorCache *cache) {
    cache->Drain(&primary_);
  }

 private:
  PrimaryAllocator primary_;
  SecondaryAllocator secondary_;
};
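
// A sketch of how the three template parameters are meant to fit together
// (the typedef names reuse the hypothetical PrimaryAllocator from above and
// are not part of this header):
//   typedef SizeClassAllocatorLocalCache<PrimaryAllocator::kNumClasses,
//                                        PrimaryAllocator> AllocatorCache;
//   typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
//                             LargeMmapAllocator> Allocator;
//   static Allocator allocator;  // Global, so zero-initialized.
//   static THREADLOCAL AllocatorCache cache;
//   allocator.Init();
//   void *p = allocator.Allocate(&cache, 100, 8);
//   allocator.Deallocate(&cache, p);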

}  // namespace __sanitizer

#endif  // SANITIZER_ALLOCATOR_H