//===-- sanitizer_allocator64.h ---------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// Specialized allocator which works only in 64-bit address space.
// To be used by ThreadSanitizer, MemorySanitizer and possibly other tools.
// The main feature of this allocator is that the header is located far away
// from the user memory region, so that the tool does not use extra shadow
// for the header.
//
// Status: not yet ready.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#define SANITIZER_ALLOCATOR_H

#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

namespace __sanitizer {

// Maps size class id to size and back.
class DefaultSizeClassMap {
 private:
  // Here we use a spline composed of 5 polynomials of order 1.
  // The first size class is l0, then the classes go with step s0
  // until they reach l1, after which they go with step s1 and so on.
  // Steps should be powers of two for cheap division.
  // The size of the last size class should be a power of two.
  // There should be at most 256 size classes.
  static const uptr l0 = 1 << 4;
  static const uptr l1 = 1 << 9;
  static const uptr l2 = 1 << 12;
  static const uptr l3 = 1 << 15;
  static const uptr l4 = 1 << 18;
  static const uptr l5 = 1 << 21;

  static const uptr s0 = 1 << 4;
  static const uptr s1 = 1 << 6;
  static const uptr s2 = 1 << 9;
  static const uptr s3 = 1 << 12;
  static const uptr s4 = 1 << 15;

  static const uptr u0 = 0  + (l1 - l0) / s0;
  static const uptr u1 = u0 + (l2 - l1) / s1;
  static const uptr u2 = u1 + (l3 - l2) / s2;
  static const uptr u3 = u2 + (l4 - l3) / s3;
  static const uptr u4 = u3 + (l5 - l4) / s4;

  // Maximum number of blocks of each size class that a local cache may hold.
  static const uptr c0 = 256;
  static const uptr c1 = 64;
  static const uptr c2 = 16;
  static const uptr c3 = 4;
  static const uptr c4 = 1;

 public:
  static const uptr kNumClasses = u4 + 1;
  static const uptr kMaxSize = l5;
  static const uptr kMinSize = l0;

  COMPILER_CHECK(kNumClasses <= 256);
  COMPILER_CHECK((kMaxSize & (kMaxSize - 1)) == 0);

  static uptr Size(uptr class_id) {
    if (class_id <= u0) return l0 + s0 * (class_id - 0);
    if (class_id <= u1) return l1 + s1 * (class_id - u0);
    if (class_id <= u2) return l2 + s2 * (class_id - u1);
    if (class_id <= u3) return l3 + s3 * (class_id - u2);
    if (class_id <= u4) return l4 + s4 * (class_id - u3);
    return 0;
  }
  static uptr ClassID(uptr size) {
    if (size <= l1) return 0  + (size - l0 + s0 - 1) / s0;
    if (size <= l2) return u0 + (size - l1 + s1 - 1) / s1;
    if (size <= l3) return u1 + (size - l2 + s2 - 1) / s2;
    if (size <= l4) return u2 + (size - l3 + s3 - 1) / s3;
    if (size <= l5) return u3 + (size - l4 + s4 - 1) / s4;
    return 0;
  }

  static uptr MaxCached(uptr class_id) {
    if (class_id <= u0) return c0;
    if (class_id <= u1) return c1;
    if (class_id <= u2) return c2;
    if (class_id <= u3) return c3;
    if (class_id <= u4) return c4;
    return 0;
  }
};
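
// Illustrative example of the mapping above (the numbers follow directly from
// the constants l0..l5 and s0..s4): a 17-byte request maps to
// DefaultSizeClassMap::ClassID(17) == 1 and DefaultSizeClassMap::Size(1) == 32,
// i.e. the request is rounded up to a 32-byte chunk. With these constants the
// map defines 256 classes covering sizes from 16 bytes (kMinSize) up to
// 2 MB (kMaxSize = 1 << 21).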

struct AllocatorListNode {
  AllocatorListNode *next;
};

typedef IntrusiveList<AllocatorListNode> AllocatorFreeList;
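
// Note that the free lists are intrusive: a free chunk stores its
// AllocatorListNode (a single `next` pointer) in its own first bytes, so
// tracking free chunks costs no extra memory. This only requires a chunk to be
// at least sizeof(AllocatorListNode) bytes, which the smallest size class
// (16 bytes) already guarantees on 64-bit targets.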


// Space: a portion of address space of kSpaceSize bytes starting at
// a fixed address (kSpaceBeg). Both constants are powers of two and
// kSpaceBeg is kSpaceSize-aligned.
//
// Region: a part of Space dedicated to a single size class.
// There are kNumClasses Regions of equal size.
//
// UserChunk: a piece of memory returned to user.
// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
//
// A Region looks like this:
// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
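//
// An illustrative instantiation (a sketch only; the concrete values are
// hypothetical and chosen to satisfy the compile-time checks below, namely
// that kSpaceBeg is kSpaceSize-aligned and each region is >= 2^32 bytes):
//   typedef SizeClassAllocator64<0x700000000000ULL,  // kSpaceBeg
//                                0x010000000000ULL,  // kSpaceSize (2^40)
//                                16,                 // kMetadataSize
//                                DefaultSizeClassMap> PrimaryAllocator64;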
template <const uptr kSpaceBeg, const uptr kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap>
class SizeClassAllocator64 {
 public:
  void Init() {
    CHECK_EQ(AllocBeg(), reinterpret_cast<uptr>(MmapFixedNoReserve(
        AllocBeg(), AllocSize())));
  }

  bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
        alignment <= SizeClassMap::kMaxSize;
  }

  void *Allocate(uptr size, uptr alignment) {
    CHECK(CanAllocate(size, alignment));
    return AllocateBySizeClass(SizeClassMap::ClassID(size));
  }

  void Deallocate(void *p) {
    CHECK(PointerIsMine(p));
    DeallocateBySizeClass(p, GetSizeClass(p));
  }

  // Allocate several chunks of the given class_id.
  void BulkAllocate(uptr class_id, AllocatorFreeList *free_list) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    if (region->free_list.empty()) {
      PopulateFreeList(class_id, region);
    }
    CHECK(!region->free_list.empty());
    uptr count = SizeClassMap::MaxCached(class_id);
    if (region->free_list.size() <= count) {
      free_list->append_front(&region->free_list);
    } else {
      for (uptr i = 0; i < count; i++) {
        AllocatorListNode *node = region->free_list.front();
        region->free_list.pop_front();
        free_list->push_front(node);
      }
    }
    CHECK(!free_list->empty());
  }

  // Swallow the entire free_list for the given class_id.
  void BulkDeallocate(uptr class_id, AllocatorFreeList *free_list) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    region->free_list.append_front(free_list);
  }

  static bool PointerIsMine(void *p) {
    return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
  }

  static uptr GetSizeClass(void *p) {
    return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClasses;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return SizeClassMap::Size(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

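  // The metadata for the chunk with index k in a region is stored at the top
  // end of that region: it occupies the kMetadataSize bytes ending at
  // region_end - k * kMetadataSize. This is what keeps the per-chunk header
  // far away from the user memory (see the Region diagram above).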
  void *GetMetaData(void *p) {
    uptr class_id = GetSizeClass(p);
    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), class_id);
    return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
                                   (1 + chunk_idx) * kMetadataSize);
  }

  uptr TotalMemoryUsed() {
    uptr res = 0;
    for (uptr i = 0; i < kNumClasses; i++)
      res += GetRegionInfo(i)->allocated_user;
    return res;
  }

  // Test-only.
  void TestOnlyUnmap() {
    UnmapOrDie(reinterpret_cast<void*>(AllocBeg()), AllocSize());
  }

  static uptr AllocBeg()  { return kSpaceBeg - AdditionalSize(); }
  static uptr AllocEnd()  { return kSpaceBeg + kSpaceSize; }
  static uptr AllocSize() { return kSpaceSize + AdditionalSize(); }

  static const uptr kNumClasses = 256;  // Power of two <= 256
  typedef SizeClassMap SizeClassMapT;

 private:
  COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
  COMPILER_CHECK(kNumClasses <= SizeClassMap::kNumClasses);
  static const uptr kRegionSize = kSpaceSize / kNumClasses;
  COMPILER_CHECK((kRegionSize >> 32) > 0);  // kRegionSize must be >= 2^32.
  // Populate the free list with at most this number of bytes at once
  // or with one element if its size is greater.
  static const uptr kPopulateSize = 1 << 18;

  struct RegionInfo {
    SpinMutex mutex;
    AllocatorFreeList free_list;
    uptr allocated_user;  // Bytes allocated for user memory.
    uptr allocated_meta;  // Bytes allocated for metadata.
    char padding[kCacheLineSize - 3 * sizeof(uptr) - sizeof(AllocatorFreeList)];
  };
  COMPILER_CHECK(sizeof(RegionInfo) == kCacheLineSize);

  static uptr AdditionalSize() {
    uptr res = sizeof(RegionInfo) * kNumClasses;
    CHECK_EQ(res % kPageSize, 0);
    return res;
  }

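  // The RegionInfo array lives in the AdditionalSize() bytes mapped just
  // below kSpaceBeg (see AllocBeg()), so the entry for class_id is the
  // (class_id + 1)-th RegionInfo counting down from kSpaceBeg.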
  RegionInfo *GetRegionInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg);
    return &regions[-1 - class_id];
  }

  uptr GetChunkIdx(uptr chunk, uptr class_id) {
    u32 offset = chunk % kRegionSize;
    // Here we divide by a non-constant. This is costly.
    // We require that kRegionSize is at least 2^32 so that offset is 32-bit.
    // We save 2x by using 32-bit div, but may need to use a 256-way switch.
    return offset / (u32)SizeClassMap::Size(class_id);
  }

  void PopulateFreeList(uptr class_id, RegionInfo *region) {
    uptr size = SizeClassMap::Size(class_id);
    uptr beg_idx = region->allocated_user;
    uptr end_idx = beg_idx + kPopulateSize;
    region->free_list.clear();
    uptr region_beg = kSpaceBeg + kRegionSize * class_id;
    uptr idx = beg_idx;
    uptr i = 0;
    do {  // do-while loop because we need to put at least one item.
      uptr p = region_beg + idx;
      region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
      idx += size;
      i++;
    } while (idx < end_idx);
    region->allocated_user += idx - beg_idx;
    region->allocated_meta += i * kMetadataSize;
    CHECK_LT(region->allocated_user + region->allocated_meta, kRegionSize);
  }

  void *AllocateBySizeClass(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    if (region->free_list.empty()) {
      PopulateFreeList(class_id, region);
    }
    CHECK(!region->free_list.empty());
    AllocatorListNode *node = region->free_list.front();
    region->free_list.pop_front();
    return reinterpret_cast<void*>(node);
  }

  void DeallocateBySizeClass(void *p, uptr class_id) {
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
  }
};

// Objects of this type should be used as local caches for SizeClassAllocator64.
// Since the typical use of this class is to have one object per thread in TLS,
// it has to be POD.
template<const uptr kNumClasses, class SizeClassAllocator>
struct SizeClassAllocatorLocalCache {
  // Don't need to call Init if the object is a global (i.e. zero-initialized).
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    AllocatorFreeList *free_list = &free_lists_[class_id];
    if (free_list->empty())
      allocator->BulkAllocate(class_id, free_list);
    CHECK(!free_list->empty());
    void *res = free_list->front();
    free_list->pop_front();
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_LT(class_id, kNumClasses);
    AllocatorFreeList *free_list = &free_lists_[class_id];
    free_list->push_front(reinterpret_cast<AllocatorListNode*>(p));
    if (free_list->size() >= 2 * SizeClassMap::MaxCached(class_id))
      DrainHalf(allocator, class_id);
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 0; i < kNumClasses; i++) {
      allocator->BulkDeallocate(i, &free_lists_[i]);
      CHECK(free_lists_[i].empty());
    }
  }

  // private:
  typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
  AllocatorFreeList free_lists_[kNumClasses];

  void DrainHalf(SizeClassAllocator *allocator, uptr class_id) {
    AllocatorFreeList *free_list = &free_lists_[class_id];
    AllocatorFreeList half;
    half.clear();
    const uptr count = free_list->size() / 2;
    for (uptr i = 0; i < count; i++) {
      AllocatorListNode *node = free_list->front();
      free_list->pop_front();
      half.push_front(node);
    }
    allocator->BulkDeallocate(class_id, &half);
  }
};
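
// A minimal per-thread usage sketch (the typedef and the thread-local
// placement are illustrative assumptions, not part of this header;
// PrimaryAllocator64 refers to the hypothetical instantiation sketched above):
//   typedef SizeClassAllocatorLocalCache<PrimaryAllocator64::kNumClasses,
//                                        PrimaryAllocator64> AllocatorCache64;
// Each thread keeps one zero-initialized AllocatorCache64 (e.g. in TLS) and on
// the hot path calls cache->Allocate(&allocator, allocator.ClassID(size)) and
// cache->Deallocate(&allocator, allocator.GetSizeClass(p), p); when the thread
// dies it calls cache->Drain(&allocator).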

// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
// The result is always page-aligned.
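// Each allocation is a separate mapping of RoundUpTo(size, kPageSize) +
// kPageSize bytes: the first page holds the Header and the metadata area,
// and the pointer returned to the user is the start of the second page.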
class LargeMmapAllocator {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }
  void *Allocate(uptr size, uptr alignment) {
    CHECK_LE(alignment, kPageSize);  // Not implemented. Do we need it?
    if (size + alignment + 2 * kPageSize < size)
      return 0;
    uptr map_size = RoundUpMapSize(size);
    void *map = MmapOrDie(map_size, "LargeMmapAllocator");
    void *res = reinterpret_cast<void*>(reinterpret_cast<uptr>(map)
                                        + kPageSize);
    Header *h = GetHeader(res);
    h->size = size;
    {
      SpinMutexLock l(&mutex_);
      h->next = list_;
      h->prev = 0;
      if (list_)
        list_->prev = h;
      list_ = h;
    }
    return res;
  }

  void Deallocate(void *p) {
    Header *h = GetHeader(p);
    uptr map_size = RoundUpMapSize(h->size);
    {
      SpinMutexLock l(&mutex_);
      Header *prev = h->prev;
      Header *next = h->next;
      if (prev)
        prev->next = next;
      if (next)
        next->prev = prev;
      if (h == list_)
        list_ = next;
    }
    UnmapOrDie(h, map_size);
  }

  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (Header *l = list_; l; l = l->next) {
      res += RoundUpMapSize(l->size);
    }
    return res;
  }

  bool PointerIsMine(void *p) {
    // Fast check.
    if ((reinterpret_cast<uptr>(p) % kPageSize) != 0) return false;
    SpinMutexLock l(&mutex_);
    for (Header *l = list_; l; l = l->next) {
      if (GetUser(l) == p) return true;
    }
    return false;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    return RoundUpMapSize(GetHeader(p)->size) - kPageSize;
  }

  // At least kPageSize/2 metadata bytes are available.
  void *GetMetaData(void *p) {
    return GetHeader(p) + 1;
  }

 private:
  struct Header {
    uptr size;
    Header *next;
    Header *prev;
  };

  Header *GetHeader(void *p) {
    return reinterpret_cast<Header*>(reinterpret_cast<uptr>(p) - kPageSize);
  }

  void *GetUser(Header *h) {
    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + kPageSize);
  }

  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, kPageSize) + kPageSize;
  }

  Header *list_;
  SpinMutex mutex_;
};

// This class implements a complete memory allocator by using two
// internal allocators:
// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
// When allocating 2^x bytes it should return a 2^x-aligned chunk.
// PrimaryAllocator is used via a local AllocatorCache.
// SecondaryAllocator can allocate anything, but is not efficient.
template <class PrimaryAllocator, class AllocatorCache,
          class SecondaryAllocator>  // NOLINT
class CombinedAllocator {
 public:
  void Init() {
    primary_.Init();
    secondary_.Init();
  }

  void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
                 bool cleared = false) {
    // Returning 0 on malloc(0) may break a lot of code.
    if (size == 0)
      size = 1;
    if (size + alignment < size)
      return 0;
    if (alignment > 8)
      size = RoundUpTo(size, alignment);
    void *res;
    if (primary_.CanAllocate(size, alignment))
      res = cache->Allocate(&primary_, primary_.ClassID(size));
    else
      res = secondary_.Allocate(size, alignment);
    if (alignment > 8)
      CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
    if (cleared && res)
      internal_memset(res, 0, size);
    return res;
  }

  void Deallocate(AllocatorCache *cache, void *p) {
    if (!p) return;
    if (primary_.PointerIsMine(p))
      cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
    else
      secondary_.Deallocate(p);
  }

  void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
                   uptr alignment) {
    if (!p)
      return Allocate(cache, new_size, alignment);
    if (!new_size) {
      Deallocate(cache, p);
      return 0;
    }
    CHECK(PointerIsMine(p));
    uptr old_size = GetActuallyAllocatedSize(p);
    uptr memcpy_size = Min(new_size, old_size);
    void *new_p = Allocate(cache, new_size, alignment);
    if (new_p)
      internal_memcpy(new_p, p, memcpy_size);
    Deallocate(cache, p);
    return new_p;
  }

  bool PointerIsMine(void *p) {
    if (primary_.PointerIsMine(p))
      return true;
    return secondary_.PointerIsMine(p);
  }

  void *GetMetaData(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetMetaData(p);
    return secondary_.GetMetaData(p);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetActuallyAllocatedSize(p);
    return secondary_.GetActuallyAllocatedSize(p);
  }

  uptr TotalMemoryUsed() {
    return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
  }

  void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }

  void SwallowCache(AllocatorCache *cache) {
    cache->Drain(&primary_);
  }

 private:
  PrimaryAllocator primary_;
  SecondaryAllocator secondary_;
};
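
// A sketch of how the pieces are meant to be wired together (the typedef
// names reuse the hypothetical PrimaryAllocator64 and AllocatorCache64 from
// the examples above and are not defined in this header):
//   typedef CombinedAllocator<PrimaryAllocator64, AllocatorCache64,
//                             LargeMmapAllocator> Allocator64;
// One global Allocator64 is shared by all threads; each thread passes its own
// AllocatorCache64 to Allocate/Deallocate/Reallocate, and SwallowCache returns
// the cached chunks to the global allocator when a thread exits.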

}  // namespace __sanitizer

#endif  // SANITIZER_ALLOCATOR_H