//===-- sanitizer_allocator64.h ---------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// Specialized allocator which works only in 64-bit address space.
// To be used by ThreadSanitizer, MemorySanitizer and possibly other tools.
// The main feature of this allocator is that the header is located far away
// from the user memory region, so that the tool does not use extra shadow
// for the header.
//
// Status: not yet ready.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#define SANITIZER_ALLOCATOR_H

#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

namespace __sanitizer {

// Maps size class id to size and back.
class DefaultSizeClassMap {
 private:
  // Here we use a spline composed of 5 polynomials of order 1.
  // The first size class is l0, then the classes go with step s0
  // until they reach l1, after which they go with step s1 and so on.
  // Steps should be powers of two for cheap division.
  // The size of the last size class should be a power of two.
  // There should be at most 256 size classes.
  static const uptr l0 = 1 << 4;
  static const uptr l1 = 1 << 9;
  static const uptr l2 = 1 << 12;
  static const uptr l3 = 1 << 15;
  static const uptr l4 = 1 << 18;
  static const uptr l5 = 1 << 21;

  static const uptr s0 = 1 << 4;
  static const uptr s1 = 1 << 6;
  static const uptr s2 = 1 << 9;
  static const uptr s3 = 1 << 12;
  static const uptr s4 = 1 << 15;

  static const uptr u0 = 0 + (l1 - l0) / s0;
  static const uptr u1 = u0 + (l2 - l1) / s1;
  static const uptr u2 = u1 + (l3 - l2) / s2;
  static const uptr u3 = u2 + (l4 - l3) / s3;
  static const uptr u4 = u3 + (l5 - l4) / s4;

 public:
  static const uptr kNumClasses = u4 + 1;
  static const uptr kMaxSize = l5;
  static const uptr kMinSize = l0;

  COMPILER_CHECK(kNumClasses <= 256);
  COMPILER_CHECK((kMaxSize & (kMaxSize - 1)) == 0);

  static uptr Size(uptr class_id) {
    if (class_id <= u0) return l0 + s0 * (class_id - 0);
    if (class_id <= u1) return l1 + s1 * (class_id - u0);
    if (class_id <= u2) return l2 + s2 * (class_id - u1);
    if (class_id <= u3) return l3 + s3 * (class_id - u2);
    if (class_id <= u4) return l4 + s4 * (class_id - u3);
    return 0;
  }
  static uptr ClassID(uptr size) {
    if (size <= l1) return 0 + (size - l0 + s0 - 1) / s0;
    if (size <= l2) return u0 + (size - l1 + s1 - 1) / s1;
    if (size <= l3) return u1 + (size - l2 + s2 - 1) / s2;
    if (size <= l4) return u2 + (size - l3 + s3 - 1) / s3;
    if (size <= l5) return u3 + (size - l4 + s4 - 1) / s4;
    return 0;
  }
};
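
// Example (illustrative, derived from the default constants above, where
// l0 == 16 and s0 == 16): ClassID(100) == (100 - 16 + 15) / 16 == 6 and
// Size(6) == 16 + 16 * 6 == 112, so a 100-byte request is served from the
// 112-byte class; the largest class, kMaxSize, is 1 << 21 bytes (2 MiB).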

struct AllocatorListNode {
  AllocatorListNode *next;
};

typedef IntrusiveList<AllocatorListNode> AllocatorFreeList;


// Space: a portion of address space of kSpaceSize bytes starting at
// a fixed address (kSpaceBeg). Both constants are powers of two and
// kSpaceBeg is kSpaceSize-aligned.
//
// Region: a part of Space dedicated to a single size class.
// There are kNumClasses Regions of equal size.
//
// UserChunk: a piece of memory returned to user.
// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
//
// A Region looks like this:
// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
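//
// For orientation (a restatement of GetMetaData() below, not new behavior):
// the MetaChunk of the chunk with zero-based index chunk_idx in the Region of
// class class_id lives at
//   kSpaceBeg + kRegionSize * (class_id + 1) - (chunk_idx + 1) * kMetadataSize,
// i.e. metadata grows downwards from the end of the Region while user chunks
// grow upwards from its beginning.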
template <const uptr kSpaceBeg, const uptr kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap>
class SizeClassAllocator64 {
 public:
  void Init() {
    CHECK_EQ(AllocBeg(), reinterpret_cast<uptr>(MmapFixedNoReserve(
             AllocBeg(), AllocSize())));
  }

  bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
        alignment <= SizeClassMap::kMaxSize;
  }

  void *Allocate(uptr size, uptr alignment) {
    CHECK(CanAllocate(size, alignment));
    return AllocateBySizeClass(SizeClassMap::ClassID(size));
  }

  void Deallocate(void *p) {
    CHECK(PointerIsMine(p));
    DeallocateBySizeClass(p, GetSizeClass(p));
  }

  // Allocate several chunks of the given class_id.
  void BulkAllocate(uptr class_id, AllocatorFreeList *free_list) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    if (region->free_list.empty()) {
      PopulateFreeList(class_id, region);
    }
    CHECK(!region->free_list.empty());
    // Just take as many chunks as we have in the free list now.
    // FIXME: this might be too much.
    free_list->append_front(&region->free_list);
    CHECK(region->free_list.empty());
  }

  // Swallow the entire free_list for the given class_id.
  void BulkDeallocate(uptr class_id, AllocatorFreeList *free_list) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    region->free_list.append_front(free_list);
  }

  static bool PointerIsMine(void *p) {
    return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
  }

  static uptr GetSizeClass(void *p) {
    return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClasses;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return SizeClassMap::Size(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  void *GetMetaData(void *p) {
    uptr class_id = GetSizeClass(p);
    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), class_id);
    return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
                                   (1 + chunk_idx) * kMetadataSize);
  }

  uptr TotalMemoryUsed() {
    uptr res = 0;
    for (uptr i = 0; i < kNumClasses; i++)
      res += GetRegionInfo(i)->allocated_user;
    return res;
  }

  // Test-only.
  void TestOnlyUnmap() {
    UnmapOrDie(reinterpret_cast<void*>(AllocBeg()), AllocSize());
  }

  static uptr AllocBeg()  { return kSpaceBeg - AdditionalSize(); }
  static uptr AllocEnd()  { return kSpaceBeg + kSpaceSize; }
  static uptr AllocSize() { return kSpaceSize + AdditionalSize(); }
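
  // Memory layout sketch (follows from AllocBeg()/AllocSize() above and
  // GetRegionInfo() below; stated here only for orientation):
  //   [kSpaceBeg - AdditionalSize(), kSpaceBeg)   RegionInfo for classes
  //                                               kNumClasses-1 ... 0
  //   [kSpaceBeg, kSpaceBeg + kSpaceSize)         kNumClasses Regions of
  //                                               kRegionSize bytes each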

  static const uptr kNumClasses = 256;  // Power of two <= 256

 private:
  COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
  COMPILER_CHECK(kNumClasses <= SizeClassMap::kNumClasses);
  static const uptr kRegionSize = kSpaceSize / kNumClasses;
  COMPILER_CHECK((kRegionSize >> 32) > 0);  // kRegionSize must be >= 2^32.
  // Populate the free list with at most this number of bytes at once
  // or with one element if its size is greater.
  static const uptr kPopulateSize = 1 << 18;

  struct RegionInfo {
    SpinMutex mutex;
    AllocatorFreeList free_list;
    uptr allocated_user;  // Bytes allocated for user memory.
    uptr allocated_meta;  // Bytes allocated for metadata.
    char padding[kCacheLineSize - 3 * sizeof(uptr) - sizeof(AllocatorFreeList)];
  };
  COMPILER_CHECK(sizeof(RegionInfo) == kCacheLineSize);

  static uptr AdditionalSize() {
    uptr res = sizeof(RegionInfo) * kNumClasses;
    CHECK_EQ(res % kPageSize, 0);
    return res;
  }

  RegionInfo *GetRegionInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg);
    return &regions[-1 - class_id];
  }

  uptr GetChunkIdx(uptr chunk, uptr class_id) {
    u32 offset = chunk % kRegionSize;
    // Here we divide by a non-constant. This is costly.
    // We require that kRegionSize is at least 2^32 so that offset is 32-bit.
    // We save 2x by using 32-bit div, but may need to use a 256-way switch.
    return offset / (u32)SizeClassMap::Size(class_id);
  }

  void PopulateFreeList(uptr class_id, RegionInfo *region) {
    uptr size = SizeClassMap::Size(class_id);
    uptr beg_idx = region->allocated_user;
    uptr end_idx = beg_idx + kPopulateSize;
    region->free_list.clear();
    uptr region_beg = kSpaceBeg + kRegionSize * class_id;
    uptr idx = beg_idx;
    uptr i = 0;
    do {  // do-while loop because we need to put at least one item.
      uptr p = region_beg + idx;
      region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
      idx += size;
      i++;
    } while (idx < end_idx);
    region->allocated_user += idx - beg_idx;
    region->allocated_meta += i * kMetadataSize;
    CHECK_LT(region->allocated_user + region->allocated_meta, kRegionSize);
  }

  void *AllocateBySizeClass(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    if (region->free_list.empty()) {
      PopulateFreeList(class_id, region);
    }
    CHECK(!region->free_list.empty());
    AllocatorListNode *node = region->free_list.front();
    region->free_list.pop_front();
    return reinterpret_cast<void*>(node);
  }

  void DeallocateBySizeClass(void *p, uptr class_id) {
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
  }
};
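
// Usage sketch (illustrative only; the concrete template arguments below are
// assumptions chosen for demonstration, not values mandated by this header).
// kSpaceBeg must be kSpaceSize-aligned and kSpaceSize / kNumClasses must be
// at least 1 << 32 to satisfy the compile-time checks above:
//   typedef SizeClassAllocator64<0x700000000000ULL /*kSpaceBeg*/,
//       0x010000000000ULL /*kSpaceSize*/, 16 /*kMetadataSize*/,
//       DefaultSizeClassMap> PrimaryAllocator;
//   PrimaryAllocator a;
//   a.Init();
//   void *p = a.Allocate(100, 8);
//   CHECK(a.PointerIsMine(p));
//   a.Deallocate(p);
//   a.TestOnlyUnmap();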

// Objects of this type should be used as local caches for
// SizeClassAllocator64. Since the typical use of this class is to have one
// object per thread in TLS, it has to be POD.
template<const uptr kNumClasses, class SizeClassAllocator>
struct SizeClassAllocatorLocalCache {
  // Don't need to call Init if the object is a global (i.e. zero-initialized).
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    AllocatorFreeList *free_list = &free_lists_[class_id];
    if (free_list->empty())
      allocator->BulkAllocate(class_id, free_list);
    CHECK(!free_list->empty());
    void *res = free_list->front();
    free_list->pop_front();
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_LT(class_id, kNumClasses);
    free_lists_[class_id].push_front(reinterpret_cast<AllocatorListNode*>(p));
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 0; i < kNumClasses; i++) {
      allocator->BulkDeallocate(i, &free_lists_[i]);
      CHECK(free_lists_[i].empty());
    }
  }

  // private:
  AllocatorFreeList free_lists_[kNumClasses];
};
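
// Typical per-thread use (a sketch; PrimaryAllocator stands for a concrete
// SizeClassAllocator64 instantiation such as the one in the example above,
// and THREADLOCAL is assumed to expand to the platform's TLS specifier):
//   typedef SizeClassAllocatorLocalCache<PrimaryAllocator::kNumClasses,
//       PrimaryAllocator> AllocatorCache;
//   static THREADLOCAL AllocatorCache cache;  // POD, zero-initialized.
//   void *p = cache.Allocate(&allocator, allocator.ClassID(size));
//   cache.Deallocate(&allocator, allocator.GetSizeClass(p), p);
//   // On thread exit, give the cached chunks back to the allocator:
//   cache.Drain(&allocator);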

// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
// The result is always page-aligned.
class LargeMmapAllocator {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }
  void *Allocate(uptr size, uptr alignment) {
    CHECK_LE(alignment, kPageSize);  // Not implemented. Do we need it?
    if (size + alignment + 2 * kPageSize < size)
      return 0;
    uptr map_size = RoundUpMapSize(size);
    void *map = MmapOrDie(map_size, "LargeMmapAllocator");
    void *res = reinterpret_cast<void*>(reinterpret_cast<uptr>(map)
                                        + kPageSize);
    Header *h = GetHeader(res);
    h->size = size;
    {
      SpinMutexLock l(&mutex_);
      h->next = list_;
      h->prev = 0;
      if (list_)
        list_->prev = h;
      list_ = h;
    }
    return res;
  }

  void Deallocate(void *p) {
    Header *h = GetHeader(p);
    uptr map_size = RoundUpMapSize(h->size);
    {
      SpinMutexLock l(&mutex_);
      Header *prev = h->prev;
      Header *next = h->next;
      if (prev)
        prev->next = next;
      if (next)
        next->prev = prev;
      if (h == list_)
        list_ = next;
    }
    UnmapOrDie(h, map_size);
  }

  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (Header *l = list_; l; l = l->next) {
      res += RoundUpMapSize(l->size);
    }
    return res;
  }

  bool PointerIsMine(void *p) {
    // Fast check.
    if ((reinterpret_cast<uptr>(p) % kPageSize) != 0) return false;
    SpinMutexLock l(&mutex_);
    for (Header *l = list_; l; l = l->next) {
      if (GetUser(l) == p) return true;
    }
    return false;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    return RoundUpMapSize(GetHeader(p)->size) - kPageSize;
  }

  // At least kPageSize/2 metadata bytes are available.
  void *GetMetaData(void *p) {
    return GetHeader(p) + 1;
  }

 private:
  struct Header {
    uptr size;
    Header *next;
    Header *prev;
  };

  Header *GetHeader(void *p) {
    return reinterpret_cast<Header*>(reinterpret_cast<uptr>(p) - kPageSize);
  }

  void *GetUser(Header *h) {
    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + kPageSize);
  }

  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, kPageSize) + kPageSize;
  }

  Header *list_;
  SpinMutex mutex_;
};
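
// Chunk layout sketch (a restatement of the code above, for orientation):
// a request of `size` bytes maps RoundUpTo(size, kPageSize) + kPageSize
// bytes; the first page holds the Header and the metadata returned by
// GetMetaData(), and the user pointer is map + kPageSize. For example, with
// 4K pages Allocate(1, 8) maps two pages and GetActuallyAllocatedSize()
// reports kPageSize usable bytes.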

// This class implements a complete memory allocator by using two
// internal allocators:
// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
// When allocating 2^x bytes it should return a 2^x-aligned chunk.
// PrimaryAllocator is used via a local AllocatorCache.
// SecondaryAllocator can allocate anything, but is not efficient.
template <class PrimaryAllocator, class AllocatorCache,
          class SecondaryAllocator>  // NOLINT
class CombinedAllocator {
 public:
  void Init() {
    primary_.Init();
    secondary_.Init();
  }

  void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
                 bool cleared = false) {
    // Returning 0 on malloc(0) may break a lot of code.
    if (size == 0)
      size = 1;
    if (size + alignment < size)
      return 0;
    if (alignment > 8)
      size = RoundUpTo(size, alignment);
    void *res;
    if (primary_.CanAllocate(size, alignment))
      res = cache->Allocate(&primary_, primary_.ClassID(size));
    else
      res = secondary_.Allocate(size, alignment);
    if (alignment > 8)
      CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
    if (cleared && res)
      internal_memset(res, 0, size);
    return res;
  }

  void Deallocate(AllocatorCache *cache, void *p) {
    if (!p) return;
    if (primary_.PointerIsMine(p))
      cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
    else
      secondary_.Deallocate(p);
  }

  void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
                   uptr alignment) {
    if (!p)
      return Allocate(cache, new_size, alignment);
    if (!new_size) {
      Deallocate(cache, p);
      return 0;
    }
    CHECK(PointerIsMine(p));
    uptr old_size = GetActuallyAllocatedSize(p);
    uptr memcpy_size = Min(new_size, old_size);
    void *new_p = Allocate(cache, new_size, alignment);
    if (new_p)
      internal_memcpy(new_p, p, memcpy_size);
    Deallocate(cache, p);
    return new_p;
  }

  bool PointerIsMine(void *p) {
    if (primary_.PointerIsMine(p))
      return true;
    return secondary_.PointerIsMine(p);
  }

  void *GetMetaData(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetMetaData(p);
    return secondary_.GetMetaData(p);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetActuallyAllocatedSize(p);
    return secondary_.GetActuallyAllocatedSize(p);
  }

  uptr TotalMemoryUsed() {
    return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
  }

  void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }

  void SwallowCache(AllocatorCache *cache) {
    cache->Drain(&primary_);
  }

 private:
  PrimaryAllocator primary_;
  SecondaryAllocator secondary_;
};
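
// Putting the pieces together (an illustrative sketch; the typedef names and
// template arguments are examples, not part of this header):
//   typedef SizeClassAllocator64<0x700000000000ULL, 0x010000000000ULL, 16,
//       DefaultSizeClassMap> PrimaryAllocator;
//   typedef SizeClassAllocatorLocalCache<PrimaryAllocator::kNumClasses,
//       PrimaryAllocator> AllocatorCache;
//   typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
//       LargeMmapAllocator> Allocator;
//   static Allocator allocator;               // Zero-initialized.
//   static THREADLOCAL AllocatorCache cache;  // One per thread.
//   allocator.Init();
//   void *p = allocator.Allocate(&cache, size, /*alignment*/ 8);
//   allocator.Deallocate(&cache, p);
//   allocator.SwallowCache(&cache);  // E.g. on thread destruction.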

}  // namespace __sanitizer

#endif  // SANITIZER_ALLOCATOR_H