//===-- sanitizer_allocator64.h ---------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// Specialized allocator which works only in 64-bit address space.
// To be used by ThreadSanitizer, MemorySanitizer and possibly other tools.
// The main feature of this allocator is that the header is located far away
// from the user memory region, so that the tool does not use extra shadow
// for the header.
//
// Status: not yet ready.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#define SANITIZER_ALLOCATOR_H

#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

namespace __sanitizer {

// Maps size class id to size and back.
class DefaultSizeClassMap {
 private:
  // Here we use a spline composed of 5 polynomials of order 1.
  // The first size class is l0, then the classes go with step s0
  // until they reach l1, after which they go with step s1, and so on.
  // Steps should be powers of two for cheap division.
  // The size of the last size class should be a power of two.
  // There should be at most 256 size classes.
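  //
  // For example, with the constants chosen below (l0 == 16, s0 == 16):
  //   ClassID(17) == (17 - 16 + 16 - 1) / 16 == 1, Size(1) == 16 + 16 == 32,
  //   so a 17-byte request is rounded up to the 32-byte class.
  // The five segments contribute 31 + 56 + 56 + 56 + 56 classes, so
  // kNumClasses == 256 and kMaxSize == 1 << 21.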
  static const uptr l0 = 1 << 4;
  static const uptr l1 = 1 << 9;
  static const uptr l2 = 1 << 12;
  static const uptr l3 = 1 << 15;
  static const uptr l4 = 1 << 18;
  static const uptr l5 = 1 << 21;

  static const uptr s0 = 1 << 4;
  static const uptr s1 = 1 << 6;
  static const uptr s2 = 1 << 9;
  static const uptr s3 = 1 << 12;
  static const uptr s4 = 1 << 15;

  static const uptr u0 = 0 + (l1 - l0) / s0;
  static const uptr u1 = u0 + (l2 - l1) / s1;
  static const uptr u2 = u1 + (l3 - l2) / s2;
  static const uptr u3 = u2 + (l4 - l3) / s3;
  static const uptr u4 = u3 + (l5 - l4) / s4;

  // Maximum number of blocks of each size class cached in a local cache.
  static const uptr c0 = 256;
  static const uptr c1 = 64;
  static const uptr c2 = 16;
  static const uptr c3 = 4;
  static const uptr c4 = 1;

 public:
  static const uptr kNumClasses = u4 + 1;
  static const uptr kMaxSize = l5;
  static const uptr kMinSize = l0;

  COMPILER_CHECK(kNumClasses <= 256);
  COMPILER_CHECK((kMaxSize & (kMaxSize - 1)) == 0);

  static uptr Size(uptr class_id) {
    if (class_id <= u0) return l0 + s0 * (class_id - 0);
    if (class_id <= u1) return l1 + s1 * (class_id - u0);
    if (class_id <= u2) return l2 + s2 * (class_id - u1);
    if (class_id <= u3) return l3 + s3 * (class_id - u2);
    if (class_id <= u4) return l4 + s4 * (class_id - u3);
    return 0;
  }
  static uptr ClassID(uptr size) {
    if (size <= l1) return 0 + (size - l0 + s0 - 1) / s0;
    if (size <= l2) return u0 + (size - l1 + s1 - 1) / s1;
    if (size <= l3) return u1 + (size - l2 + s2 - 1) / s2;
    if (size <= l4) return u2 + (size - l3 + s3 - 1) / s3;
    if (size <= l5) return u3 + (size - l4 + s4 - 1) / s4;
    return 0;
  }

  static uptr MaxCached(uptr class_id) {
    if (class_id <= u0) return c0;
    if (class_id <= u1) return c1;
    if (class_id <= u2) return c2;
    if (class_id <= u3) return c3;
    if (class_id <= u4) return c4;
    return 0;
  }
};

struct AllocatorListNode {
  AllocatorListNode *next;
};

typedef IntrusiveList<AllocatorListNode> AllocatorFreeList;


// Space: a portion of address space of kSpaceSize bytes starting at
// a fixed address (kSpaceBeg). Both constants are powers of two and
// kSpaceBeg is kSpaceSize-aligned.
//
// Region: a part of Space dedicated to a single size class.
// There are kNumClasses Regions of equal size.
//
// UserChunk: a piece of memory returned to user.
// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
//
// A Region looks like this:
// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
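//
// Concretely (mirroring GetSizeClass(), GetBlockBegin() and GetMetaData()
// below): the Region for class_id i covers
//   [kSpaceBeg + i * kRegionSize, kSpaceBeg + (i + 1) * kRegionSize),
// UserChunk number k of size s == SizeClassMap::Size(i) starts at
// region_beg + k * s, and its MetaChunk occupies the kMetadataSize bytes
// ending at region_end - k * kMetadataSize.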
template <const uptr kSpaceBeg, const uptr kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap>
class SizeClassAllocator64 {
 public:
  void Init() {
    CHECK_EQ(AllocBeg(), reinterpret_cast<uptr>(MmapFixedNoReserve(
             AllocBeg(), AllocSize())));
  }

  bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
        alignment <= SizeClassMap::kMaxSize;
  }

  void *Allocate(uptr size, uptr alignment) {
    CHECK(CanAllocate(size, alignment));
    return AllocateBySizeClass(SizeClassMap::ClassID(size));
  }

  void Deallocate(void *p) {
    CHECK(PointerIsMine(p));
    DeallocateBySizeClass(p, GetSizeClass(p));
  }

  // Allocate several chunks of the given class_id.
  void BulkAllocate(uptr class_id, AllocatorFreeList *free_list) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    if (region->free_list.empty()) {
      PopulateFreeList(class_id, region);
    }
    CHECK(!region->free_list.empty());
    uptr count = SizeClassMap::MaxCached(class_id);
    if (region->free_list.size() <= count) {
      free_list->append_front(&region->free_list);
    } else {
      for (uptr i = 0; i < count; i++) {
        AllocatorListNode *node = region->free_list.front();
        region->free_list.pop_front();
        free_list->push_front(node);
      }
    }
    CHECK(!free_list->empty());
  }

  // Swallow the entire free_list for the given class_id.
  void BulkDeallocate(uptr class_id, AllocatorFreeList *free_list) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    region->free_list.append_front(free_list);
  }

  static bool PointerIsMine(void *p) {
    return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
  }

  static uptr GetSizeClass(void *p) {
    return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClasses;
  }

  static void *GetBlockBegin(void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = SizeClassMap::Size(class_id);
    uptr chunk_idx = GetChunkIdx((uptr)p, size);
    uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
    uptr begin = reg_beg + chunk_idx * size;
    return (void*)begin;
  }

  static uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return SizeClassMap::Size(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  void *GetMetaData(void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = SizeClassMap::Size(class_id);
    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
    return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
                                   (1 + chunk_idx) * kMetadataSize);
  }

  uptr TotalMemoryUsed() {
    uptr res = 0;
    for (uptr i = 0; i < kNumClasses; i++)
      res += GetRegionInfo(i)->allocated_user;
    return res;
  }

  // Test-only.
  void TestOnlyUnmap() {
    UnmapOrDie(reinterpret_cast<void*>(AllocBeg()), AllocSize());
  }

  static uptr AllocBeg() { return kSpaceBeg; }
  static uptr AllocEnd() { return kSpaceBeg + kSpaceSize + AdditionalSize(); }
  static uptr AllocSize() { return kSpaceSize + AdditionalSize(); }

  static const uptr kNumClasses = 256;  // Power of two <= 256
  typedef SizeClassMap SizeClassMapT;

 private:
  COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
  COMPILER_CHECK(kNumClasses <= SizeClassMap::kNumClasses);
  static const uptr kRegionSize = kSpaceSize / kNumClasses;
  COMPILER_CHECK((kRegionSize >> 32) > 0);  // kRegionSize must be >= 2^32.
  // Populate the free list with at most this number of bytes at once
  // or with a single chunk if the chunk size is greater.
  static const uptr kPopulateSize = 1 << 18;
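  // For example, a single PopulateFreeList() call adds (1 << 18) / 16 == 16384
  // chunks for the 16-byte class, but only one chunk for classes of
  // (1 << 18) bytes or larger.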

  struct RegionInfo {
    SpinMutex mutex;
    AllocatorFreeList free_list;
    uptr allocated_user;  // Bytes allocated for user memory.
    uptr allocated_meta;  // Bytes allocated for metadata.
    char padding[kCacheLineSize - 3 * sizeof(uptr) - sizeof(AllocatorFreeList)];
  };
  COMPILER_CHECK(sizeof(RegionInfo) == kCacheLineSize);

  static uptr AdditionalSize() {
    uptr res = sizeof(RegionInfo) * kNumClasses;
    CHECK_EQ(res % kPageSize, 0);
    return res;
  }

  RegionInfo *GetRegionInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg + kSpaceSize);
    return &regions[class_id];
  }

  static uptr GetChunkIdx(uptr chunk, uptr size) {
    u32 offset = chunk % kRegionSize;
    // Here we divide by a non-constant. This is costly.
    // We require that kRegionSize is at least 2^32 so that offset is 32-bit.
    // We save 2x by using 32-bit div, but may need to use a 256-way switch.
    return offset / (u32)size;
  }

  void PopulateFreeList(uptr class_id, RegionInfo *region) {
    uptr size = SizeClassMap::Size(class_id);
    uptr beg_idx = region->allocated_user;
    uptr end_idx = beg_idx + kPopulateSize;
    region->free_list.clear();
    uptr region_beg = kSpaceBeg + kRegionSize * class_id;
    uptr idx = beg_idx;
    uptr i = 0;
    do {  // do-while loop because we need to put at least one item.
      uptr p = region_beg + idx;
      region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
      idx += size;
      i++;
    } while (idx < end_idx);
    region->allocated_user += idx - beg_idx;
    region->allocated_meta += i * kMetadataSize;
    CHECK_LT(region->allocated_user + region->allocated_meta, kRegionSize);
  }

  void *AllocateBySizeClass(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    if (region->free_list.empty()) {
      PopulateFreeList(class_id, region);
    }
    CHECK(!region->free_list.empty());
    AllocatorListNode *node = region->free_list.front();
    region->free_list.pop_front();
    return reinterpret_cast<void*>(node);
  }

  void DeallocateBySizeClass(void *p, uptr class_id) {
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
  }
};

// Objects of this type should be used as local caches for
// SizeClassAllocator64. Since the typical use of this class is to have one
// object per thread in TLS, it has to be POD.
template<const uptr kNumClasses, class SizeClassAllocator>
struct SizeClassAllocatorLocalCache {
  // Don't need to call Init if the object is a global (i.e. zero-initialized).
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    AllocatorFreeList *free_list = &free_lists_[class_id];
    if (free_list->empty())
      allocator->BulkAllocate(class_id, free_list);
    CHECK(!free_list->empty());
    void *res = free_list->front();
    free_list->pop_front();
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_LT(class_id, kNumClasses);
    AllocatorFreeList *free_list = &free_lists_[class_id];
    free_list->push_front(reinterpret_cast<AllocatorListNode*>(p));
    if (free_list->size() >= 2 * SizeClassMap::MaxCached(class_id))
      DrainHalf(allocator, class_id);
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 0; i < kNumClasses; i++) {
      allocator->BulkDeallocate(i, &free_lists_[i]);
      CHECK(free_lists_[i].empty());
    }
  }

  // private:
  typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
  AllocatorFreeList free_lists_[kNumClasses];

  void DrainHalf(SizeClassAllocator *allocator, uptr class_id) {
    AllocatorFreeList *free_list = &free_lists_[class_id];
    AllocatorFreeList half;
    half.clear();
    const uptr count = free_list->size() / 2;
    for (uptr i = 0; i < count; i++) {
      AllocatorListNode *node = free_list->front();
      free_list->pop_front();
      half.push_front(node);
    }
    allocator->BulkDeallocate(class_id, &half);
  }
};
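
// Typical lifetime of a cache (a sketch, not a requirement of this header):
// keep one zero-initialized SizeClassAllocatorLocalCache per thread (e.g. in
// TLS), route allocations through Allocate()/Deallocate() above, and call
// Drain() (or CombinedAllocator::SwallowCache()) before the thread exits so
// that all cached chunks are returned to the underlying allocator.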

// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
// The result is always page-aligned.
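//
// Layout of a single allocation (as produced by Allocate() below):
//   [ one page: Header + metadata | RoundUpTo(size, kPageSize) user bytes ]
// The user pointer is the address one page past the start of the mapping, so
// GetHeader(p) == p - kPageSize, and GetMetaData(p) points just past the
// Header, leaving most of that page available for metadata.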
class LargeMmapAllocator {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }
  void *Allocate(uptr size, uptr alignment) {
    CHECK_LE(alignment, kPageSize);  // Not implemented. Do we need it?
    if (size + alignment + 2 * kPageSize < size)
      return 0;
    uptr map_size = RoundUpMapSize(size);
    void *map = MmapOrDie(map_size, "LargeMmapAllocator");
    void *res = reinterpret_cast<void*>(reinterpret_cast<uptr>(map)
                                        + kPageSize);
    Header *h = GetHeader(res);
    h->size = size;
    {
      SpinMutexLock l(&mutex_);
      h->next = list_;
      h->prev = 0;
      if (list_)
        list_->prev = h;
      list_ = h;
    }
    return res;
  }

  void Deallocate(void *p) {
    Header *h = GetHeader(p);
    uptr map_size = RoundUpMapSize(h->size);
    {
      SpinMutexLock l(&mutex_);
      Header *prev = h->prev;
      Header *next = h->next;
      if (prev)
        prev->next = next;
      if (next)
        next->prev = prev;
      if (h == list_)
        list_ = next;
    }
    UnmapOrDie(h, map_size);
  }

  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (Header *l = list_; l; l = l->next) {
      res += RoundUpMapSize(l->size);
    }
    return res;
  }

  bool PointerIsMine(void *p) {
    // Fast check.
    if ((reinterpret_cast<uptr>(p) % kPageSize) != 0) return false;
    SpinMutexLock l(&mutex_);
    for (Header *l = list_; l; l = l->next) {
      if (GetUser(l) == p) return true;
    }
    return false;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    return RoundUpMapSize(GetHeader(p)->size) - kPageSize;
  }

  // At least kPageSize/2 metadata bytes are available.
  void *GetMetaData(void *p) {
    return GetHeader(p) + 1;
  }

  void *GetBlockBegin(void *p) {
    SpinMutexLock l(&mutex_);
    for (Header *l = list_; l; l = l->next) {
      void *b = GetUser(l);
      if (p >= b && p < (u8*)b + l->size)
        return b;
    }
    return 0;
  }

 private:
  struct Header {
    uptr size;
    Header *next;
    Header *prev;
  };

  Header *GetHeader(void *p) {
    return reinterpret_cast<Header*>(reinterpret_cast<uptr>(p) - kPageSize);
  }

  void *GetUser(Header *h) {
    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + kPageSize);
  }

  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, kPageSize) + kPageSize;
  }

  Header *list_;
  SpinMutex mutex_;
};

// This class implements a complete memory allocator by using two
// internal allocators:
// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
// When allocating 2^x bytes it should return a 2^x-aligned chunk.
// PrimaryAllocator is used via a local AllocatorCache.
// SecondaryAllocator can allocate anything, but is not efficient.
template <class PrimaryAllocator, class AllocatorCache,
          class SecondaryAllocator>  // NOLINT
class CombinedAllocator {
 public:
  void Init() {
    primary_.Init();
    secondary_.Init();
  }

  void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
                 bool cleared = false) {
    // Returning 0 on malloc(0) may break a lot of code.
    if (size == 0)
      size = 1;
    if (size + alignment < size)
      return 0;
    if (alignment > 8)
      size = RoundUpTo(size, alignment);
    void *res;
    if (primary_.CanAllocate(size, alignment))
      res = cache->Allocate(&primary_, primary_.ClassID(size));
    else
      res = secondary_.Allocate(size, alignment);
    if (alignment > 8)
      CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
    if (cleared && res)
      internal_memset(res, 0, size);
    return res;
  }

  void Deallocate(AllocatorCache *cache, void *p) {
    if (!p) return;
    if (primary_.PointerIsMine(p))
      cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
    else
      secondary_.Deallocate(p);
  }

  void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
                   uptr alignment) {
    if (!p)
      return Allocate(cache, new_size, alignment);
    if (!new_size) {
      Deallocate(cache, p);
      return 0;
    }
    CHECK(PointerIsMine(p));
    uptr old_size = GetActuallyAllocatedSize(p);
    uptr memcpy_size = Min(new_size, old_size);
    void *new_p = Allocate(cache, new_size, alignment);
    if (new_p)
      internal_memcpy(new_p, p, memcpy_size);
    Deallocate(cache, p);
    return new_p;
  }

  bool PointerIsMine(void *p) {
    if (primary_.PointerIsMine(p))
      return true;
    return secondary_.PointerIsMine(p);
  }

  void *GetMetaData(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetMetaData(p);
    return secondary_.GetMetaData(p);
  }

  void *GetBlockBegin(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBegin(p);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetActuallyAllocatedSize(p);
    return secondary_.GetActuallyAllocatedSize(p);
  }

  uptr TotalMemoryUsed() {
    return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
  }

  void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }

  void SwallowCache(AllocatorCache *cache) {
    cache->Drain(&primary_);
  }

 private:
  PrimaryAllocator primary_;
  SecondaryAllocator secondary_;
};
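
// Usage sketch (illustrative only: the constants and typedef names below are
// hypothetical and not defined by this header):
//
//   typedef SizeClassAllocator64<0x700000000000ULL /*kSpaceBeg*/,
//                                0x010000000000ULL /*kSpaceSize*/,
//                                16 /*kMetadataSize*/,
//                                DefaultSizeClassMap> Primary;
//   typedef SizeClassAllocatorLocalCache<Primary::kNumClasses, Primary> Cache;
//   typedef CombinedAllocator<Primary, Cache, LargeMmapAllocator> Allocator;
//
//   static Allocator allocator;  // Global, hence zero-initialized.
//   Cache cache;                 // One per thread (e.g. in TLS); POD, so a
//                                // zero-initialized global/TLS object needs
//                                // no explicit Init() call.
//
//   allocator.Init();
//   void *p = allocator.Allocate(&cache, 100, 8);
//   allocator.Deallocate(&cache, p);
//   allocator.SwallowCache(&cache);  // E.g. when the owning thread dies.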

}  // namespace __sanitizer

#endif  // SANITIZER_ALLOCATOR_H