//===-- sanitizer_allocator64.h ---------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// Specialized allocator which works only in 64-bit address space.
// To be used by ThreadSanitizer, MemorySanitizer and possibly other tools.
// The main feature of this allocator is that the header is located far away
// from the user memory region, so that the tool does not use extra shadow
// for the header.
//
// Status: not yet ready.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#define SANITIZER_ALLOCATOR_H

#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

namespace __sanitizer {

// Maps size class id to size and back.
class DefaultSizeClassMap {
 private:
  // Here we use a spline composed of 5 polynomials of order 1.
  // The first size class is l0, then the classes go with step s0
  // until they reach l1, after which they go with step s1 and so on.
  // Steps should be powers of two for cheap division.
  // The size of the last size class should be a power of two.
  // There should be at most 256 size classes.
  static const uptr l0 = 1 << 4;
  static const uptr l1 = 1 << 9;
  static const uptr l2 = 1 << 12;
  static const uptr l3 = 1 << 15;
  static const uptr l4 = 1 << 18;
  static const uptr l5 = 1 << 21;

  static const uptr s0 = 1 << 4;
  static const uptr s1 = 1 << 6;
  static const uptr s2 = 1 << 9;
  static const uptr s3 = 1 << 12;
  static const uptr s4 = 1 << 15;

  static const uptr u0 = 0  + (l1 - l0) / s0;
  static const uptr u1 = u0 + (l2 - l1) / s1;
  static const uptr u2 = u1 + (l3 - l2) / s2;
  static const uptr u3 = u2 + (l4 - l3) / s3;
  static const uptr u4 = u3 + (l5 - l4) / s4;

  // Maximum number of blocks cached in a local cache, per spline segment.
  static const uptr c0 = 256;
  static const uptr c1 = 64;
  static const uptr c2 = 16;
  static const uptr c3 = 4;
  static const uptr c4 = 1;

 public:
  static const uptr kNumClasses = u4 + 1;
  static const uptr kMaxSize = l5;
  static const uptr kMinSize = l0;

  COMPILER_CHECK(kNumClasses <= 256);
  COMPILER_CHECK((kMaxSize & (kMaxSize - 1)) == 0);

  static uptr Size(uptr class_id) {
    if (class_id <= u0) return l0 + s0 * (class_id - 0);
    if (class_id <= u1) return l1 + s1 * (class_id - u0);
    if (class_id <= u2) return l2 + s2 * (class_id - u1);
    if (class_id <= u3) return l3 + s3 * (class_id - u2);
    if (class_id <= u4) return l4 + s4 * (class_id - u3);
    return 0;
  }
  static uptr ClassID(uptr size) {
    if (size <= l1) return 0  + (size - l0 + s0 - 1) / s0;
    if (size <= l2) return u0 + (size - l1 + s1 - 1) / s1;
    if (size <= l3) return u1 + (size - l2 + s2 - 1) / s2;
    if (size <= l4) return u2 + (size - l3 + s3 - 1) / s3;
    if (size <= l5) return u3 + (size - l4 + s4 - 1) / s4;
    return 0;
  }

  static uptr MaxCached(uptr class_id) {
    if (class_id <= u0) return c0;
    if (class_id <= u1) return c1;
    if (class_id <= u2) return c2;
    if (class_id <= u3) return c3;
    if (class_id <= u4) return c4;
    return 0;
  }
};
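
// Worked example of the mapping above (a sanity check, not exercised at
// runtime):
//   ClassID(17)   == 1  and Size(1)  == 32   (first segment, step 16).
//   ClassID(1000) == 39 and Size(39) == 1024 (second segment, step 64).
// For any size in [1, kMaxSize], Size(ClassID(size)) >= size.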

struct AllocatorListNode {
  AllocatorListNode *next;
};

typedef IntrusiveList<AllocatorListNode> AllocatorFreeList;


// Space: a portion of address space of kSpaceSize bytes starting at
// a fixed address (kSpaceBeg). Both constants are powers of two and
// kSpaceBeg is kSpaceSize-aligned.
//
// Region: a part of Space dedicated to a single size class.
// There are kNumClasses Regions of equal size.
//
// UserChunk: a piece of memory returned to user.
// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
//
// A Region looks like this:
// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
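//
// Given a pointer p into the Space, the code below recovers its place with
// (sketch of the arithmetic; region_end = kSpaceBeg + kRegionSize * (class_id + 1)):
//   class_id  = (p / kRegionSize) % kNumClasses;      // shift and mask
//   chunk_idx = (p % kRegionSize) / Size(class_id);   // one 32-bit division
//   metadata  = region_end - (chunk_idx + 1) * kMetadataSize;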
template <const uptr kSpaceBeg, const uptr kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap>
class SizeClassAllocator64 {
 public:
  void Init() {
    CHECK_EQ(AllocBeg(), reinterpret_cast<uptr>(MmapFixedNoReserve(
             AllocBeg(), AllocSize())));
  }

  bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
      alignment <= SizeClassMap::kMaxSize;
  }

  void *Allocate(uptr size, uptr alignment) {
    CHECK(CanAllocate(size, alignment));
    return AllocateBySizeClass(SizeClassMap::ClassID(size));
  }

  void Deallocate(void *p) {
    CHECK(PointerIsMine(p));
    DeallocateBySizeClass(p, GetSizeClass(p));
  }

  // Allocate several chunks of the given class_id.
  void BulkAllocate(uptr class_id, AllocatorFreeList *free_list) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    if (region->free_list.empty()) {
      PopulateFreeList(class_id, region);
    }
    CHECK(!region->free_list.empty());
    uptr count = SizeClassMap::MaxCached(class_id);
    if (region->free_list.size() <= count) {
      free_list->append_front(&region->free_list);
    } else {
      for (uptr i = 0; i < count; i++) {
        AllocatorListNode *node = region->free_list.front();
        region->free_list.pop_front();
        free_list->push_front(node);
      }
    }
    CHECK(!free_list->empty());
  }

  // Swallow the entire free_list for the given class_id.
  void BulkDeallocate(uptr class_id, AllocatorFreeList *free_list) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    region->free_list.append_front(free_list);
  }

  static bool PointerIsMine(void *p) {
    return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
  }

  static uptr GetSizeClass(void *p) {
    return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClasses;
  }

  static void *GetBlockBegin(void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = SizeClassMap::Size(class_id);
    uptr chunk_idx = GetChunkIdx((uptr)p, size);
    uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
    uptr begin = reg_beg + chunk_idx * size;
    return (void*)begin;
  }

  static uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return SizeClassMap::Size(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  void *GetMetaData(void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = SizeClassMap::Size(class_id);
    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
    return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
                                   (1 + chunk_idx) * kMetadataSize);
  }

  uptr TotalMemoryUsed() {
    uptr res = 0;
    for (uptr i = 0; i < kNumClasses; i++)
      res += GetRegionInfo(i)->allocated_user;
    return res;
  }

  // Test-only.
  void TestOnlyUnmap() {
    UnmapOrDie(reinterpret_cast<void*>(AllocBeg()), AllocSize());
  }

  static uptr AllocBeg()  { return kSpaceBeg; }
  static uptr AllocEnd()  { return kSpaceBeg + kSpaceSize + AdditionalSize(); }
  static uptr AllocSize() { return kSpaceSize + AdditionalSize(); }

  static const uptr kNumClasses = 256;  // Power of two <= 256
  typedef SizeClassMap SizeClassMapT;

 private:
  COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
  COMPILER_CHECK(kNumClasses <= SizeClassMap::kNumClasses);
  static const uptr kRegionSize = kSpaceSize / kNumClasses;
  COMPILER_CHECK((kRegionSize >> 32) > 0);  // kRegionSize must be >= 2^32.
  // Populate the free list with at most this number of bytes at once
  // or with one element if its size is greater.
  static const uptr kPopulateSize = 1 << 18;

  struct RegionInfo {
    SpinMutex mutex;
    AllocatorFreeList free_list;
    uptr allocated_user;  // Bytes allocated for user memory.
    uptr allocated_meta;  // Bytes allocated for metadata.
    char padding[kCacheLineSize - 3 * sizeof(uptr) - sizeof(AllocatorFreeList)];
  };
  COMPILER_CHECK(sizeof(RegionInfo) == kCacheLineSize);

  static uptr AdditionalSize() {
    uptr res = sizeof(RegionInfo) * kNumClasses;
    CHECK_EQ(res % kPageSize, 0);
    return res;
  }

  RegionInfo *GetRegionInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg + kSpaceSize);
    return &regions[class_id];
  }

  static uptr GetChunkIdx(uptr chunk, uptr size) {
    u32 offset = chunk % kRegionSize;
    // Here we divide by a non-constant. This is costly.
    // We require that kRegionSize is at least 2^32 so that offset is 32-bit.
    // We save 2x by using 32-bit div, but may need to use a 256-way switch.
    return offset / (u32)size;
  }

  void PopulateFreeList(uptr class_id, RegionInfo *region) {
    uptr size = SizeClassMap::Size(class_id);
    uptr beg_idx = region->allocated_user;
    uptr end_idx = beg_idx + kPopulateSize;
    region->free_list.clear();
    uptr region_beg = kSpaceBeg + kRegionSize * class_id;
    uptr idx = beg_idx;
    uptr i = 0;
    do {  // do-while loop because we need to put at least one item.
      uptr p = region_beg + idx;
      region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
      idx += size;
      i++;
    } while (idx < end_idx);
    region->allocated_user += idx - beg_idx;
    region->allocated_meta += i * kMetadataSize;
    CHECK_LT(region->allocated_user + region->allocated_meta, kRegionSize);
  }

  void *AllocateBySizeClass(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    if (region->free_list.empty()) {
      PopulateFreeList(class_id, region);
    }
    CHECK(!region->free_list.empty());
    AllocatorListNode *node = region->free_list.front();
    region->free_list.pop_front();
    return reinterpret_cast<void*>(node);
  }

  void DeallocateBySizeClass(void *p, uptr class_id) {
    RegionInfo *region = GetRegionInfo(class_id);
    SpinMutexLock l(&region->mutex);
    region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
  }
};
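
// Instantiation sketch (the constants here are illustrative and satisfy the
// COMPILER_CHECKs above, but are not the layout any particular tool uses):
//   typedef SizeClassAllocator64<0x700000000000ULL /*kSpaceBeg*/,
//                                0x010000000000ULL /*kSpaceSize*/,
//                                16 /*kMetadataSize*/,
//                                DefaultSizeClassMap> PrimaryAllocator;
//   PrimaryAllocator a;
//   a.Init();
//   void *p = a.Allocate(100, 8);
//   a.Deallocate(p);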

// Objects of this type should be used as local caches for
// SizeClassAllocator64. Since the typical use of this class is to have one
// object per thread in TLS, it has to be POD.
template<const uptr kNumClasses, class SizeClassAllocator>
struct SizeClassAllocatorLocalCache {
  // Don't need to call Init if the object is a global (i.e. zero-initialized).
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    AllocatorFreeList *free_list = &free_lists_[class_id];
    if (free_list->empty())
      allocator->BulkAllocate(class_id, free_list);
    CHECK(!free_list->empty());
    void *res = free_list->front();
    free_list->pop_front();
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_LT(class_id, kNumClasses);
    AllocatorFreeList *free_list = &free_lists_[class_id];
    free_list->push_front(reinterpret_cast<AllocatorListNode*>(p));
    if (free_list->size() >= 2 * SizeClassMap::MaxCached(class_id))
      DrainHalf(allocator, class_id);
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 0; i < kNumClasses; i++) {
      allocator->BulkDeallocate(i, &free_lists_[i]);
      CHECK(free_lists_[i].empty());
    }
  }

  // private:
  typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
  AllocatorFreeList free_lists_[kNumClasses];

  void DrainHalf(SizeClassAllocator *allocator, uptr class_id) {
    AllocatorFreeList *free_list = &free_lists_[class_id];
    AllocatorFreeList half;
    half.clear();
    const uptr count = free_list->size() / 2;
    for (uptr i = 0; i < count; i++) {
      AllocatorListNode *node = free_list->front();
      free_list->pop_front();
      half.push_front(node);
    }
    allocator->BulkDeallocate(class_id, &half);
  }
};
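
// Typical use (a sketch; PrimaryAllocator refers to the instantiation sketched
// above, and where the cache object lives, e.g. in TLS, is up to the tool):
//   typedef SizeClassAllocatorLocalCache<PrimaryAllocator::kNumClasses,
//                                        PrimaryAllocator> AllocatorCache;
//   AllocatorCache cache;  // Zero-initialized if global/TLS, otherwise Init().
//   void *p = cache.Allocate(&allocator, allocator.ClassID(size));
//   cache.Deallocate(&allocator, allocator.GetSizeClass(p), p);
//   cache.Drain(&allocator);  // Return all cached chunks to the allocator.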

// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
// The result is always page-aligned.
class LargeMmapAllocator {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }
  void *Allocate(uptr size, uptr alignment) {
    CHECK_LE(alignment, kPageSize);  // Not implemented. Do we need it?
    if (size + alignment + 2 * kPageSize < size)
      return 0;
    uptr map_size = RoundUpMapSize(size);
    void *map = MmapOrDie(map_size, "LargeMmapAllocator");
    void *res = reinterpret_cast<void*>(reinterpret_cast<uptr>(map)
                                        + kPageSize);
    Header *h = GetHeader(res);
    h->size = size;
    {
      SpinMutexLock l(&mutex_);
      h->next = list_;
      h->prev = 0;
      if (list_)
        list_->prev = h;
      list_ = h;
    }
    return res;
  }

  void Deallocate(void *p) {
    Header *h = GetHeader(p);
    uptr map_size = RoundUpMapSize(h->size);
    {
      SpinMutexLock l(&mutex_);
      Header *prev = h->prev;
      Header *next = h->next;
      if (prev)
        prev->next = next;
      if (next)
        next->prev = prev;
      if (h == list_)
        list_ = next;
    }
    UnmapOrDie(h, map_size);
  }

  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (Header *l = list_; l; l = l->next) {
      res += RoundUpMapSize(l->size);
    }
    return res;
  }

  bool PointerIsMine(void *p) {
    // Fast check.
    if ((reinterpret_cast<uptr>(p) % kPageSize) != 0) return false;
    SpinMutexLock l(&mutex_);
    for (Header *l = list_; l; l = l->next) {
      if (GetUser(l) == p) return true;
    }
    return false;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    return RoundUpMapSize(GetHeader(p)->size) - kPageSize;
  }

  // At least kPageSize/2 metadata bytes are available.
  void *GetMetaData(void *p) {
    return GetHeader(p) + 1;
  }

  void *GetBlockBegin(void *p) {
    SpinMutexLock l(&mutex_);
    for (Header *l = list_; l; l = l->next) {
      void *b = GetUser(l);
      if (p >= b && p < (u8*)b + l->size)
        return b;
    }
    return 0;
  }

 private:
  struct Header {
    uptr size;
    Header *next;
    Header *prev;
  };

  Header *GetHeader(void *p) {
    return reinterpret_cast<Header*>(reinterpret_cast<uptr>(p) - kPageSize);
  }

  void *GetUser(Header *h) {
    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + kPageSize);
  }

  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, kPageSize) + kPageSize;
  }

  Header *list_;
  SpinMutex mutex_;
};
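
// Layout note (follows from the code above): a request of size bytes maps
// RoundUpMapSize(size) == RoundUpTo(size, kPageSize) + kPageSize bytes.
// The extra first page holds the Header (with the user-visible metadata right
// after it), and the user pointer starts at map + kPageSize, so the result is
// always page-aligned.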

// This class implements a complete memory allocator by using two
// internal allocators:
// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
// When allocating 2^x bytes it should return a 2^x-aligned chunk.
// PrimaryAllocator is used via a local AllocatorCache.
// SecondaryAllocator can allocate anything, but is not efficient.
template <class PrimaryAllocator, class AllocatorCache,
          class SecondaryAllocator>  // NOLINT
class CombinedAllocator {
 public:
  void Init() {
    primary_.Init();
    secondary_.Init();
  }

  void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
                 bool cleared = false) {
    // Returning 0 on malloc(0) may break a lot of code.
    if (size == 0)
      size = 1;
    if (size + alignment < size)
      return 0;
    if (alignment > 8)
      size = RoundUpTo(size, alignment);
    void *res;
    if (primary_.CanAllocate(size, alignment))
      res = cache->Allocate(&primary_, primary_.ClassID(size));
    else
      res = secondary_.Allocate(size, alignment);
    if (alignment > 8)
      CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
    if (cleared && res)
      internal_memset(res, 0, size);
    return res;
  }

  void Deallocate(AllocatorCache *cache, void *p) {
    if (!p) return;
    if (primary_.PointerIsMine(p))
      cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
    else
      secondary_.Deallocate(p);
  }

  void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
                   uptr alignment) {
    if (!p)
      return Allocate(cache, new_size, alignment);
    if (!new_size) {
      Deallocate(cache, p);
      return 0;
    }
    CHECK(PointerIsMine(p));
    uptr old_size = GetActuallyAllocatedSize(p);
    uptr memcpy_size = Min(new_size, old_size);
    void *new_p = Allocate(cache, new_size, alignment);
    if (new_p)
      internal_memcpy(new_p, p, memcpy_size);
    Deallocate(cache, p);
    return new_p;
  }

  bool PointerIsMine(void *p) {
    if (primary_.PointerIsMine(p))
      return true;
    return secondary_.PointerIsMine(p);
  }

  void *GetMetaData(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetMetaData(p);
    return secondary_.GetMetaData(p);
  }

  void *GetBlockBegin(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBegin(p);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetActuallyAllocatedSize(p);
    return secondary_.GetActuallyAllocatedSize(p);
  }

  uptr TotalMemoryUsed() {
    return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
  }

  void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }

  void SwallowCache(AllocatorCache *cache) {
    cache->Drain(&primary_);
  }

 private:
  PrimaryAllocator primary_;
  SecondaryAllocator secondary_;
};
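
// Putting the pieces together (a sketch; the typedef names are illustrative):
//   typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
//                             LargeMmapAllocator> Allocator;
//   Allocator allocator;
//   allocator.Init();
//   AllocatorCache cache;  // One per thread.
//   void *p = allocator.Allocate(&cache, size, alignment);
//   p = allocator.Reallocate(&cache, p, new_size, alignment);
//   allocator.Deallocate(&cache, p);
//   allocator.SwallowCache(&cache);  // On thread exit.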

}  // namespace __sanitizer

#endif  // SANITIZER_ALLOCATOR_H