Kostya Serebryany | 6e26fa9 | 2012-06-21 10:04:36 +0000 | [diff] [blame] | 1 | //===-- sanitizer_allocator64.h ---------------------------------*- C++ -*-===// |
| 2 | // |
| 3 | // The LLVM Compiler Infrastructure |
| 4 | // |
| 5 | // This file is distributed under the University of Illinois Open Source |
| 6 | // License. See LICENSE.TXT for details. |
| 7 | // |
| 8 | //===----------------------------------------------------------------------===// |
| 9 | // Specialized allocator which works only in 64-bit address space. |
| 10 | // To be used by ThreadSanitizer, MemorySanitizer and possibly other tools. |
| 11 | // The main feature of this allocator is that the header is located far away |
| 12 | // from the user memory region, so that the tool does not use extra shadow |
| 13 | // for the header. |
| 14 | // |
| 15 | // Status: not yet ready. |
| 16 | //===----------------------------------------------------------------------===// |
| 17 | #ifndef SANITIZER_ALLOCATOR_H |
| 18 | #define SANITIZER_ALLOCATOR_H |
| 19 | |
| 20 | #include "sanitizer_common.h" |
| 21 | #include "sanitizer_internal_defs.h" |
Kostya Serebryany | 4196046 | 2012-06-26 14:23:32 +0000 | [diff] [blame^] | 22 | #include "sanitizer_libc.h" |
Kostya Serebryany | 6e26fa9 | 2012-06-21 10:04:36 +0000 | [diff] [blame] | 23 | |
| 24 | namespace __sanitizer { |
| 25 | |
Kostya Serebryany | 5b01415 | 2012-06-22 13:00:50 +0000 | [diff] [blame] | 26 | // Maps size class id to size and back. |
Kostya Serebryany | 6e26fa9 | 2012-06-21 10:04:36 +0000 | [diff] [blame] | 27 | class DefaultSizeClassMap { |
| 28 | private: |
| 29 | // Here we use a spline composed of 5 polynomials of oder 1. |
| 30 | // The first size class is l0, then the classes go with step s0 |
| 31 | // untill they reach l1, after which they go with step s1 and so on. |
| 32 | // Steps should be powers of two for cheap division. |
| 33 | // The size of the last size class should be a power of two. |
| 34 | // There should be at most 256 size classes. |
| 35 | static const uptr l0 = 1 << 4; |
| 36 | static const uptr l1 = 1 << 9; |
| 37 | static const uptr l2 = 1 << 12; |
| 38 | static const uptr l3 = 1 << 15; |
| 39 | static const uptr l4 = 1 << 18; |
| 40 | static const uptr l5 = 1 << 21; |
| 41 | |
| 42 | static const uptr s0 = 1 << 4; |
| 43 | static const uptr s1 = 1 << 6; |
| 44 | static const uptr s2 = 1 << 9; |
| 45 | static const uptr s3 = 1 << 12; |
| 46 | static const uptr s4 = 1 << 15; |
| 47 | |
| 48 | static const uptr u0 = 0 + (l1 - l0) / s0; |
| 49 | static const uptr u1 = u0 + (l2 - l1) / s1; |
| 50 | static const uptr u2 = u1 + (l3 - l2) / s2; |
| 51 | static const uptr u3 = u2 + (l4 - l3) / s3; |
| 52 | static const uptr u4 = u3 + (l5 - l4) / s4; |
| 53 | |
| 54 | public: |
| 55 | static const uptr kNumClasses = u4 + 1; |
| 56 | static const uptr kMaxSize = l5; |
Kostya Serebryany | 5b01415 | 2012-06-22 13:00:50 +0000 | [diff] [blame] | 57 | static const uptr kMinSize = l0; |
Kostya Serebryany | 6e26fa9 | 2012-06-21 10:04:36 +0000 | [diff] [blame] | 58 | |
| 59 | COMPILER_CHECK(kNumClasses <= 256); |
| 60 | COMPILER_CHECK((kMaxSize & (kMaxSize - 1)) == 0); |
| 61 | |
Kostya Serebryany | 5b01415 | 2012-06-22 13:00:50 +0000 | [diff] [blame] | 62 | static uptr Size(uptr class_id) { |
| 63 | if (class_id <= u0) return l0 + s0 * (class_id - 0); |
| 64 | if (class_id <= u1) return l1 + s1 * (class_id - u0); |
| 65 | if (class_id <= u2) return l2 + s2 * (class_id - u1); |
| 66 | if (class_id <= u3) return l3 + s3 * (class_id - u2); |
| 67 | if (class_id <= u4) return l4 + s4 * (class_id - u3); |
Kostya Serebryany | 6e26fa9 | 2012-06-21 10:04:36 +0000 | [diff] [blame] | 68 | return 0; |
| 69 | } |
Kostya Serebryany | 5b01415 | 2012-06-22 13:00:50 +0000 | [diff] [blame] | 70 | static uptr ClassID(uptr size) { |
Kostya Serebryany | 6e26fa9 | 2012-06-21 10:04:36 +0000 | [diff] [blame] | 71 | if (size <= l1) return 0 + (size - l0 + s0 - 1) / s0; |
| 72 | if (size <= l2) return u0 + (size - l1 + s1 - 1) / s1; |
| 73 | if (size <= l3) return u1 + (size - l2 + s2 - 1) / s2; |
| 74 | if (size <= l4) return u2 + (size - l3 + s3 - 1) / s3; |
| 75 | if (size <= l5) return u3 + (size - l4 + s4 - 1) / s4; |
| 76 | return 0; |
| 77 | } |
| 78 | }; |
| 79 | |
Kostya Serebryany | 5b01415 | 2012-06-22 13:00:50 +0000 | [diff] [blame] | 80 | // Space: a portion of address space of kSpaceSize bytes starting at |
| 81 | // a fixed address (kSpaceBeg). Both constants are powers of two and |
| 82 | // kSpaceBeg is kSpaceSize-aligned. |
| 83 | // |
| 84 | // Region: a part of Space dedicated to a single size class. |
| 85 | // There are kNumClasses Regions of equal size. |
| 86 | // |
| 87 | // UserChunk: a piece of memory returned to user. |
| 88 | // MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk. |
| 89 | // |
| 90 | // A Region looks like this: |
| 91 | // UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1 |
template <const uptr kSpaceBeg, const uptr kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap>
class SizeClassAllocator64 {
 public:
  // Maps the entire address range this allocator owns: kSpaceSize bytes at
  // kSpaceBeg plus the RegionInfo array placed immediately below kSpaceBeg.
  // Must be called once before any other method; dies if the fixed mapping
  // cannot be established at the expected address.
  void Init() {
    CHECK_EQ(AllocBeg(), reinterpret_cast<uptr>(MmapFixedNoReserve(
             AllocBeg(), AllocSize())));
  }
  // Returns a chunk whose class size is >= `size`.
  // Requires size <= SizeClassMap::kMaxSize.
  NOINLINE
  void *Allocate(uptr size) {
    CHECK_LE(size, SizeClassMap::kMaxSize);
    return AllocateBySizeClass(SizeClassMap::ClassID(size));
  }
  // Returns the chunk to the free list of its size class.
  // `p` must have been returned by Allocate() of this allocator.
  NOINLINE
  void Deallocate(void *p) {
    CHECK(PointerIsMine(p));
    DeallocateBySizeClass(p, GetSizeClass(p));
  }
  // True iff `p` lies inside [kSpaceBeg, kSpaceBeg + kSpaceSize).
  // Relies on kSpaceBeg being kSpaceSize-aligned (see the comment above).
  bool PointerIsMine(void *p) {
    return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
  }
  // Size class of the Region containing `p`; Regions are laid out
  // back-to-back, one per class, so the region index is the class id.
  uptr GetSizeClass(void *p) {
    return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClasses;
  }

  // Returns the MetaChunk for user chunk `p`. MetaChunks are stacked
  // downwards from the end of the Region in reverse chunk order
  // (see the Region layout diagram above the class).
  void *GetMetaData(void *p) {
    uptr class_id = GetSizeClass(p);
    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), class_id);
    return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
                                   (1 + chunk_idx) * kMetadataSize);
  }

  // Total bytes carved out for user chunks (metadata not included).
  // NOTE(review): reads the per-region counters without locking; racy
  // once the FIXME mutexes are implemented.
  uptr TotalMemoryUsed() {
    uptr res = 0;
    for (uptr i = 0; i < kNumClasses; i++)
      res += GetRegionInfo(i)->allocated_user;
    return res;
  }

  // Test-only: returns the whole mapping (RegionInfo array + Space) to the OS.
  void TestOnlyUnmap() {
    UnmapOrDie(reinterpret_cast<void*>(AllocBeg()), AllocSize());
  }

 private:
  static const uptr kNumClasses = 256;  // Power of two <= 256
  // Every region index must map to a valid class id in SizeClassMap.
  COMPILER_CHECK(kNumClasses <= SizeClassMap::kNumClasses);
  static const uptr kRegionSize = kSpaceSize / kNumClasses;
  COMPILER_CHECK((kRegionSize >> 32) > 0);  // kRegionSize must be >= 2^32.
  // Populate the free list with at most this number of bytes at once
  // or with one element if its size is greater.
  static const uptr kPopulateSize = 1 << 18;

  // Intrusive singly-linked free-list node, stored inside the free
  // user chunk itself (so every class size must hold a pointer).
  struct LifoListNode {
    LifoListNode *next;
  };

  // Per-size-class bookkeeping, kept in an array just below kSpaceBeg.
  struct RegionInfo {
    uptr mutex;  // FIXME
    LifoListNode *free_list;
    uptr allocated_user;  // Bytes allocated for user memory.
    uptr allocated_meta;  // Bytes allocated for metadata.
    char padding[kCacheLineSize - 4 * sizeof(uptr)];  // Pad to one cache line.
  };
  COMPILER_CHECK(sizeof(RegionInfo) == kCacheLineSize);

  // Size of the RegionInfo array; must be page-aligned because it
  // extends the fixed mapping below kSpaceBeg.
  uptr AdditionalSize() {
    uptr res = sizeof(RegionInfo) * kNumClasses;
    CHECK_EQ(res % kPageSize, 0);
    return res;
  }
  uptr AllocBeg() { return kSpaceBeg - AdditionalSize(); }
  uptr AllocSize() { return kSpaceSize + AdditionalSize(); }

  // RegionInfo for class_id is indexed backwards from kSpaceBeg.
  RegionInfo *GetRegionInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg);
    return &regions[-1 - class_id];
  }

  // Pushes `node` onto the intrusive LIFO list.
  void PushLifoList(LifoListNode **list, LifoListNode *node) {
    node->next = *list;
    *list = node;
  }

  // Pops the head; the list must be non-empty (callers check).
  LifoListNode *PopLifoList(LifoListNode **list) {
    LifoListNode *res = *list;
    *list = res->next;
    return res;
  }

  // Index of the chunk at address `chunk` within its Region.
  uptr GetChunkIdx(uptr chunk, uptr class_id) {
    u32 offset = chunk % kRegionSize;
    // Here we divide by a non-constant. This is costly.
    // We require that kRegionSize is at least 2^32 so that offset is 32-bit.
    // We save 2x by using 32-bit div, but may need to use a 256-way switch.
    // NOTE(review): if kRegionSize is strictly greater than 2^32, the
    // modulo result may not fit in u32 and the assignment above drops
    // high bits; verify the intended invariant (<= 2^32 vs >= 2^32).
    return offset / (u32)SizeClassMap::Size(class_id);
  }

  // Carves up to kPopulateSize fresh bytes (at least one chunk) from the
  // never-yet-used tail of the Region and returns them as a LIFO list.
  // Also accounts for the metadata these chunks will consume at the
  // opposite end of the Region.
  LifoListNode *PopulateFreeList(uptr class_id, RegionInfo *region) {
    uptr size = SizeClassMap::Size(class_id);
    uptr beg_idx = region->allocated_user;  // First never-used byte.
    uptr end_idx = beg_idx + kPopulateSize;
    LifoListNode *res = 0;
    uptr region_beg = kSpaceBeg + kRegionSize * class_id;
    uptr idx = beg_idx;
    uptr i = 0;  // Number of chunks created.
    do {  // do-while loop because we need to put at least one item.
      uptr p = region_beg + idx;
      PushLifoList(&res, reinterpret_cast<LifoListNode*>(p));
      idx += size;
      i++;
    } while (idx < end_idx);
    region->allocated_user += idx - beg_idx;
    region->allocated_meta += i * kMetadataSize;
    // User chunks grow from the region start, metadata from its end;
    // die before the two can collide.
    CHECK_LT(region->allocated_user + region->allocated_meta, kRegionSize);
    return res;
  }

  // Pops a chunk from the class's free list, refilling it first if empty.
  void *AllocateBySizeClass(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    // FIXME: Lock region->mutex;
    if (!region->free_list) {
      region->free_list = PopulateFreeList(class_id, region);
    }
    CHECK_NE(region->free_list, 0);
    LifoListNode *node = PopLifoList(&region->free_list);
    return reinterpret_cast<void*>(node);
  }

  // Returns the chunk to its class's free list.
  void DeallocateBySizeClass(void *p, uptr class_id) {
    RegionInfo *region = GetRegionInfo(class_id);
    // FIXME: Lock region->mutex;
    PushLifoList(&region->free_list, reinterpret_cast<LifoListNode*>(p));
  }
};
| 229 | |
Kostya Serebryany | 4196046 | 2012-06-26 14:23:32 +0000 | [diff] [blame^] | 230 | // This class can (de)allocate only large chunks of memory using mmap/unmap. |
| 231 | // The main purpose of this allocator is to cover large and rare allocation |
| 232 | // sizes not covered by more efficient allocators (e.g. SizeClassAllocator64). |
| 233 | // The result is always page-aligned. |
| 234 | class LargeMmapAllocator { |
| 235 | public: |
| 236 | void Init() { |
| 237 | internal_memset(this, 0, sizeof(*this)); |
| 238 | } |
| 239 | void *Allocate(uptr size) { |
| 240 | uptr map_size = RoundUpMapSize(size); |
| 241 | void *map = MmapOrDie(map_size, "LargeMmapAllocator"); |
| 242 | void *res = reinterpret_cast<void*>(reinterpret_cast<uptr>(map) |
| 243 | + kPageSize); |
| 244 | Header *h = GetHeader(res); |
| 245 | h->size = size; |
| 246 | { |
| 247 | // FIXME: lock |
| 248 | h->next = list_; |
| 249 | h->prev = 0; |
| 250 | if (list_) |
| 251 | list_->prev = h; |
| 252 | list_ = h; |
| 253 | } |
| 254 | return res; |
| 255 | } |
| 256 | |
| 257 | void Deallocate(void *p) { |
| 258 | Header *h = GetHeader(p); |
| 259 | uptr map_size = RoundUpMapSize(h->size); |
| 260 | { |
| 261 | // FIXME: lock |
| 262 | Header *prev = h->prev; |
| 263 | Header *next = h->next; |
| 264 | if (prev) |
| 265 | prev->next = next; |
| 266 | if (next) |
| 267 | next->prev = prev; |
| 268 | if (h == list_) |
| 269 | list_ = next; |
| 270 | } |
| 271 | UnmapOrDie(h, map_size); |
| 272 | } |
| 273 | |
| 274 | uptr TotalMemoryUsed() { |
| 275 | // FIXME: lock |
| 276 | uptr res = 0; |
| 277 | for (Header *l = list_; l; l = l->next) { |
| 278 | res += RoundUpMapSize(l->size); |
| 279 | } |
| 280 | return res; |
| 281 | } |
| 282 | |
| 283 | bool PointerIsMine(void *p) { |
| 284 | // Fast check. |
| 285 | if ((reinterpret_cast<uptr>(p) % kPageSize) != 0) return false; |
| 286 | // FIXME: lock |
| 287 | for (Header *l = list_; l; l = l->next) { |
| 288 | if (GetUser(l) == p) return true; |
| 289 | } |
| 290 | return false; |
| 291 | } |
| 292 | |
| 293 | // At least kPageSize/2 metadata bytes is available. |
| 294 | void *GetMetaData(void *p) { |
| 295 | return GetHeader(p) + 1; |
| 296 | } |
| 297 | |
| 298 | private: |
| 299 | struct Header { |
| 300 | uptr size; |
| 301 | Header *next; |
| 302 | Header *prev; |
| 303 | }; |
| 304 | |
| 305 | Header *GetHeader(void *p) { |
| 306 | return reinterpret_cast<Header*>(reinterpret_cast<uptr>(p) - kPageSize); |
| 307 | } |
| 308 | |
| 309 | void *GetUser(Header *h) { |
| 310 | return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + kPageSize); |
| 311 | } |
| 312 | |
| 313 | uptr RoundUpMapSize(uptr size) { |
| 314 | return RoundUpTo(size, kPageSize) + kPageSize; |
| 315 | } |
| 316 | |
| 317 | Header *list_; |
| 318 | uptr lock_; // FIXME |
| 319 | }; |
| 320 | |
Kostya Serebryany | 6e26fa9 | 2012-06-21 10:04:36 +0000 | [diff] [blame] | 321 | } // namespace __sanitizer |
| 322 | |
| 323 | #endif // SANITIZER_ALLOCATOR_H |