blob: 31a7ceaff1fe196d359508b660d926705b759dce [file] [log] [blame]
//===-- sanitizer_allocator64.h ---------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// Specialized allocator which works only in 64-bit address space.
// To be used by ThreadSanitizer, MemorySanitizer and possibly other tools.
// The main feature of this allocator is that the header is located far away
// from the user memory region, so that the tool does not use extra shadow
// for the header.
//
// Status: not yet ready.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#define SANITIZER_ALLOCATOR_H

#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"

namespace __sanitizer {

Kostya Serebryany5b014152012-06-22 13:00:50 +000025// Maps size class id to size and back.
Kostya Serebryany6e26fa92012-06-21 10:04:36 +000026class DefaultSizeClassMap {
27 private:
28 // Here we use a spline composed of 5 polynomials of oder 1.
29 // The first size class is l0, then the classes go with step s0
30 // untill they reach l1, after which they go with step s1 and so on.
31 // Steps should be powers of two for cheap division.
32 // The size of the last size class should be a power of two.
33 // There should be at most 256 size classes.
34 static const uptr l0 = 1 << 4;
35 static const uptr l1 = 1 << 9;
36 static const uptr l2 = 1 << 12;
37 static const uptr l3 = 1 << 15;
38 static const uptr l4 = 1 << 18;
39 static const uptr l5 = 1 << 21;
40
41 static const uptr s0 = 1 << 4;
42 static const uptr s1 = 1 << 6;
43 static const uptr s2 = 1 << 9;
44 static const uptr s3 = 1 << 12;
45 static const uptr s4 = 1 << 15;
46
47 static const uptr u0 = 0 + (l1 - l0) / s0;
48 static const uptr u1 = u0 + (l2 - l1) / s1;
49 static const uptr u2 = u1 + (l3 - l2) / s2;
50 static const uptr u3 = u2 + (l4 - l3) / s3;
51 static const uptr u4 = u3 + (l5 - l4) / s4;
52
53 public:
54 static const uptr kNumClasses = u4 + 1;
55 static const uptr kMaxSize = l5;
Kostya Serebryany5b014152012-06-22 13:00:50 +000056 static const uptr kMinSize = l0;
Kostya Serebryany6e26fa92012-06-21 10:04:36 +000057
58 COMPILER_CHECK(kNumClasses <= 256);
59 COMPILER_CHECK((kMaxSize & (kMaxSize - 1)) == 0);
60
Kostya Serebryany5b014152012-06-22 13:00:50 +000061 static uptr Size(uptr class_id) {
62 if (class_id <= u0) return l0 + s0 * (class_id - 0);
63 if (class_id <= u1) return l1 + s1 * (class_id - u0);
64 if (class_id <= u2) return l2 + s2 * (class_id - u1);
65 if (class_id <= u3) return l3 + s3 * (class_id - u2);
66 if (class_id <= u4) return l4 + s4 * (class_id - u3);
Kostya Serebryany6e26fa92012-06-21 10:04:36 +000067 return 0;
68 }
Kostya Serebryany5b014152012-06-22 13:00:50 +000069 static uptr ClassID(uptr size) {
Kostya Serebryany6e26fa92012-06-21 10:04:36 +000070 if (size <= l1) return 0 + (size - l0 + s0 - 1) / s0;
71 if (size <= l2) return u0 + (size - l1 + s1 - 1) / s1;
72 if (size <= l3) return u1 + (size - l2 + s2 - 1) / s2;
73 if (size <= l4) return u2 + (size - l3 + s3 - 1) / s3;
74 if (size <= l5) return u3 + (size - l4 + s4 - 1) / s4;
75 return 0;
76 }
77};
78
// Space: a portion of address space of kSpaceSize bytes starting at
// a fixed address (kSpaceBeg). Both constants are powers of two and
// kSpaceBeg is kSpaceSize-aligned.
//
// Region: a part of Space dedicated to a single size class.
// There are kNumClasses Regions of equal size.
//
// UserChunk: a piece of memory returned to user.
// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
//
// A Region looks like this:
// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
//
// NOTE(review): the per-region mutex is not implemented yet (see the
// FIXMEs below), so this class is not thread-safe in its current form.
template <const uptr kSpaceBeg, const uptr kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap>
class SizeClassAllocator64 {
 public:
  // Maps the RegionInfo array plus the whole Space in one fixed mapping.
  // Must be called before any other method.
  void Init() {
    CHECK_EQ(AllocBeg(), reinterpret_cast<uptr>(MmapFixedNoReserve(
        AllocBeg(), AllocSize())));
  }
  // Returns a chunk whose class size is >= `size`.
  // `size` must not exceed SizeClassMap::kMaxSize (checked).
  NOINLINE
  void *Allocate(uptr size) {
    CHECK_LE(size, SizeClassMap::kMaxSize);
    return AllocateBySizeClass(SizeClassMap::ClassID(size));
  }
  // Returns a chunk previously obtained from Allocate() to the free
  // list of its size class. `p` must belong to this allocator (checked).
  NOINLINE
  void Deallocate(void *p) {
    CHECK(PointerIsMine(p));
    DeallocateBySizeClass(p, GetSizeClass(p));
  }
  // True iff `p` lies inside [kSpaceBeg, kSpaceBeg + kSpaceSize).
  // Relies on kSpaceBeg being kSpaceSize-aligned.
  bool PointerIsMine(void *p) {
    return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
  }
  // Size class of the chunk containing `p`. Each Region serves exactly
  // one class, so this is simply the index of the Region holding `p`.
  uptr GetSizeClass(void *p) {
    return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClasses;
  }

  // Address of the kMetadataSize-byte MetaChunk for chunk `p`.
  // MetaChunks are laid out backwards from the end of the Region
  // (see the Region diagram above).
  uptr GetMetaData(void *p) {
    uptr class_id = GetSizeClass(p);
    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), class_id);
    return kSpaceBeg + (kRegionSize * (class_id + 1)) -
        (1 + chunk_idx) * kMetadataSize;
  }

  // Total bytes carved out for user chunks across all classes
  // (metadata bytes are not included).
  uptr TotalMemoryUsed() {
    uptr res = 0;
    for (uptr i = 0; i < kNumClasses; i++)
      res += GetRegionInfo(i)->allocated_user;
    return res;
  }

  // Test-only.
  void TestOnlyUnmap() {
    UnmapOrDie(reinterpret_cast<void*>(AllocBeg()), AllocSize());
  }

 private:
  static const uptr kNumClasses = 256;  // Power of two <= 256
  COMPILER_CHECK(kNumClasses <= SizeClassMap::kNumClasses);
  static const uptr kRegionSize = kSpaceSize / kNumClasses;
  COMPILER_CHECK((kRegionSize >> 32) > 0);  // kRegionSize must be >= 2^32.
  // Populate the free list with at most this number of bytes at once
  // or with one element if its size is greater.
  static const uptr kPopulateSize = 1 << 18;

  // Intrusive singly-linked free-list node, stored inside the free
  // chunk itself.
  struct LifoListNode {
    LifoListNode *next;
  };

  // Per-class bookkeeping. One RegionInfo per Region, stored just below
  // kSpaceBeg (see GetRegionInfo). Padded so each entry occupies exactly
  // one cache line (enforced by the COMPILER_CHECK below).
  struct RegionInfo {
    uptr mutex;  // FIXME
    LifoListNode *free_list;
    uptr allocated_user;  // Bytes allocated for user memory.
    uptr allocated_meta;  // Bytes allocated for metadata.
    char padding[kCacheLineSize - 4 * sizeof(uptr)];
  };
  COMPILER_CHECK(sizeof(RegionInfo) == kCacheLineSize);

  // Size of the RegionInfo array that precedes kSpaceBeg; must be a
  // multiple of kPageSize (checked) so Init() can map it together with
  // the Space.
  uptr AdditionalSize() {
    uptr res = sizeof(RegionInfo) * kNumClasses;
    CHECK_EQ(res % kPageSize, 0);
    return res;
  }
  uptr AllocBeg() { return kSpaceBeg - AdditionalSize(); }
  uptr AllocSize() { return kSpaceSize + AdditionalSize(); }

  // The RegionInfo array grows downwards from kSpaceBeg: entry for
  // class_id lives at kSpaceBeg - (class_id + 1) * sizeof(RegionInfo).
  RegionInfo *GetRegionInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg);
    return &regions[-1 - class_id];
  }

  // Pushes `node` onto the LIFO free list. Not thread-safe.
  void PushLifoList(LifoListNode **list, LifoListNode *node) {
    node->next = *list;
    *list = node;
  }

  // Pops the head of the free list. The list must be non-empty
  // (callers check this before calling). Not thread-safe.
  LifoListNode *PopLifoList(LifoListNode **list) {
    LifoListNode *res = *list;
    *list = res->next;
    return res;
  }

  // Index of the chunk that contains address `chunk` within its Region.
  uptr GetChunkIdx(uptr chunk, uptr class_id) {
    u32 offset = chunk % kRegionSize;
    // Here we divide by a non-constant. This is costly.
    // We require that kRegionSize is at least 2^32 so that offset is 32-bit.
    // We save 2x by using 32-bit div, but may need to use a 256-way switch.
    // NOTE(review): the modulo result is truncated into a u32, which is
    // only correct while chunk offsets within a region stay below 2^32
    // (i.e. allocated_user < 2^32) — confirm this invariant holds for
    // the intended configurations.
    return offset / (u32)SizeClassMap::Size(class_id);
  }

  // Carves up to kPopulateSize fresh bytes (but at least one chunk) off
  // the never-yet-used tail of the Region into a new free list, and
  // updates the region's user/metadata byte counters.
  // Returns the head of the new list.
  LifoListNode *PopulateFreeList(uptr class_id, RegionInfo *region) {
    uptr size = SizeClassMap::Size(class_id);
    uptr beg_idx = region->allocated_user;  // First never-carved offset.
    uptr end_idx = beg_idx + kPopulateSize;
    LifoListNode *res = 0;
    uptr region_beg = kSpaceBeg + kRegionSize * class_id;
    uptr idx = beg_idx;
    uptr i = 0;  // Number of chunks carved out.
    do {  // do-while loop because we need to put at least one item.
      uptr p = region_beg + idx;
      PushLifoList(&res, reinterpret_cast<LifoListNode*>(p));
      idx += size;
      i++;
    } while (idx < end_idx);
    region->allocated_user += idx - beg_idx;
    region->allocated_meta += i * kMetadataSize;
    // User chunks (growing up) and metadata (growing down) must not
    // collide inside the Region.
    CHECK_LT(region->allocated_user + region->allocated_meta, kRegionSize);
    return res;
  }

  // Pops one chunk from the class's free list, refilling the list from
  // the Region tail if it is empty.
  void *AllocateBySizeClass(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    // FIXME: Lock region->mutex;
    if (!region->free_list) {
      region->free_list = PopulateFreeList(class_id, region);
    }
    CHECK_NE(region->free_list, 0);
    LifoListNode *node = PopLifoList(&region->free_list);
    return reinterpret_cast<void*>(node);
  }

  // Pushes the chunk back onto the head of its class's free list.
  void DeallocateBySizeClass(void *p, uptr class_id) {
    RegionInfo *region = GetRegionInfo(class_id);
    // FIXME: Lock region->mutex;
    PushLifoList(&region->free_list, reinterpret_cast<LifoListNode*>(p));
  }
};

}  // namespace __sanitizer

#endif  // SANITIZER_ALLOCATOR_H