//===-- sanitizer_allocator.h -----------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Specialized memory allocator for ThreadSanitizer, MemorySanitizer, etc.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ALLOCATOR_H
#define SANITIZER_ALLOCATOR_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"
#include "sanitizer_lfstack.h"

namespace __sanitizer {

// Prints error message and kills the program.
void NORETURN ReportAllocatorCannotReturnNull();

// SizeClassMap maps allocation sizes into size classes and back.
// Class 0 corresponds to size 0.
// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
// Next 4 classes: 256 + i * 64 (i = 1 to 4).
// Next 4 classes: 512 + i * 128 (i = 1 to 4).
// ...
// Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4).
// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
//
// This structure of the size class map gives us:
// - Efficient table-free class-to-size and size-to-class functions.
// - The difference between two consecutive size classes is between 14% and 25%.
//
// This class also gives a hint to a thread-caching allocator about the number
// of chunks that need to be cached per-thread:
// - kMaxNumCached is the maximal number of chunks per size class.
// - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
//
// Part of the output of SizeClassMap::Print():
// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
// c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1
// c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2
// c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3
// c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4
// c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5
// c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6
// c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7
//
// c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8
// c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9
// c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10
// c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11
// c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12
// c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13
// c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14
// c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15
//
// c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16
// c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17
// c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18
// c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19
//
// c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20
// c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21
// c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22
// c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23
//
// c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24
// c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25
// c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26
// c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27
//
// ...
//
// c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48
// c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49
// c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50
// c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51
//
// c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52

template <uptr kMaxSizeLog, uptr kMaxNumCachedT, uptr kMaxBytesCachedLog>
class SizeClassMap {
  static const uptr kMinSizeLog = 4;
  static const uptr kMidSizeLog = kMinSizeLog + 4;
  static const uptr kMinSize = 1 << kMinSizeLog;
  static const uptr kMidSize = 1 << kMidSizeLog;
  static const uptr kMidClass = kMidSize / kMinSize;
  static const uptr S = 2;
  static const uptr M = (1 << S) - 1;

 public:
  static const uptr kMaxNumCached = kMaxNumCachedT;
  // We transfer chunks between central and thread-local free lists in batches.
  // For small size classes we allocate batches separately.
  // For large size classes we use one of the chunks to store the batch.
  struct TransferBatch {
    TransferBatch *next;
    uptr count;
    void *batch[kMaxNumCached];
  };

  static const uptr kMaxSize = 1UL << kMaxSizeLog;
  static const uptr kNumClasses =
      kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1;
  COMPILER_CHECK(kNumClasses >= 32 && kNumClasses <= 256);
  static const uptr kNumClassesRounded =
      kNumClasses == 32 ? 32 :
      kNumClasses <= 64 ? 64 :
      kNumClasses <= 128 ? 128 : 256;

  static uptr Size(uptr class_id) {
    if (class_id <= kMidClass)
      return kMinSize * class_id;
    class_id -= kMidClass;
    uptr t = kMidSize << (class_id >> S);
    return t + (t >> S) * (class_id & M);
  }

  static uptr ClassID(uptr size) {
    if (size <= kMidSize)
      return (size + kMinSize - 1) >> kMinSizeLog;
    if (size > kMaxSize) return 0;
    uptr l = MostSignificantSetBitIndex(size);
    uptr hbits = (size >> (l - S)) & M;
    uptr lbits = size & ((1 << (l - S)) - 1);
    uptr l1 = l - kMidSizeLog;
    return kMidClass + (l1 << S) + hbits + (lbits > 0);
  }

  static uptr MaxCached(uptr class_id) {
    if (class_id == 0) return 0;
    uptr n = (1UL << kMaxBytesCachedLog) / Size(class_id);
    return Max<uptr>(1, Min(kMaxNumCached, n));
  }

  static void Print() {
    uptr prev_s = 0;
    uptr total_cached = 0;
    for (uptr i = 0; i < kNumClasses; i++) {
      uptr s = Size(i);
      if (s >= kMidSize / 2 && (s & (s - 1)) == 0)
        Printf("\n");
      uptr d = s - prev_s;
      uptr p = prev_s ? (d * 100 / prev_s) : 0;
      uptr l = s ? MostSignificantSetBitIndex(s) : 0;
      uptr cached = MaxCached(i) * s;
      Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
             "cached: %zd %zd; id %zd\n",
             i, Size(i), d, p, l, MaxCached(i), cached, ClassID(s));
      total_cached += cached;
      prev_s = s;
    }
    Printf("Total cached: %zd\n", total_cached);
  }

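  // Whether chunks of this size class are too small to hold their own
  // TransferBatch, so batches must be allocated separately. Illustrative
  // arithmetic, assuming a 64-bit target and DefaultSizeClassMap
  // (kMaxNumCached = 128): sizeof(TransferBatch) is 2 * 8 + 128 * 8 = 1040
  // bytes, so a 16-byte class (MaxCached = 128) needs a separate batch, while
  // a 65536-byte class (MaxCached = 1) has an effective batch footprint of
  // only 1040 - 8 * 127 = 24 bytes and stores the batch inside a chunk.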
  static bool SizeClassRequiresSeparateTransferBatch(uptr class_id) {
    return Size(class_id) < sizeof(TransferBatch) -
        sizeof(uptr) * (kMaxNumCached - MaxCached(class_id));
  }

  static void Validate() {
    for (uptr c = 1; c < kNumClasses; c++) {
      // Printf("Validate: c%zd\n", c);
      uptr s = Size(c);
      CHECK_NE(s, 0U);
      CHECK_EQ(ClassID(s), c);
      if (c != kNumClasses - 1)
        CHECK_EQ(ClassID(s + 1), c + 1);
      CHECK_EQ(ClassID(s - 1), c);
      if (c)
        CHECK_GT(Size(c), Size(c-1));
    }
    CHECK_EQ(ClassID(kMaxSize + 1), 0);

    for (uptr s = 1; s <= kMaxSize; s++) {
      uptr c = ClassID(s);
      // Printf("s%zd => c%zd\n", s, c);
      CHECK_LT(c, kNumClasses);
      CHECK_GE(Size(c), s);
      if (c > 0)
        CHECK_LT(Size(c-1), s);
    }
  }
};

typedef SizeClassMap<17, 128, 16> DefaultSizeClassMap;
typedef SizeClassMap<17, 64, 14> CompactSizeClassMap;
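
// Illustrative arithmetic for DefaultSizeClassMap (a worked example of the
// functions above, not an additional interface): a 24-byte request is below
// kMidSize, so ClassID(24) == (24 + 15) >> 4 == 2 and Size(2) == 32; a
// 600-byte request gives ClassID(600) == 21 and Size(21) == 640, matching the
// c21 row of the Print() output quoted above.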
template<class SizeClassAllocator> struct SizeClassAllocatorLocalCache;

// Memory allocator statistics
enum AllocatorStat {
  AllocatorStatAllocated,
  AllocatorStatMapped,
  AllocatorStatCount
};

typedef uptr AllocatorStatCounters[AllocatorStatCount];

// Per-thread stats, live in per-thread cache.
class AllocatorStats {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }
  void InitLinkerInitialized() {}

  void Add(AllocatorStat i, uptr v) {
    v += atomic_load(&stats_[i], memory_order_relaxed);
    atomic_store(&stats_[i], v, memory_order_relaxed);
  }

  void Sub(AllocatorStat i, uptr v) {
    v = atomic_load(&stats_[i], memory_order_relaxed) - v;
    atomic_store(&stats_[i], v, memory_order_relaxed);
  }

  void Set(AllocatorStat i, uptr v) {
    atomic_store(&stats_[i], v, memory_order_relaxed);
  }

  uptr Get(AllocatorStat i) const {
    return atomic_load(&stats_[i], memory_order_relaxed);
  }

 private:
  friend class AllocatorGlobalStats;
  AllocatorStats *next_;
  AllocatorStats *prev_;
  atomic_uintptr_t stats_[AllocatorStatCount];
};

// Global stats, used for aggregation and querying.
class AllocatorGlobalStats : public AllocatorStats {
 public:
  void InitLinkerInitialized() {
    next_ = this;
    prev_ = this;
  }
  void Init() {
    internal_memset(this, 0, sizeof(*this));
    InitLinkerInitialized();
  }

  void Register(AllocatorStats *s) {
    SpinMutexLock l(&mu_);
    s->next_ = next_;
    s->prev_ = this;
    next_->prev_ = s;
    next_ = s;
  }

  void Unregister(AllocatorStats *s) {
    SpinMutexLock l(&mu_);
    s->prev_->next_ = s->next_;
    s->next_->prev_ = s->prev_;
    for (int i = 0; i < AllocatorStatCount; i++)
      Add(AllocatorStat(i), s->Get(AllocatorStat(i)));
  }

  void Get(AllocatorStatCounters s) const {
    internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));
    SpinMutexLock l(&mu_);
    const AllocatorStats *stats = this;
    for (;;) {
      for (int i = 0; i < AllocatorStatCount; i++)
        s[i] += stats->Get(AllocatorStat(i));
      stats = stats->next_;
      if (stats == this)
        break;
    }
    // All stats must be non-negative.
    for (int i = 0; i < AllocatorStatCount; i++)
      s[i] = ((sptr)s[i]) >= 0 ? s[i] : 0;
  }

 private:
  mutable SpinMutex mu_;
};
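
// Minimal usage sketch (illustrative; everything except the two classes above
// is hypothetical): a tool keeps one AllocatorGlobalStats object, registers
// each thread's AllocatorStats with it, and reads the aggregate:
//
//   AllocatorGlobalStats global_stats;
//   global_stats.Init();
//   AllocatorStats thread_stats;  // normally lives inside the thread cache
//   thread_stats.Init();
//   global_stats.Register(&thread_stats);
//   thread_stats.Add(AllocatorStatAllocated, 4096);
//   AllocatorStatCounters counters;
//   global_stats.Get(counters);   // counters[AllocatorStatAllocated] >= 4096
//   global_stats.Unregister(&thread_stats);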

// Allocators call these callbacks on mmap/munmap.
struct NoOpMapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const { }
};
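
// A tool can observe every mmap/munmap done by the allocators by passing its
// own callback as the MapUnmapCallback template parameter. A hypothetical
// sketch (not part of this header):
//
//   struct ShadowPoisoningMapUnmapCallback {
//     void OnMap(uptr p, uptr size) const { /* e.g. poison shadow of [p, p+size) */ }
//     void OnUnmap(uptr p, uptr size) const { /* e.g. unpoison that shadow */ }
//   };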

// Callback type for iterating over chunks.
typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);

// SizeClassAllocator64 -- allocator for 64-bit address space.
//
// Space: a portion of address space of kSpaceSize bytes starting at
// a fixed address (kSpaceBeg). Both constants are powers of two and
// kSpaceBeg is kSpaceSize-aligned.
// At the beginning the entire space is mprotect-ed, then small parts of it
// are mapped on demand.
//
// Region: a part of Space dedicated to a single size class.
// There are kNumClasses Regions of equal size.
//
// UserChunk: a piece of memory returned to user.
// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
//
// A Region looks like this:
// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
template <const uptr kSpaceBeg, const uptr kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap,
          class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator64 {
 public:
  typedef typename SizeClassMap::TransferBatch Batch;
  typedef SizeClassAllocator64<kSpaceBeg, kSpaceSize, kMetadataSize,
      SizeClassMap, MapUnmapCallback> ThisT;
  typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;

  void Init() {
    CHECK_EQ(kSpaceBeg,
             reinterpret_cast<uptr>(MmapNoAccess(kSpaceBeg, kSpaceSize)));
    MapWithCallback(kSpaceEnd, AdditionalSize());
  }

  void MapWithCallback(uptr beg, uptr size) {
    CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
    MapUnmapCallback().OnMap(beg, size);
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
           alignment <= SizeClassMap::kMaxSize;
  }

  NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *region = GetRegionInfo(class_id);
    Batch *b = region->free_list.Pop();
    if (b == 0)
      b = PopulateFreeList(stat, c, class_id, region);
    region->n_allocated += b->count;
    return b;
  }

  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
    RegionInfo *region = GetRegionInfo(class_id);
    CHECK_GT(b->count, 0);
    region->free_list.Push(b);
    region->n_freed += b->count;
  }

  static bool PointerIsMine(const void *p) {
    return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
  }

  static uptr GetSizeClass(const void *p) {
    return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClassesRounded;
  }

  void *GetBlockBegin(const void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = SizeClassMap::Size(class_id);
    if (!size) return 0;
    uptr chunk_idx = GetChunkIdx((uptr)p, size);
    uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
    uptr beg = chunk_idx * size;
    uptr next_beg = beg + size;
    if (class_id >= kNumClasses) return 0;
    RegionInfo *region = GetRegionInfo(class_id);
    if (region->mapped_user >= next_beg)
      return reinterpret_cast<void*>(reg_beg + beg);
    return 0;
  }

  static uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return SizeClassMap::Size(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  void *GetMetaData(const void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = SizeClassMap::Size(class_id);
    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
    return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
                                   (1 + chunk_idx) * kMetadataSize);
  }

  uptr TotalMemoryUsed() {
    uptr res = 0;
    for (uptr i = 0; i < kNumClasses; i++)
      res += GetRegionInfo(i)->allocated_user;
    return res;
  }

  // Test-only.
  void TestOnlyUnmap() {
    UnmapWithCallback(kSpaceBeg, kSpaceSize + AdditionalSize());
  }

  void PrintStats() {
    uptr total_mapped = 0;
    uptr n_allocated = 0;
    uptr n_freed = 0;
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      total_mapped += region->mapped_user;
      n_allocated += region->n_allocated;
      n_freed += region->n_freed;
    }
    Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; "
           "remains %zd\n",
           total_mapped >> 20, n_allocated, n_allocated - n_freed);
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      if (region->mapped_user == 0) continue;
      Printf("  %02zd (%zd): total: %zd K allocs: %zd remains: %zd\n",
             class_id,
             SizeClassMap::Size(class_id),
             region->mapped_user >> 10,
             region->n_allocated,
             region->n_allocated - region->n_freed);
    }
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetRegionInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = (int)kNumClasses - 1; i >= 0; i--) {
      GetRegionInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      uptr chunk_size = SizeClassMap::Size(class_id);
      uptr region_beg = kSpaceBeg + class_id * kRegionSize;
      for (uptr chunk = region_beg;
           chunk < region_beg + region->allocated_user;
           chunk += chunk_size) {
        // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
        callback(chunk, arg);
      }
    }
  }

  static uptr AdditionalSize() {
    return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
                     GetPageSizeCached());
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;

 private:
  static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
  static const uptr kSpaceEnd = kSpaceBeg + kSpaceSize;
  COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
  // kRegionSize must be >= 2^32.
  COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
  // Populate the free list with at most this number of bytes at once
  // or with one element if its size is greater.
  static const uptr kPopulateSize = 1 << 14;
  // Call mmap for user memory with at least this size.
  static const uptr kUserMapSize = 1 << 16;
  // Call mmap for metadata memory with at least this size.
  static const uptr kMetaMapSize = 1 << 16;

  struct RegionInfo {
    BlockingMutex mutex;
    LFStack<Batch> free_list;
    uptr allocated_user;  // Bytes allocated for user memory.
    uptr allocated_meta;  // Bytes allocated for metadata.
    uptr mapped_user;  // Bytes mapped for user memory.
    uptr mapped_meta;  // Bytes mapped for metadata.
    uptr n_allocated, n_freed;  // Just stats.
  };
  COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);

  RegionInfo *GetRegionInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg + kSpaceSize);
    return &regions[class_id];
  }

  static uptr GetChunkIdx(uptr chunk, uptr size) {
    uptr offset = chunk % kRegionSize;
    // Here we divide by a non-constant. This is costly.
    // size always fits into 32 bits. If the offset fits too, use 32-bit div.
    if (offset >> (SANITIZER_WORDSIZE / 2))
      return offset / size;
    return (u32)offset / (u32)size;
  }

  NOINLINE Batch* PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                                   uptr class_id, RegionInfo *region) {
    BlockingMutexLock l(&region->mutex);
    Batch *b = region->free_list.Pop();
    if (b)
      return b;
    uptr size = SizeClassMap::Size(class_id);
    uptr count = size < kPopulateSize ? SizeClassMap::MaxCached(class_id) : 1;
    uptr beg_idx = region->allocated_user;
    uptr end_idx = beg_idx + count * size;
    uptr region_beg = kSpaceBeg + kRegionSize * class_id;
    if (end_idx + size > region->mapped_user) {
      // Do the mmap for the user memory.
      uptr map_size = kUserMapSize;
      while (end_idx + size > region->mapped_user + map_size)
        map_size += kUserMapSize;
      CHECK_GE(region->mapped_user + map_size, end_idx);
      MapWithCallback(region_beg + region->mapped_user, map_size);
      stat->Add(AllocatorStatMapped, map_size);
      region->mapped_user += map_size;
    }
    uptr total_count = (region->mapped_user - beg_idx - size)
        / size / count * count;
    region->allocated_meta += total_count * kMetadataSize;
    if (region->allocated_meta > region->mapped_meta) {
      uptr map_size = kMetaMapSize;
      while (region->allocated_meta > region->mapped_meta + map_size)
        map_size += kMetaMapSize;
      // Do the mmap for the metadata.
      CHECK_GE(region->mapped_meta + map_size, region->allocated_meta);
      MapWithCallback(region_beg + kRegionSize -
                      region->mapped_meta - map_size, map_size);
      region->mapped_meta += map_size;
    }
    CHECK_LE(region->allocated_meta, region->mapped_meta);
    if (region->mapped_user + region->mapped_meta > kRegionSize) {
      Printf("%s: Out of memory. Dying. ", SanitizerToolName);
      Printf("The process has exhausted %zuMB for size class %zu.\n",
             kRegionSize / 1024 / 1024, size);
      Die();
    }
    for (;;) {
      if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
        b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
      else
        b = (Batch*)(region_beg + beg_idx);
      b->count = count;
      for (uptr i = 0; i < count; i++)
        b->batch[i] = (void*)(region_beg + beg_idx + i * size);
      region->allocated_user += count * size;
      CHECK_LE(region->allocated_user, region->mapped_user);
      beg_idx += count * size;
      if (beg_idx + count * size + size > region->mapped_user)
        break;
      CHECK_GT(b->count, 0);
      region->free_list.Push(b);
    }
    return b;
  }
};
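
// A typical 64-bit instantiation looks like the sketch below. The concrete
// constants are illustrative assumptions (every tool picks its own space base,
// space size and metadata size), not values mandated by this header:
//
//   static const uptr kAllocatorSpace = 0x600000000000ULL;  // hypothetical
//   static const uptr kAllocatorSize  = 0x40000000000ULL;   // hypothetical 4T
//   typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 16,
//                                DefaultSizeClassMap> PrimaryAllocator64;
//   typedef SizeClassAllocatorLocalCache<PrimaryAllocator64> AllocatorCache64;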

// Maps integers in range [0, kSize) to u8 values.
template<u64 kSize>
class FlatByteMap {
 public:
  void TestOnlyInit() {
    internal_memset(map_, 0, sizeof(map_));
  }

  void set(uptr idx, u8 val) {
    CHECK_LT(idx, kSize);
    CHECK_EQ(0U, map_[idx]);
    map_[idx] = val;
  }
  u8 operator[] (uptr idx) {
    CHECK_LT(idx, kSize);
    // FIXME: CHECK may be too expensive here.
    return map_[idx];
  }
 private:
  u8 map_[kSize];
};

// TwoLevelByteMap maps integers in range [0, kSize1*kSize2) to u8 values.
// It is implemented as a two-dimensional array: array of kSize1 pointers
// to kSize2-byte arrays. The secondary arrays are mmaped on demand.
// Each value is initially zero and can be set to something else only once.
// Setting and getting values from multiple threads is safe w/o extra locking.
template <u64 kSize1, u64 kSize2, class MapUnmapCallback = NoOpMapUnmapCallback>
class TwoLevelByteMap {
 public:
  void TestOnlyInit() {
    internal_memset(map1_, 0, sizeof(map1_));
    mu_.Init();
  }
  void TestOnlyUnmap() {
    for (uptr i = 0; i < kSize1; i++) {
      u8 *p = Get(i);
      if (!p) continue;
      MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), kSize2);
      UnmapOrDie(p, kSize2);
    }
  }

  uptr size() const { return kSize1 * kSize2; }
  uptr size1() const { return kSize1; }
  uptr size2() const { return kSize2; }

  void set(uptr idx, u8 val) {
    CHECK_LT(idx, kSize1 * kSize2);
    u8 *map2 = GetOrCreate(idx / kSize2);
    CHECK_EQ(0U, map2[idx % kSize2]);
    map2[idx % kSize2] = val;
  }

  u8 operator[] (uptr idx) const {
    CHECK_LT(idx, kSize1 * kSize2);
    u8 *map2 = Get(idx / kSize2);
    if (!map2) return 0;
    return map2[idx % kSize2];
  }

 private:
  u8 *Get(uptr idx) const {
    CHECK_LT(idx, kSize1);
    return reinterpret_cast<u8 *>(
        atomic_load(&map1_[idx], memory_order_acquire));
  }

  u8 *GetOrCreate(uptr idx) {
    u8 *res = Get(idx);
    if (!res) {
      SpinMutexLock l(&mu_);
      if (!(res = Get(idx))) {
        res = (u8*)MmapOrDie(kSize2, "TwoLevelByteMap");
        MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), kSize2);
        atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
                     memory_order_release);
      }
    }
    return res;
  }

  atomic_uintptr_t map1_[kSize1];
  StaticSpinMutex mu_;
};
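
// Both byte maps are used as the possible_regions container of
// SizeClassAllocator32 below: FlatByteMap when one resident byte per possible
// region is cheap (e.g. a 32-bit address space), TwoLevelByteMap when it is
// not (e.g. running the 32-bit-style allocator over a large 64-bit address
// space). An illustrative choice, assuming 2^20-byte regions (hypothetical
// parameters):
//
//   typedef FlatByteMap<(1ULL << 32) >> 20> ByteMap32;             // 4K entries
//   typedef TwoLevelByteMap<(1ULL << 47) >> 32, 1ULL << 12> ByteMap64;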

// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on a 64-bit arch, but there it is
// less efficient than SizeClassAllocator64.
//
// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
// be returned by MmapOrDie().
//
// Region:
//   a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
// a ByteMap possible_regions to store the size classes of each Region.
// 0 size class means the region is not used by the allocator.
//
// One Region is used to allocate chunks of a single size class.
// A Region looks like this:
// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
//
// In order to avoid false sharing the objects of this class should be
// cache-line aligned.
template <const uptr kSpaceBeg, const u64 kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap,
          const uptr kRegionSizeLog,
          class ByteMap,
          class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator32 {
 public:
  typedef typename SizeClassMap::TransferBatch Batch;
  typedef SizeClassAllocator32<kSpaceBeg, kSpaceSize, kMetadataSize,
      SizeClassMap, kRegionSizeLog, ByteMap, MapUnmapCallback> ThisT;
  typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;

  void Init() {
    possible_regions.TestOnlyInit();
    internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
  }

  void *MapWithCallback(uptr size) {
    size = RoundUpTo(size, GetPageSizeCached());
    void *res = MmapOrDie(size, "SizeClassAllocator32");
    MapUnmapCallback().OnMap((uptr)res, size);
    return res;
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
           alignment <= SizeClassMap::kMaxSize;
  }

  void *GetMetaData(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = SizeClassMap::Size(GetSizeClass(p));
    u32 offset = mem - beg;
    uptr n = offset / (u32)size;  // 32-bit division
    uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
    return reinterpret_cast<void*>(meta);
  }

  NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    if (sci->free_list.empty())
      PopulateFreeList(stat, c, sci, class_id);
    CHECK(!sci->free_list.empty());
    Batch *b = sci->free_list.front();
    sci->free_list.pop_front();
    return b;
  }

  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
    CHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    CHECK_GT(b->count, 0);
    sci->free_list.push_front(b);
  }

  bool PointerIsMine(const void *p) {
    return GetSizeClass(p) != 0;
  }

  uptr GetSizeClass(const void *p) {
    return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
  }

  void *GetBlockBegin(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = SizeClassMap::Size(GetSizeClass(p));
    u32 offset = mem - beg;
    u32 n = offset / (u32)size;  // 32-bit division
    uptr res = beg + (n * (u32)size);
    return reinterpret_cast<void*>(res);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return SizeClassMap::Size(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  uptr TotalMemoryUsed() {
    // No need to lock here.
    uptr res = 0;
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        res += kRegionSize;
    return res;
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        UnmapWithCallback((i * kRegionSize), kRegionSize);
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetSizeClassInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = kNumClasses - 1; i >= 0; i--) {
      GetSizeClassInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr region = 0; region < kNumPossibleRegions; region++)
      if (possible_regions[region]) {
        uptr chunk_size = SizeClassMap::Size(possible_regions[region]);
        uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
        uptr region_beg = region * kRegionSize;
        for (uptr chunk = region_beg;
             chunk < region_beg + max_chunks_in_region * chunk_size;
             chunk += chunk_size) {
          // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
          callback(chunk, arg);
        }
      }
  }

  void PrintStats() {
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;

 private:
  static const uptr kRegionSize = 1 << kRegionSizeLog;
  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;

  struct SizeClassInfo {
    SpinMutex mutex;
    IntrusiveList<Batch> free_list;
    char padding[kCacheLineSize - sizeof(uptr) - sizeof(IntrusiveList<Batch>)];
  };
  COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);

  uptr ComputeRegionId(uptr mem) {
    uptr res = mem >> kRegionSizeLog;
    CHECK_LT(res, kNumPossibleRegions);
    return res;
  }

  uptr ComputeRegionBeg(uptr mem) {
    return mem & ~(kRegionSize - 1);
  }

  uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
                                                       "SizeClassAllocator32"));
    MapUnmapCallback().OnMap(res, kRegionSize);
    stat->Add(AllocatorStatMapped, kRegionSize);
    CHECK_EQ(0U, (res & (kRegionSize - 1)));
    possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
    return res;
  }

  SizeClassInfo *GetSizeClassInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    return &size_class_info_array[class_id];
  }

  void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                        SizeClassInfo *sci, uptr class_id) {
    uptr size = SizeClassMap::Size(class_id);
    uptr reg = AllocateRegion(stat, class_id);
    uptr n_chunks = kRegionSize / (size + kMetadataSize);
    uptr max_count = SizeClassMap::MaxCached(class_id);
    Batch *b = 0;
    for (uptr i = reg; i < reg + n_chunks * size; i += size) {
      if (b == 0) {
        if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
          b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
        else
          b = (Batch*)i;
        b->count = 0;
      }
      b->batch[b->count++] = (void*)i;
      if (b->count == max_count) {
        CHECK_GT(b->count, 0);
        sci->free_list.push_back(b);
        b = 0;
      }
    }
    if (b) {
      CHECK_GT(b->count, 0);
      sci->free_list.push_back(b);
    }
  }

  ByteMap possible_regions;
  SizeClassInfo size_class_info_array[kNumClasses];
};
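
// An illustrative 32-bit instantiation (the constants are assumptions, each
// tool defines its own): 2^20-byte regions carved out of the full 2^32
// address space, with a flat byte map holding one size class id per region:
//
//   typedef SizeClassAllocator32<0, 1ULL << 32, 16, CompactSizeClassMap,
//                                20, FlatByteMap<(1ULL << 32) >> 20> >
//       PrimaryAllocator32;  // hypothetical name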

// Objects of this type should be used as local caches for SizeClassAllocator64
// or SizeClassAllocator32. Since the typical use of this class is to have one
// object per thread in TLS, it has to be POD.
template<class SizeClassAllocator>
struct SizeClassAllocatorLocalCache {
  typedef SizeClassAllocator Allocator;
  static const uptr kNumClasses = SizeClassAllocator::kNumClasses;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    stats_.Add(AllocatorStatAllocated, SizeClassMap::Size(class_id));
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0))
      Refill(allocator, class_id);
    void *res = c->batch[--c->count];
    PREFETCH(c->batch[c->count - 1]);
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    InitCache();
    stats_.Sub(AllocatorStatAllocated, SizeClassMap::Size(class_id));
    PerClass *c = &per_class_[class_id];
    CHECK_NE(c->max_count, 0UL);
    if (UNLIKELY(c->count == c->max_count))
      Drain(allocator, class_id);
    c->batch[c->count++] = p;
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
      PerClass *c = &per_class_[class_id];
      while (c->count > 0)
        Drain(allocator, class_id);
    }
  }

  // private:
  typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
  typedef typename SizeClassMap::TransferBatch Batch;
  struct PerClass {
    uptr count;
    uptr max_count;
    void *batch[2 * SizeClassMap::kMaxNumCached];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  void InitCache() {
    if (per_class_[1].max_count)
      return;
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      c->max_count = 2 * SizeClassMap::MaxCached(i);
    }
  }

  NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    Batch *b = allocator->AllocateBatch(&stats_, this, class_id);
    CHECK_GT(b->count, 0);
    for (uptr i = 0; i < b->count; i++)
      c->batch[i] = b->batch[i];
    c->count = b->count;
    if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
      Deallocate(allocator, SizeClassMap::ClassID(sizeof(Batch)), b);
  }

  NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    Batch *b;
    if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
      b = (Batch*)Allocate(allocator, SizeClassMap::ClassID(sizeof(Batch)));
    else
      b = (Batch*)c->batch[0];
    uptr cnt = Min(c->max_count / 2, c->count);
    for (uptr i = 0; i < cnt; i++) {
      b->batch[i] = c->batch[i];
      c->batch[i] = c->batch[i + c->max_count / 2];
    }
    b->count = cnt;
    c->count -= cnt;
    CHECK_GT(b->count, 0);
    allocator->DeallocateBatch(&stats_, class_id, b);
  }
};
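
// A sketch of the intended per-thread flow (illustrative; the allocator and
// stats instances are hypothetical and normally owned by the tool):
//
//   static PrimaryAllocator64 primary;         // see the sketch above
//   static AllocatorGlobalStats global_stats;
//   THREADLOCAL SizeClassAllocatorLocalCache<PrimaryAllocator64> cache;
//
//   cache.Init(&global_stats);                 // once per thread
//   void *p = cache.Allocate(&primary, primary.ClassID(48));
//   cache.Deallocate(&primary, primary.GetSizeClass(p), p);
//   cache.Destroy(&primary, &global_stats);    // at thread exit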

// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
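// Each allocation gets its own mmap-ed range: the returned pointer is
// page-aligned, and a small Header (size, map_beg, map_size, chunk_idx) lives
// in the page right before it. This is only a reader's summary of Allocate()
// below; the exact header layout is an implementation detail.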
template <class MapUnmapCallback = NoOpMapUnmapCallback>
class LargeMmapAllocator {
 public:
  void InitLinkerInitialized(bool may_return_null) {
    page_size_ = GetPageSizeCached();
    atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
  }

  void Init(bool may_return_null) {
    internal_memset(this, 0, sizeof(*this));
    InitLinkerInitialized(may_return_null);
  }

  void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
    CHECK(IsPowerOfTwo(alignment));
    uptr map_size = RoundUpMapSize(size);
    if (alignment > page_size_)
      map_size += alignment;
    // Overflow.
    if (map_size < size)
      return ReturnNullOrDie();
    uptr map_beg = reinterpret_cast<uptr>(
        MmapOrDie(map_size, "LargeMmapAllocator"));
    CHECK(IsAligned(map_beg, page_size_));
    MapUnmapCallback().OnMap(map_beg, map_size);
    uptr map_end = map_beg + map_size;
    uptr res = map_beg + page_size_;
    if (res & (alignment - 1))  // Align.
      res += alignment - (res & (alignment - 1));
    CHECK(IsAligned(res, alignment));
    CHECK(IsAligned(res, page_size_));
    CHECK_GE(res + size, map_beg);
    CHECK_LE(res + size, map_end);
    Header *h = GetHeader(res);
    h->size = size;
    h->map_beg = map_beg;
    h->map_size = map_size;
    uptr size_log = MostSignificantSetBitIndex(map_size);
    CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
    {
      SpinMutexLock l(&mutex_);
      uptr idx = n_chunks_++;
      chunks_sorted_ = false;
      CHECK_LT(idx, kMaxNumChunks);
      h->chunk_idx = idx;
      chunks_[idx] = h;
      stats.n_allocs++;
      stats.currently_allocated += map_size;
      stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
      stats.by_size_log[size_log]++;
      stat->Add(AllocatorStatAllocated, map_size);
      stat->Add(AllocatorStatMapped, map_size);
    }
    return reinterpret_cast<void*>(res);
  }

  void *ReturnNullOrDie() {
    if (atomic_load(&may_return_null_, memory_order_acquire))
      return 0;
    ReportAllocatorCannotReturnNull();
  }

  void SetMayReturnNull(bool may_return_null) {
    atomic_store(&may_return_null_, may_return_null, memory_order_release);
  }

  void Deallocate(AllocatorStats *stat, void *p) {
    Header *h = GetHeader(p);
    {
      SpinMutexLock l(&mutex_);
      uptr idx = h->chunk_idx;
      CHECK_EQ(chunks_[idx], h);
      CHECK_LT(idx, n_chunks_);
      chunks_[idx] = chunks_[n_chunks_ - 1];
      chunks_[idx]->chunk_idx = idx;
      n_chunks_--;
      chunks_sorted_ = false;
      stats.n_frees++;
      stats.currently_allocated -= h->map_size;
      stat->Sub(AllocatorStatAllocated, h->map_size);
      stat->Sub(AllocatorStatMapped, h->map_size);
    }
    MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
    UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
  }

  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (uptr i = 0; i < n_chunks_; i++) {
      Header *h = chunks_[i];
      CHECK_EQ(h->chunk_idx, i);
      res += RoundUpMapSize(h->size);
    }
    return res;
  }
1102
Sergey Matveevcd571e02013-06-06 14:17:56 +00001103 bool PointerIsMine(const void *p) {
Kostya Serebryanyda1f82b2012-12-18 14:56:38 +00001104 return GetBlockBegin(p) != 0;
Kostya Serebryany43811a12012-12-04 07:54:41 +00001105 }
1106
1107 uptr GetActuallyAllocatedSize(void *p) {
Kostya Serebryanyda1f82b2012-12-18 14:56:38 +00001108 return RoundUpTo(GetHeader(p)->size, page_size_);
Kostya Serebryany43811a12012-12-04 07:54:41 +00001109 }
1110
1111 // At least page_size_/2 metadata bytes is available.
Evgeniy Stepanov5c48a8c2013-08-02 14:26:58 +00001112 void *GetMetaData(const void *p) {
Kostya Serebryanybb5d0572012-12-25 07:50:35 +00001113 // Too slow: CHECK_EQ(p, GetBlockBegin(p));
Dmitry Vyukovcde3bae2013-08-08 13:47:50 +00001114 if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
1115 Printf("%s: bad pointer %p\n", SanitizerToolName, p);
1116 CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
1117 }
Kostya Serebryany43811a12012-12-04 07:54:41 +00001118 return GetHeader(p) + 1;
1119 }
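
  // Sketch (assumption: ToolChunkMetadata is a tool-defined struct that fits
  // into the available metadata space): `block` must be the page-aligned
  // beginning of a chunk owned by this allocator.
  //   ToolChunkMetadata *m =
  //       reinterpret_cast<ToolChunkMetadata*>(a.GetMetaData(block));
  //   m->requested_size = requested_size;  // hypothetical field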
1120
Sergey Matveevcd571e02013-06-06 14:17:56 +00001121 void *GetBlockBegin(const void *ptr) {
Kostya Serebryanyda1f82b2012-12-18 14:56:38 +00001122 uptr p = reinterpret_cast<uptr>(ptr);
Kostya Serebryany43811a12012-12-04 07:54:41 +00001123 SpinMutexLock l(&mutex_);
Kostya Serebryanyb8c363d2012-12-24 14:35:14 +00001124 uptr nearest_chunk = 0;
1125 // Cache-friendly linear search.
1126 for (uptr i = 0; i < n_chunks_; i++) {
1127 uptr ch = reinterpret_cast<uptr>(chunks_[i]);
1128      if (p < ch) continue;  // p is to the left of this chunk, skip it.
1129 if (p - ch < p - nearest_chunk)
1130 nearest_chunk = ch;
Kostya Serebryany43811a12012-12-04 07:54:41 +00001131 }
Kostya Serebryanyb8c363d2012-12-24 14:35:14 +00001132 if (!nearest_chunk)
1133 return 0;
1134 Header *h = reinterpret_cast<Header *>(nearest_chunk);
1135 CHECK_GE(nearest_chunk, h->map_beg);
1136 CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
1137 CHECK_LE(nearest_chunk, p);
Kostya Serebryanya2c1d982013-04-08 08:43:22 +00001138 if (h->map_beg + h->map_size <= p)
Kostya Serebryanyb8c363d2012-12-24 14:35:14 +00001139 return 0;
1140 return GetUser(h);
Kostya Serebryany43811a12012-12-04 07:54:41 +00001141 }
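
  // Sketch: recovering the start of an allocation from an arbitrary interior
  // pointer (hypothetical `interior_ptr`), e.g. while scanning memory:
  //   void *beg = a.GetBlockBegin(interior_ptr);
  //   if (beg)
  //     Printf("%p is %zd bytes into a %zd-byte block\n", interior_ptr,
  //            reinterpret_cast<uptr>(interior_ptr) -
  //                reinterpret_cast<uptr>(beg),
  //            a.GetActuallyAllocatedSize(beg));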
1142
Sergey Matveevba2169a2013-05-31 11:13:45 +00001143 // This function does the same as GetBlockBegin, but is much faster.
1144 // Must be called with the allocator locked.
1145 void *GetBlockBeginFastLocked(void *ptr) {
Peter Collingbourne8f0c5bd2013-10-25 23:03:21 +00001146 mutex_.CheckLocked();
Kostya Serebryanyf8c3f3d2013-05-30 08:43:30 +00001147 uptr p = reinterpret_cast<uptr>(ptr);
1148 uptr n = n_chunks_;
1149 if (!n) return 0;
1150 if (!chunks_sorted_) {
1151 // Do one-time sort. chunks_sorted_ is reset in Allocate/Deallocate.
1152 SortArray(reinterpret_cast<uptr*>(chunks_), n);
1153 for (uptr i = 0; i < n; i++)
1154 chunks_[i]->chunk_idx = i;
1155 chunks_sorted_ = true;
1156 min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
1157 max_mmap_ = reinterpret_cast<uptr>(chunks_[n - 1]) +
1158 chunks_[n - 1]->map_size;
1159 }
1160 if (p < min_mmap_ || p >= max_mmap_)
1161 return 0;
1162 uptr beg = 0, end = n - 1;
1163    // This loop is a log(n) lower_bound. It does not check for an exact match
1164    // in order to avoid expensive cache-thrashing loads.
1165 while (end - beg >= 2) {
1166 uptr mid = (beg + end) / 2; // Invariant: mid >= beg + 1
1167 if (p < reinterpret_cast<uptr>(chunks_[mid]))
1168 end = mid - 1; // We are not interested in chunks_[mid].
1169 else
1170 beg = mid; // chunks_[mid] may still be what we want.
1171 }
1172
1173 if (beg < end) {
1174 CHECK_EQ(beg + 1, end);
1175 // There are 2 chunks left, choose one.
1176 if (p >= reinterpret_cast<uptr>(chunks_[end]))
1177 beg = end;
1178 }
1179
1180 Header *h = chunks_[beg];
1181 if (h->map_beg + h->map_size <= p || p < h->map_beg)
1182 return 0;
1183 return GetUser(h);
1184 }
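
  // Intended calling pattern (sketch): take the allocator lock once, then
  // resolve many candidate pointers, e.g. during a stop-the-world scan.
  // `candidates`, `n_candidates` and MarkReachable() are hypothetical.
  //   a.ForceLock();
  //   for (uptr i = 0; i < n_candidates; i++)
  //     if (void *beg = a.GetBlockBeginFastLocked(candidates[i]))
  //       MarkReachable(beg);
  //   a.ForceUnlock();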
1185
Kostya Serebryany4b48f452012-12-27 14:09:19 +00001186 void PrintStats() {
1187 Printf("Stats: LargeMmapAllocator: allocated %zd times, "
Kostya Serebryany800344c2013-01-10 13:38:38 +00001188 "remains %zd (%zd K) max %zd M; by size logs: ",
Kostya Serebryany4b48f452012-12-27 14:09:19 +00001189 stats.n_allocs, stats.n_allocs - stats.n_frees,
1190 stats.currently_allocated >> 10, stats.max_allocated >> 20);
Kostya Serebryany800344c2013-01-10 13:38:38 +00001191 for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
1192 uptr c = stats.by_size_log[i];
1193 if (!c) continue;
1194 Printf("%zd:%zd; ", i, c);
1195 }
1196 Printf("\n");
Kostya Serebryany4b48f452012-12-27 14:09:19 +00001197 }
1198
Alexander Potapenko6a11cc12013-02-07 11:40:03 +00001199  // ForceLock() and ForceUnlock() are needed to implement the Darwin malloc zone
1200 // introspection API.
1201 void ForceLock() {
1202 mutex_.Lock();
1203 }
1204
1205 void ForceUnlock() {
1206 mutex_.Unlock();
1207 }
1208
Sergey Matveevac78d002013-06-24 08:34:50 +00001209 // Iterate over all existing chunks.
1210 // The allocator must be locked when calling this function.
1211 void ForEachChunk(ForEachChunkCallback callback, void *arg) {
Kostya Serebryany300f9532013-03-15 11:39:41 +00001212 for (uptr i = 0; i < n_chunks_; i++)
Sergey Matveevac78d002013-06-24 08:34:50 +00001213 callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
Kostya Serebryany300f9532013-03-15 11:39:41 +00001214 }
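
  // Sketch: the callback receives the user pointer of each chunk as a uptr.
  // A hypothetical visitor that simply counts chunks (the allocator must
  // already be locked, e.g. via ForceLock()):
  //   static void CountChunk(uptr chunk, void *arg) {
  //     (*reinterpret_cast<uptr*>(arg))++;
  //   }
  //   uptr n = 0;
  //   a.ForEachChunk(CountChunk, &n);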
1215
Kostya Serebryany43811a12012-12-04 07:54:41 +00001216 private:
Kostya Serebryanyb8c363d2012-12-24 14:35:14 +00001217 static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
Kostya Serebryany43811a12012-12-04 07:54:41 +00001218 struct Header {
1219 uptr map_beg;
1220 uptr map_size;
1221 uptr size;
Kostya Serebryanyb8c363d2012-12-24 14:35:14 +00001222 uptr chunk_idx;
Kostya Serebryany43811a12012-12-04 07:54:41 +00001223 };
1224
1225 Header *GetHeader(uptr p) {
Kostya Serebryanyf8c3f3d2013-05-30 08:43:30 +00001226 CHECK(IsAligned(p, page_size_));
Kostya Serebryany43811a12012-12-04 07:54:41 +00001227 return reinterpret_cast<Header*>(p - page_size_);
1228 }
Alexey Samsonov53fbbf42013-08-05 13:20:39 +00001229 Header *GetHeader(const void *p) {
1230 return GetHeader(reinterpret_cast<uptr>(p));
1231 }
Kostya Serebryany43811a12012-12-04 07:54:41 +00001232
1233 void *GetUser(Header *h) {
Kostya Serebryanyf8c3f3d2013-05-30 08:43:30 +00001234 CHECK(IsAligned((uptr)h, page_size_));
Kostya Serebryany43811a12012-12-04 07:54:41 +00001235 return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
1236 }
1237
1238 uptr RoundUpMapSize(uptr size) {
1239 return RoundUpTo(size, page_size_) + page_size_;
1240 }
1241
1242 uptr page_size_;
Kostya Serebryanyb8c363d2012-12-24 14:35:14 +00001243 Header *chunks_[kMaxNumChunks];
1244 uptr n_chunks_;
Kostya Serebryanyf8c3f3d2013-05-30 08:43:30 +00001245 uptr min_mmap_, max_mmap_;
1246 bool chunks_sorted_;
Kostya Serebryany4b48f452012-12-27 14:09:19 +00001247 struct Stats {
Kostya Serebryany800344c2013-01-10 13:38:38 +00001248 uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
Kostya Serebryany4b48f452012-12-27 14:09:19 +00001249 } stats;
Stephen Hines86277eb2015-03-23 12:06:32 -07001250 atomic_uint8_t may_return_null_;
Kostya Serebryany43811a12012-12-04 07:54:41 +00001251 SpinMutex mutex_;
1252};
1253
1254// This class implements a complete memory allocator by using two
1255// internal allocators:
1256// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
1257// When allocating 2^x bytes it should return a 2^x-aligned chunk.
1258// PrimaryAllocator is used via a local AllocatorCache.
1259// SecondaryAllocator can allocate anything, but is not efficient.
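//
// A hypothetical instantiation (ToolPrimary, ToolCache and ToolSecondary are
// placeholders that a tool would define, not names from this header):
//   typedef CombinedAllocator<ToolPrimary, ToolCache, ToolSecondary> Allocator;
//   static Allocator allocator;                  // lives for the whole process
//   allocator.Init(/*may_return_null*/ false);   // called once at start-up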
1260template <class PrimaryAllocator, class AllocatorCache,
1261 class SecondaryAllocator> // NOLINT
1262class CombinedAllocator {
1263 public:
Stephen Hines86277eb2015-03-23 12:06:32 -07001264 void InitCommon(bool may_return_null) {
Kostya Serebryany43811a12012-12-04 07:54:41 +00001265 primary_.Init();
Stephen Hines86277eb2015-03-23 12:06:32 -07001266 atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
1267 }
1268
1269 void InitLinkerInitialized(bool may_return_null) {
1270 secondary_.InitLinkerInitialized(may_return_null);
1271 stats_.InitLinkerInitialized();
1272 InitCommon(may_return_null);
1273 }
1274
1275 void Init(bool may_return_null) {
1276 secondary_.Init(may_return_null);
Dmitry Vyukovbdd844c2013-01-24 09:08:03 +00001277 stats_.Init();
Stephen Hines86277eb2015-03-23 12:06:32 -07001278 InitCommon(may_return_null);
Kostya Serebryany43811a12012-12-04 07:54:41 +00001279 }
1280
1281 void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
Stephen Hines86277eb2015-03-23 12:06:32 -07001282 bool cleared = false, bool check_rss_limit = false) {
Kostya Serebryany43811a12012-12-04 07:54:41 +00001283 // Returning 0 on malloc(0) may break a lot of code.
1284 if (size == 0)
1285 size = 1;
1286 if (size + alignment < size)
Stephen Hines86277eb2015-03-23 12:06:32 -07001287 return ReturnNullOrDie();
1288 if (check_rss_limit && RssLimitIsExceeded())
1289 return ReturnNullOrDie();
Kostya Serebryany43811a12012-12-04 07:54:41 +00001290 if (alignment > 8)
1291 size = RoundUpTo(size, alignment);
1292 void *res;
Stephen Hines2d1fdb22014-05-28 23:58:16 -07001293 bool from_primary = primary_.CanAllocate(size, alignment);
1294 if (from_primary)
Dmitry Vyukovce173842013-01-11 11:15:48 +00001295 res = cache->Allocate(&primary_, primary_.ClassID(size));
1296 else
Dmitry Vyukovbdd844c2013-01-24 09:08:03 +00001297 res = secondary_.Allocate(&stats_, size, alignment);
Kostya Serebryany43811a12012-12-04 07:54:41 +00001298 if (alignment > 8)
1299 CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
Stephen Hines2d1fdb22014-05-28 23:58:16 -07001300 if (cleared && res && from_primary)
1301 internal_bzero_aligned16(res, RoundUpTo(size, 16));
Kostya Serebryany43811a12012-12-04 07:54:41 +00001302 return res;
1303 }
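
  // Usage sketch (hypothetical `allocator` and per-thread AllocatorCache
  // `cache`); the result can be null only when may_return_null_ is set,
  // otherwise failures abort via ReturnNullOrDie():
  //   void *p = allocator.Allocate(&cache, size, /*alignment*/ 8,
  //                                /*cleared*/ false, /*check_rss_limit*/ true);
  //   if (!p)
  //     return 0;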
1304
Stephen Hines86277eb2015-03-23 12:06:32 -07001305 bool MayReturnNull() const {
1306 return atomic_load(&may_return_null_, memory_order_acquire);
1307 }
1308
1309 void *ReturnNullOrDie() {
1310 if (MayReturnNull())
1311 return 0;
1312 ReportAllocatorCannotReturnNull();
1313 }
1314
1315 void SetMayReturnNull(bool may_return_null) {
1316 secondary_.SetMayReturnNull(may_return_null);
1317 atomic_store(&may_return_null_, may_return_null, memory_order_release);
1318 }
1319
1320 bool RssLimitIsExceeded() {
1321 return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
1322 }
1323
1324 void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) {
1325 atomic_store(&rss_limit_is_exceeded_, rss_limit_is_exceeded,
1326 memory_order_release);
1327 }
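
  // Sketch: a hypothetical background thread keeps the flag in sync with the
  // process RSS, so allocations made with check_rss_limit == true fail via
  // ReturnNullOrDie(). GetProcessRss() and kRssLimit are assumptions here,
  // and SleepForMillis() is assumed to be available from sanitizer_common.
  //   void RssLimitWatcher() {
  //     for (;;) {
  //       allocator.SetRssLimitIsExceeded(GetProcessRss() > kRssLimit);
  //       SleepForMillis(100);
  //     }
  //   }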
1328
Kostya Serebryany43811a12012-12-04 07:54:41 +00001329 void Deallocate(AllocatorCache *cache, void *p) {
1330 if (!p) return;
1331 if (primary_.PointerIsMine(p))
1332 cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
1333 else
Dmitry Vyukovbdd844c2013-01-24 09:08:03 +00001334 secondary_.Deallocate(&stats_, p);
Kostya Serebryany43811a12012-12-04 07:54:41 +00001335 }
1336
1337 void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
1338 uptr alignment) {
1339 if (!p)
1340 return Allocate(cache, new_size, alignment);
1341 if (!new_size) {
1342 Deallocate(cache, p);
1343 return 0;
1344 }
1345 CHECK(PointerIsMine(p));
1346 uptr old_size = GetActuallyAllocatedSize(p);
1347 uptr memcpy_size = Min(new_size, old_size);
1348 void *new_p = Allocate(cache, new_size, alignment);
1349 if (new_p)
1350 internal_memcpy(new_p, p, memcpy_size);
1351 Deallocate(cache, p);
1352 return new_p;
1353 }
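
  // Sketch of a realloc-style wrapper on top of this (hypothetical tool code):
  //   void *tool_realloc(void *p, uptr new_size) {
  //     return allocator.Reallocate(&cache, p, new_size, /*alignment*/ 8);
  //   }
  // Note that Reallocate(cache, p, 0, ...) frees p and returns 0, matching the
  // usual realloc(p, 0) convention.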
1354
1355 bool PointerIsMine(void *p) {
1356 if (primary_.PointerIsMine(p))
1357 return true;
1358 return secondary_.PointerIsMine(p);
1359 }
1360
Kostya Serebryany111a0712012-12-26 04:52:07 +00001361 bool FromPrimary(void *p) {
1362 return primary_.PointerIsMine(p);
1363 }
1364
Evgeniy Stepanov5c48a8c2013-08-02 14:26:58 +00001365 void *GetMetaData(const void *p) {
Kostya Serebryany43811a12012-12-04 07:54:41 +00001366 if (primary_.PointerIsMine(p))
1367 return primary_.GetMetaData(p);
1368 return secondary_.GetMetaData(p);
1369 }
1370
Sergey Matveevcd571e02013-06-06 14:17:56 +00001371 void *GetBlockBegin(const void *p) {
Kostya Serebryany43811a12012-12-04 07:54:41 +00001372 if (primary_.PointerIsMine(p))
1373 return primary_.GetBlockBegin(p);
1374 return secondary_.GetBlockBegin(p);
1375 }
1376
Sergey Matveevba2169a2013-05-31 11:13:45 +00001377 // This function does the same as GetBlockBegin, but is much faster.
1378 // Must be called with the allocator locked.
1379 void *GetBlockBeginFastLocked(void *p) {
1380 if (primary_.PointerIsMine(p))
1381 return primary_.GetBlockBegin(p);
1382 return secondary_.GetBlockBeginFastLocked(p);
1383 }
1384
Kostya Serebryany43811a12012-12-04 07:54:41 +00001385 uptr GetActuallyAllocatedSize(void *p) {
1386 if (primary_.PointerIsMine(p))
1387 return primary_.GetActuallyAllocatedSize(p);
1388 return secondary_.GetActuallyAllocatedSize(p);
1389 }
1390
1391 uptr TotalMemoryUsed() {
1392 return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
1393 }
1394
1395 void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
1396
Dmitry Vyukovbdd844c2013-01-24 09:08:03 +00001397 void InitCache(AllocatorCache *cache) {
1398 cache->Init(&stats_);
1399 }
1400
1401 void DestroyCache(AllocatorCache *cache) {
1402 cache->Destroy(&primary_, &stats_);
1403 }
1404
Kostya Serebryany43811a12012-12-04 07:54:41 +00001405 void SwallowCache(AllocatorCache *cache) {
1406 cache->Drain(&primary_);
1407 }
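
  // Per-thread cache lifecycle sketch (hypothetical `thread_cache`):
  //   allocator.InitCache(&thread_cache);     // when the thread starts
  //   // ... the thread allocates and deallocates through &thread_cache ...
  //   allocator.DestroyCache(&thread_cache);  // when the thread exits
  // SwallowCache() merely drains the cache into the primary allocator and can
  // be used when the cache object itself must remain usable afterwards.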
1408
Dmitry Vyukovbdd844c2013-01-24 09:08:03 +00001409 void GetStats(AllocatorStatCounters s) const {
1410 stats_.Get(s);
1411 }
1412
Kostya Serebryany4b48f452012-12-27 14:09:19 +00001413 void PrintStats() {
1414 primary_.PrintStats();
1415 secondary_.PrintStats();
1416 }
1417
Alexander Potapenko6a11cc12013-02-07 11:40:03 +00001418  // ForceLock() and ForceUnlock() are needed to implement the Darwin malloc zone
1419 // introspection API.
1420 void ForceLock() {
1421 primary_.ForceLock();
1422 secondary_.ForceLock();
1423 }
1424
1425 void ForceUnlock() {
1426 secondary_.ForceUnlock();
1427 primary_.ForceUnlock();
1428 }
1429
Sergey Matveevac78d002013-06-24 08:34:50 +00001430 // Iterate over all existing chunks.
1431 // The allocator must be locked when calling this function.
1432 void ForEachChunk(ForEachChunkCallback callback, void *arg) {
1433 primary_.ForEachChunk(callback, arg);
1434 secondary_.ForEachChunk(callback, arg);
Kostya Serebryany300f9532013-03-15 11:39:41 +00001435 }
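
  // Sketch: enumerating every live chunk from both allocators, e.g. for a
  // leak-check style scan. VisitChunk(), ChunkVisitor and `visitor` are
  // hypothetical.
  //   static void VisitChunk(uptr chunk, void *arg) {
  //     reinterpret_cast<ChunkVisitor*>(arg)->OnChunk(chunk);
  //   }
  //   allocator.ForceLock();
  //   allocator.ForEachChunk(VisitChunk, &visitor);
  //   allocator.ForceUnlock();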
1436
Kostya Serebryany43811a12012-12-04 07:54:41 +00001437 private:
1438 PrimaryAllocator primary_;
1439 SecondaryAllocator secondary_;
Dmitry Vyukovbdd844c2013-01-24 09:08:03 +00001440 AllocatorGlobalStats stats_;
Stephen Hines86277eb2015-03-23 12:06:32 -07001441 atomic_uint8_t may_return_null_;
1442 atomic_uint8_t rss_limit_is_exceeded_;
Kostya Serebryany43811a12012-12-04 07:54:41 +00001443};
1444
Kostya Serebryany65199f12013-01-25 11:46:22 +00001445// Returns true if calloc(size, n) should return 0 due to overflow in size*n.
1446bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n);
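
// Sketch of how a tool's calloc path might use this check (hypothetical
// `tool_calloc`; `allocator` and `cache` as in the CombinedAllocator sketches
// above):
//   void *tool_calloc(uptr nmemb, uptr size) {
//     if (CallocShouldReturnNullDueToOverflow(size, nmemb))
//       return allocator.ReturnNullOrDie();
//     return allocator.Allocate(&cache, nmemb * size, 8, /*cleared*/ true);
//   }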
1447
Kostya Serebryany43811a12012-12-04 07:54:41 +00001448} // namespace __sanitizer
1449
1450#endif // SANITIZER_ALLOCATOR_H
1451