blob: 059680b0613352a681e65c43f8ad520b4eada56d [file] [log] [blame]
//===-- primary32.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
8
9#ifndef SCUDO_PRIMARY32_H_
10#define SCUDO_PRIMARY32_H_
11
12#include "bytemap.h"
13#include "common.h"
14#include "list.h"
15#include "local_cache.h"
Peter Collingbourne3cf60372020-09-24 17:01:24 -070016#include "options.h"
Dynamic Tools Team517193e2019-09-11 14:48:41 +000017#include "release.h"
18#include "report.h"
19#include "stats.h"
20#include "string_utils.h"
21
22namespace scudo {
23
// SizeClassAllocator32 is an allocator for 32 or 64-bit address space.
//
// It maps Regions of 2^RegionSizeLog bytes aligned on a 2^RegionSizeLog bytes
// boundary, and keeps a bytemap of the mappable address space to track the size
// class they are associated with.
//
// Mapped regions are split into equally sized Blocks according to the size
// class they belong to, and the associated pointers are shuffled to prevent any
// predictable address pattern (the predictability increases with the block
// size).
//
// Regions for size class 0 are special and used to hold TransferBatches, which
// allow to transfer arrays of pointers from the global size class freelist to
// the thread specific freelist for said class, and back.
//
// Memory used by this allocator is never unmapped but can be partially
// reclaimed if the platform allows for it.

Peter Collingbourne6be49192020-12-15 14:26:10 -080042template <typename Config> class SizeClassAllocator32 {
Dynamic Tools Team517193e2019-09-11 14:48:41 +000043public:
Kostya Kortchinskyc9369542021-02-10 10:17:18 -080044 typedef typename Config::PrimaryCompactPtrT CompactPtrT;
Peter Collingbourne6be49192020-12-15 14:26:10 -080045 typedef typename Config::SizeClassMap SizeClassMap;
Dynamic Tools Teamac403052020-02-06 15:46:05 -080046 // The bytemap can only track UINT8_MAX - 1 classes.
47 static_assert(SizeClassMap::LargestClassId <= (UINT8_MAX - 1), "");
Dynamic Tools Team517193e2019-09-11 14:48:41 +000048 // Regions should be large enough to hold the largest Block.
Peter Collingbourne6be49192020-12-15 14:26:10 -080049 static_assert((1UL << Config::PrimaryRegionSizeLog) >= SizeClassMap::MaxSize,
50 "");
51 typedef SizeClassAllocator32<Config> ThisT;
Dynamic Tools Team517193e2019-09-11 14:48:41 +000052 typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
53 typedef typename CacheT::TransferBatch TransferBatch;
54
55 static uptr getSizeByClassId(uptr ClassId) {
56 return (ClassId == SizeClassMap::BatchClassId)
57 ? sizeof(TransferBatch)
58 : SizeClassMap::getSizeByClassId(ClassId);
59 }
60
61 static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
62
Kostya Kortchinsky4a435d22021-05-25 15:00:58 -070063 void init(s32 ReleaseToOsInterval) {
Dynamic Tools Team517193e2019-09-11 14:48:41 +000064 if (SCUDO_FUCHSIA)
65 reportError("SizeClassAllocator32 is not supported on Fuchsia");
66
Kostya Kortchinsky4a435d22021-05-25 15:00:58 -070067 PossibleRegions.init();
Dynamic Tools Team517193e2019-09-11 14:48:41 +000068
69 u32 Seed;
Dynamic Tools Team6a8384a2020-03-03 11:16:31 -080070 const u64 Time = getMonotonicTime();
Kostya Kortchinskyc9369542021-02-10 10:17:18 -080071 if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
Dynamic Tools Team6a8384a2020-03-03 11:16:31 -080072 Seed = static_cast<u32>(
73 Time ^ (reinterpret_cast<uptr>(SizeClassInfoArray) >> 6));
Dynamic Tools Team517193e2019-09-11 14:48:41 +000074 for (uptr I = 0; I < NumClasses; I++) {
75 SizeClassInfo *Sci = getSizeClassInfo(I);
76 Sci->RandState = getRandomU32(&Seed);
Kostya Kortchinskyab3e32b2020-11-09 15:14:49 -080077 // Sci->MaxRegionIndex is already initialized to 0.
78 Sci->MinRegionIndex = NumRegions;
Kostya Kortchinsky156d47c2020-12-11 14:04:47 -080079 Sci->ReleaseInfo.LastReleaseAtNs = Time;
Dynamic Tools Team517193e2019-09-11 14:48:41 +000080 }
Kostya Kortchinskyc72ca562020-07-27 09:13:42 -070081 setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
Dynamic Tools Team517193e2019-09-11 14:48:41 +000082 }
Dynamic Tools Team517193e2019-09-11 14:48:41 +000083
84 void unmapTestOnly() {
85 while (NumberOfStashedRegions > 0)
86 unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
87 RegionSize);
Kostya Kortchinskyab3e32b2020-11-09 15:14:49 -080088 uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
89 for (uptr I = 0; I < NumClasses; I++) {
90 SizeClassInfo *Sci = getSizeClassInfo(I);
91 if (Sci->MinRegionIndex < MinRegionIndex)
92 MinRegionIndex = Sci->MinRegionIndex;
93 if (Sci->MaxRegionIndex > MaxRegionIndex)
94 MaxRegionIndex = Sci->MaxRegionIndex;
Kostya Kortchinsky4a435d22021-05-25 15:00:58 -070095 *Sci = {};
Kostya Kortchinskyab3e32b2020-11-09 15:14:49 -080096 }
97 for (uptr I = MinRegionIndex; I < MaxRegionIndex; I++)
Dynamic Tools Team517193e2019-09-11 14:48:41 +000098 if (PossibleRegions[I])
99 unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
100 PossibleRegions.unmapTestOnly();
101 }
102
Kostya Kortchinskyc9369542021-02-10 10:17:18 -0800103 CompactPtrT compactPtr(UNUSED uptr ClassId, uptr Ptr) const {
104 return static_cast<CompactPtrT>(Ptr);
105 }
106
107 void *decompactPtr(UNUSED uptr ClassId, CompactPtrT CompactPtr) const {
108 return reinterpret_cast<void *>(static_cast<uptr>(CompactPtr));
109 }
110
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000111 TransferBatch *popBatch(CacheT *C, uptr ClassId) {
112 DCHECK_LT(ClassId, NumClasses);
113 SizeClassInfo *Sci = getSizeClassInfo(ClassId);
114 ScopedLock L(Sci->Mutex);
115 TransferBatch *B = Sci->FreeList.front();
116 if (B) {
117 Sci->FreeList.pop_front();
118 } else {
119 B = populateFreeList(C, ClassId, Sci);
120 if (UNLIKELY(!B))
121 return nullptr;
122 }
123 DCHECK_GT(B->getCount(), 0);
124 Sci->Stats.PoppedBlocks += B->getCount();
125 return B;
126 }
127
128 void pushBatch(uptr ClassId, TransferBatch *B) {
129 DCHECK_LT(ClassId, NumClasses);
130 DCHECK_GT(B->getCount(), 0);
131 SizeClassInfo *Sci = getSizeClassInfo(ClassId);
132 ScopedLock L(Sci->Mutex);
133 Sci->FreeList.push_front(B);
134 Sci->Stats.PushedBlocks += B->getCount();
Kostya Kortchinsky156d47c2020-12-11 14:04:47 -0800135 if (ClassId != SizeClassMap::BatchClassId)
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000136 releaseToOSMaybe(Sci, ClassId);
137 }
138
139 void disable() {
Dynamic Tools Team83eaa512020-01-09 11:43:16 -0800140 // The BatchClassId must be locked last since other classes can use it.
141 for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
142 if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
143 continue;
144 getSizeClassInfo(static_cast<uptr>(I))->Mutex.lock();
145 }
146 getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.lock();
147 RegionsStashMutex.lock();
148 PossibleRegions.disable();
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000149 }
150
151 void enable() {
Dynamic Tools Team83eaa512020-01-09 11:43:16 -0800152 PossibleRegions.enable();
153 RegionsStashMutex.unlock();
154 getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.unlock();
155 for (uptr I = 0; I < NumClasses; I++) {
156 if (I == SizeClassMap::BatchClassId)
157 continue;
158 getSizeClassInfo(I)->Mutex.unlock();
159 }
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000160 }
161
162 template <typename F> void iterateOverBlocks(F Callback) {
Kostya Kortchinskyab3e32b2020-11-09 15:14:49 -0800163 uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
164 for (uptr I = 0; I < NumClasses; I++) {
165 SizeClassInfo *Sci = getSizeClassInfo(I);
166 if (Sci->MinRegionIndex < MinRegionIndex)
167 MinRegionIndex = Sci->MinRegionIndex;
168 if (Sci->MaxRegionIndex > MaxRegionIndex)
169 MaxRegionIndex = Sci->MaxRegionIndex;
170 }
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000171 for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
Dynamic Tools Teamac403052020-02-06 15:46:05 -0800172 if (PossibleRegions[I] &&
173 (PossibleRegions[I] - 1U) != SizeClassMap::BatchClassId) {
174 const uptr BlockSize = getSizeByClassId(PossibleRegions[I] - 1U);
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000175 const uptr From = I * RegionSize;
176 const uptr To = From + (RegionSize / BlockSize) * BlockSize;
177 for (uptr Block = From; Block < To; Block += BlockSize)
178 Callback(Block);
179 }
180 }
181
Dynamic Tools Team3e8c65b2019-10-18 20:00:32 +0000182 void getStats(ScopedString *Str) {
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000183 // TODO(kostyak): get the RSS per region.
184 uptr TotalMapped = 0;
185 uptr PoppedBlocks = 0;
186 uptr PushedBlocks = 0;
187 for (uptr I = 0; I < NumClasses; I++) {
188 SizeClassInfo *Sci = getSizeClassInfo(I);
189 TotalMapped += Sci->AllocatedUser;
190 PoppedBlocks += Sci->Stats.PoppedBlocks;
191 PushedBlocks += Sci->Stats.PushedBlocks;
192 }
Dynamic Tools Team3e8c65b2019-10-18 20:00:32 +0000193 Str->append("Stats: SizeClassAllocator32: %zuM mapped in %zu allocations; "
194 "remains %zu\n",
195 TotalMapped >> 20, PoppedBlocks, PoppedBlocks - PushedBlocks);
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000196 for (uptr I = 0; I < NumClasses; I++)
Dynamic Tools Team3e8c65b2019-10-18 20:00:32 +0000197 getStats(Str, I, 0);
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000198 }
199
Kostya Kortchinskyc72ca562020-07-27 09:13:42 -0700200 bool setOption(Option O, sptr Value) {
201 if (O == Option::ReleaseInterval) {
Peter Collingbourne6be49192020-12-15 14:26:10 -0800202 const s32 Interval = Max(
203 Min(static_cast<s32>(Value), Config::PrimaryMaxReleaseToOsIntervalMs),
204 Config::PrimaryMinReleaseToOsIntervalMs);
Kostya Kortchinskya51a8922020-11-02 14:27:11 -0800205 atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
Kostya Kortchinskyc72ca562020-07-27 09:13:42 -0700206 return true;
Dynamic Tools Team26387462020-02-14 12:24:03 -0800207 }
Kostya Kortchinskyc72ca562020-07-27 09:13:42 -0700208 // Not supported by the Primary, but not an error either.
209 return true;
Dynamic Tools Team26387462020-02-14 12:24:03 -0800210 }
211
Dynamic Tools Teamc283bab2019-10-07 17:37:39 +0000212 uptr releaseToOS() {
213 uptr TotalReleasedBytes = 0;
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000214 for (uptr I = 0; I < NumClasses; I++) {
Kostya Kortchinsky156d47c2020-12-11 14:04:47 -0800215 if (I == SizeClassMap::BatchClassId)
216 continue;
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000217 SizeClassInfo *Sci = getSizeClassInfo(I);
218 ScopedLock L(Sci->Mutex);
Dynamic Tools Teamc283bab2019-10-07 17:37:39 +0000219 TotalReleasedBytes += releaseToOSMaybe(Sci, I, /*Force=*/true);
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000220 }
Dynamic Tools Teamc283bab2019-10-07 17:37:39 +0000221 return TotalReleasedBytes;
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000222 }
223
Dynamic Tools Team0d7d2ae2020-04-21 15:30:50 -0700224 const char *getRegionInfoArrayAddress() const { return nullptr; }
225 static uptr getRegionInfoArraySize() { return 0; }
226
Kostya Kortchinskyab3e32b2020-11-09 15:14:49 -0800227 static BlockInfo findNearestBlock(UNUSED const char *RegionInfoData,
228 UNUSED uptr Ptr) {
Dynamic Tools Team0d7d2ae2020-04-21 15:30:50 -0700229 return {};
230 }
231
Peter Collingbourne3cf60372020-09-24 17:01:24 -0700232 AtomicOptions Options;
233
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000234private:
235 static const uptr NumClasses = SizeClassMap::NumClasses;
Peter Collingbourne6be49192020-12-15 14:26:10 -0800236 static const uptr RegionSize = 1UL << Config::PrimaryRegionSizeLog;
237 static const uptr NumRegions =
238 SCUDO_MMAP_RANGE_SIZE >> Config::PrimaryRegionSizeLog;
Dynamic Tools Team6a8384a2020-03-03 11:16:31 -0800239 static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000240 typedef FlatByteMap<NumRegions> ByteMap;
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000241
242 struct SizeClassStats {
243 uptr PoppedBlocks;
244 uptr PushedBlocks;
245 };
246
247 struct ReleaseToOsInfo {
248 uptr PushedBlocksAtLastRelease;
249 uptr RangesReleased;
250 uptr LastReleasedBytes;
251 u64 LastReleaseAtNs;
252 };
253
Dynamic Tools Team08b690a2020-04-10 13:41:12 -0700254 struct alignas(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000255 HybridMutex Mutex;
Dynamic Tools Team35997702019-10-28 15:06:10 -0700256 SinglyLinkedList<TransferBatch> FreeList;
Dynamic Tools Team6a8384a2020-03-03 11:16:31 -0800257 uptr CurrentRegion;
258 uptr CurrentRegionAllocated;
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000259 SizeClassStats Stats;
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000260 u32 RandState;
261 uptr AllocatedUser;
Kostya Kortchinskyab3e32b2020-11-09 15:14:49 -0800262 // Lowest & highest region index allocated for this size class, to avoid
263 // looping through the whole NumRegions.
264 uptr MinRegionIndex;
265 uptr MaxRegionIndex;
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000266 ReleaseToOsInfo ReleaseInfo;
267 };
Dynamic Tools Team09e6d482019-11-26 18:18:14 -0800268 static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000269
270 uptr computeRegionId(uptr Mem) {
Peter Collingbourne6be49192020-12-15 14:26:10 -0800271 const uptr Id = Mem >> Config::PrimaryRegionSizeLog;
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000272 CHECK_LT(Id, NumRegions);
273 return Id;
274 }
275
276 uptr allocateRegionSlow() {
277 uptr MapSize = 2 * RegionSize;
278 const uptr MapBase = reinterpret_cast<uptr>(
279 map(nullptr, MapSize, "scudo:primary", MAP_ALLOWNOMEM));
Kostya Kortchinskye7788062020-11-03 11:21:15 -0800280 if (!MapBase)
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000281 return 0;
282 const uptr MapEnd = MapBase + MapSize;
283 uptr Region = MapBase;
284 if (isAligned(Region, RegionSize)) {
285 ScopedLock L(RegionsStashMutex);
286 if (NumberOfStashedRegions < MaxStashedRegions)
287 RegionsStash[NumberOfStashedRegions++] = MapBase + RegionSize;
288 else
289 MapSize = RegionSize;
290 } else {
291 Region = roundUpTo(MapBase, RegionSize);
292 unmap(reinterpret_cast<void *>(MapBase), Region - MapBase);
293 MapSize = RegionSize;
294 }
295 const uptr End = Region + MapSize;
296 if (End != MapEnd)
297 unmap(reinterpret_cast<void *>(End), MapEnd - End);
298 return Region;
299 }
300
Kostya Kortchinskyab3e32b2020-11-09 15:14:49 -0800301 uptr allocateRegion(SizeClassInfo *Sci, uptr ClassId) {
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000302 DCHECK_LT(ClassId, NumClasses);
303 uptr Region = 0;
304 {
305 ScopedLock L(RegionsStashMutex);
306 if (NumberOfStashedRegions > 0)
307 Region = RegionsStash[--NumberOfStashedRegions];
308 }
309 if (!Region)
310 Region = allocateRegionSlow();
311 if (LIKELY(Region)) {
Kostya Kortchinskyab3e32b2020-11-09 15:14:49 -0800312 // Sci->Mutex is held by the caller, updating the Min/Max is safe.
Dynamic Tools Teamac403052020-02-06 15:46:05 -0800313 const uptr RegionIndex = computeRegionId(Region);
Kostya Kortchinskyab3e32b2020-11-09 15:14:49 -0800314 if (RegionIndex < Sci->MinRegionIndex)
315 Sci->MinRegionIndex = RegionIndex;
316 if (RegionIndex > Sci->MaxRegionIndex)
317 Sci->MaxRegionIndex = RegionIndex;
Dynamic Tools Teamac403052020-02-06 15:46:05 -0800318 PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId + 1U));
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000319 }
320 return Region;
321 }
322
323 SizeClassInfo *getSizeClassInfo(uptr ClassId) {
324 DCHECK_LT(ClassId, NumClasses);
325 return &SizeClassInfoArray[ClassId];
326 }
327
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000328 NOINLINE TransferBatch *populateFreeList(CacheT *C, uptr ClassId,
329 SizeClassInfo *Sci) {
Dynamic Tools Team6a8384a2020-03-03 11:16:31 -0800330 uptr Region;
331 uptr Offset;
332 // If the size-class currently has a region associated to it, use it. The
333 // newly created blocks will be located after the currently allocated memory
334 // for that region (up to RegionSize). Otherwise, create a new region, where
335 // the new blocks will be carved from the beginning.
336 if (Sci->CurrentRegion) {
337 Region = Sci->CurrentRegion;
338 DCHECK_GT(Sci->CurrentRegionAllocated, 0U);
339 Offset = Sci->CurrentRegionAllocated;
340 } else {
341 DCHECK_EQ(Sci->CurrentRegionAllocated, 0U);
Kostya Kortchinskyab3e32b2020-11-09 15:14:49 -0800342 Region = allocateRegion(Sci, ClassId);
Dynamic Tools Team6a8384a2020-03-03 11:16:31 -0800343 if (UNLIKELY(!Region))
344 return nullptr;
345 C->getStats().add(StatMapped, RegionSize);
346 Sci->CurrentRegion = Region;
347 Offset = 0;
348 }
349
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000350 const uptr Size = getSizeByClassId(ClassId);
351 const u32 MaxCount = TransferBatch::getMaxCached(Size);
Dynamic Tools Team6a8384a2020-03-03 11:16:31 -0800352 DCHECK_GT(MaxCount, 0U);
353 // The maximum number of blocks we should carve in the region is dictated
354 // by the maximum number of batches we want to fill, and the amount of
355 // memory left in the current region (we use the lowest of the two). This
356 // will not be 0 as we ensure that a region can at least hold one block (via
357 // static_assert and at the end of this function).
358 const u32 NumberOfBlocks =
359 Min(MaxNumBatches * MaxCount,
360 static_cast<u32>((RegionSize - Offset) / Size));
361 DCHECK_GT(NumberOfBlocks, 0U);
362
Dynamic Tools Team6a8384a2020-03-03 11:16:31 -0800363 constexpr u32 ShuffleArraySize =
364 MaxNumBatches * TransferBatch::MaxNumCached;
365 // Fill the transfer batches and put them in the size-class freelist. We
366 // need to randomize the blocks for security purposes, so we first fill a
367 // local array that we then shuffle before populating the batches.
Kostya Kortchinskyc9369542021-02-10 10:17:18 -0800368 CompactPtrT ShuffleArray[ShuffleArraySize];
Kostya Kortchinskye7788062020-11-03 11:21:15 -0800369 DCHECK_LE(NumberOfBlocks, ShuffleArraySize);
370
371 uptr P = Region + Offset;
372 for (u32 I = 0; I < NumberOfBlocks; I++, P += Size)
Kostya Kortchinskyc9369542021-02-10 10:17:18 -0800373 ShuffleArray[I] = reinterpret_cast<CompactPtrT>(P);
Kostya Kortchinskye7788062020-11-03 11:21:15 -0800374 // No need to shuffle the batches size class.
375 if (ClassId != SizeClassMap::BatchClassId)
376 shuffle(ShuffleArray, NumberOfBlocks, &Sci->RandState);
377 for (u32 I = 0; I < NumberOfBlocks;) {
Kostya Kortchinskyc9369542021-02-10 10:17:18 -0800378 TransferBatch *B =
379 C->createBatch(ClassId, reinterpret_cast<void *>(ShuffleArray[I]));
Kostya Kortchinskye7788062020-11-03 11:21:15 -0800380 if (UNLIKELY(!B))
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000381 return nullptr;
Kostya Kortchinskye7788062020-11-03 11:21:15 -0800382 const u32 N = Min(MaxCount, NumberOfBlocks - I);
383 B->setFromArray(&ShuffleArray[I], N);
Dynamic Tools Teamaa152462019-11-19 10:18:38 -0800384 Sci->FreeList.push_back(B);
Kostya Kortchinskye7788062020-11-03 11:21:15 -0800385 I += N;
Dynamic Tools Teamaa152462019-11-19 10:18:38 -0800386 }
Kostya Kortchinskye7788062020-11-03 11:21:15 -0800387 TransferBatch *B = Sci->FreeList.front();
388 Sci->FreeList.pop_front();
389 DCHECK(B);
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000390 DCHECK_GT(B->getCount(), 0);
391
Kostya Kortchinskye7788062020-11-03 11:21:15 -0800392 const uptr AllocatedUser = Size * NumberOfBlocks;
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000393 C->getStats().add(StatFree, AllocatedUser);
Dynamic Tools Team6a8384a2020-03-03 11:16:31 -0800394 DCHECK_LE(Sci->CurrentRegionAllocated + AllocatedUser, RegionSize);
395 // If there is not enough room in the region currently associated to fit
396 // more blocks, we deassociate the region by resetting CurrentRegion and
397 // CurrentRegionAllocated. Otherwise, update the allocated amount.
398 if (RegionSize - (Sci->CurrentRegionAllocated + AllocatedUser) < Size) {
399 Sci->CurrentRegion = 0;
400 Sci->CurrentRegionAllocated = 0;
401 } else {
402 Sci->CurrentRegionAllocated += AllocatedUser;
403 }
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000404 Sci->AllocatedUser += AllocatedUser;
Dynamic Tools Team6a8384a2020-03-03 11:16:31 -0800405
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000406 return B;
407 }
408
Dynamic Tools Team3e8c65b2019-10-18 20:00:32 +0000409 void getStats(ScopedString *Str, uptr ClassId, uptr Rss) {
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000410 SizeClassInfo *Sci = getSizeClassInfo(ClassId);
411 if (Sci->AllocatedUser == 0)
412 return;
413 const uptr InUse = Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks;
414 const uptr AvailableChunks = Sci->AllocatedUser / getSizeByClassId(ClassId);
Dynamic Tools Team3e8c65b2019-10-18 20:00:32 +0000415 Str->append(" %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
Dynamic Tools Teamac403052020-02-06 15:46:05 -0800416 "inuse: %6zu avail: %6zu rss: %6zuK releases: %6zu\n",
Dynamic Tools Team3e8c65b2019-10-18 20:00:32 +0000417 ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10,
418 Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks, InUse,
Dynamic Tools Teamac403052020-02-06 15:46:05 -0800419 AvailableChunks, Rss >> 10, Sci->ReleaseInfo.RangesReleased);
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000420 }
421
Dynamic Tools Teamc283bab2019-10-07 17:37:39 +0000422 NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000423 bool Force = false) {
424 const uptr BlockSize = getSizeByClassId(ClassId);
425 const uptr PageSize = getPageSizeCached();
426
Kostya Kortchinskyab3e32b2020-11-09 15:14:49 -0800427 DCHECK_GE(Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks);
Dynamic Tools Teamc283bab2019-10-07 17:37:39 +0000428 const uptr BytesInFreeList =
429 Sci->AllocatedUser -
430 (Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks) * BlockSize;
431 if (BytesInFreeList < PageSize)
432 return 0; // No chance to release anything.
Dynamic Tools Teamebcf82d2020-02-25 14:23:34 -0800433 const uptr BytesPushed =
434 (Sci->Stats.PushedBlocks - Sci->ReleaseInfo.PushedBlocksAtLastRelease) *
435 BlockSize;
436 if (BytesPushed < PageSize)
Dynamic Tools Teamc283bab2019-10-07 17:37:39 +0000437 return 0; // Nothing new to release.
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000438
Kostya Kortchinsky394cc822020-06-17 10:31:53 -0700439 // Releasing smaller blocks is expensive, so we want to make sure that a
440 // significant amount of bytes are free, and that there has been a good
441 // amount of batches pushed to the freelist before attempting to release.
442 if (BlockSize < PageSize / 16U) {
443 if (!Force && BytesPushed < Sci->AllocatedUser / 16U)
444 return 0;
Kostya Kortchinskyc9369542021-02-10 10:17:18 -0800445 // We want 8x% to 9x% free bytes (the larger the block, the lower the %).
Kostya Kortchinsky394cc822020-06-17 10:31:53 -0700446 if ((BytesInFreeList * 100U) / Sci->AllocatedUser <
447 (100U - 1U - BlockSize / 16U))
448 return 0;
449 }
450
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000451 if (!Force) {
Kostya Kortchinskya51a8922020-11-02 14:27:11 -0800452 const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000453 if (IntervalMs < 0)
Dynamic Tools Teamc283bab2019-10-07 17:37:39 +0000454 return 0;
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000455 if (Sci->ReleaseInfo.LastReleaseAtNs +
Dynamic Tools Teamc5d5abc2020-01-27 14:03:21 -0800456 static_cast<u64>(IntervalMs) * 1000000 >
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000457 getMonotonicTime()) {
Dynamic Tools Teamc283bab2019-10-07 17:37:39 +0000458 return 0; // Memory was returned recently.
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000459 }
460 }
461
Kostya Kortchinskyab3e32b2020-11-09 15:14:49 -0800462 const uptr First = Sci->MinRegionIndex;
463 const uptr Last = Sci->MaxRegionIndex;
464 DCHECK_NE(Last, 0U);
465 DCHECK_LE(First, Last);
Kostya Kortchinskyd618cbf2020-07-16 16:13:04 -0700466 uptr TotalReleasedBytes = 0;
Kostya Kortchinskyab3e32b2020-11-09 15:14:49 -0800467 const uptr Base = First * RegionSize;
468 const uptr NumberOfRegions = Last - First + 1U;
469 ReleaseRecorder Recorder(Base);
Kostya Kortchinskybcd746b2020-08-24 14:13:12 -0700470 auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
471 return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
472 };
Kostya Kortchinskyc9369542021-02-10 10:17:18 -0800473 auto DecompactPtr = [](CompactPtrT CompactPtr) {
474 return reinterpret_cast<uptr>(CompactPtr);
475 };
476 releaseFreeMemoryToOS(Sci->FreeList, RegionSize, NumberOfRegions, BlockSize,
477 &Recorder, DecompactPtr, SkipRegion);
Kostya Kortchinskyab3e32b2020-11-09 15:14:49 -0800478 if (Recorder.getReleasedRangesCount() > 0) {
479 Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
480 Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
481 Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
482 TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000483 }
484 Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
Kostya Kortchinskyab3e32b2020-11-09 15:14:49 -0800485
Dynamic Tools Teamc283bab2019-10-07 17:37:39 +0000486 return TotalReleasedBytes;
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000487 }
488
Vitaly Buka5d3d7272021-04-29 01:19:51 -0700489 SizeClassInfo SizeClassInfoArray[NumClasses] = {};
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000490
Dynamic Tools Teamac403052020-02-06 15:46:05 -0800491 // Track the regions in use, 0 is unused, otherwise store ClassId + 1.
Vitaly Buka5d3d7272021-04-29 01:19:51 -0700492 ByteMap PossibleRegions = {};
493 atomic_s32 ReleaseToOsIntervalMs = {};
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000494 // Unless several threads request regions simultaneously from different size
495 // classes, the stash rarely contains more than 1 entry.
496 static constexpr uptr MaxStashedRegions = 4;
497 HybridMutex RegionsStashMutex;
Vitaly Buka5d3d7272021-04-29 01:19:51 -0700498 uptr NumberOfStashedRegions = 0;
499 uptr RegionsStash[MaxStashedRegions] = {};
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000500};
501
502} // namespace scudo
503
504#endif // SCUDO_PRIMARY32_H_