//===-- local_cache.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_LOCAL_CACHE_H_
#define SCUDO_LOCAL_CACHE_H_

#include "internal_defs.h"
#include "report.h"
#include "stats.h"

namespace scudo {

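// SizeClassAllocatorLocalCache is the per-thread front end of a
// SizeClassAllocator: each size class keeps a small local array of compact
// pointers that allocate() and deallocate() serve from, refilling from and
// draining back to the backing allocator one TransferBatch at a time, which
// amortizes the cost of going to the shared allocator.
//
// Usage sketch (hypothetical names; any backing allocator type providing
// SizeClassMap, CompactPtrT, popBatch/pushBatch and compactPtr/decompactPtr
// fits the calls below):
//   SizeClassAllocatorLocalCache<Primary> Cache;
//   Cache.init(&GStats, &PrimaryInstance); // GStats may be nullptr.
//   if (void *P = Cache.allocate(ClassId))
//     Cache.deallocate(ClassId, P);
//   Cache.destroy(&GStats);                // Drains whatever is still cached.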
template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
  typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
  typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;

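  // A fixed-capacity array of up to MaxNumCached compact pointers, used to
  // move chunks between the local cache and the backing allocator in bulk.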
  struct TransferBatch {
    static const u32 MaxNumCached = SizeClassMap::MaxNumCachedHint;
    void setFromArray(CompactPtrT *Array, u32 N) {
      DCHECK_LE(N, MaxNumCached);
      Count = N;
      memcpy(Batch, Array, sizeof(Batch[0]) * Count);
    }
    void clear() { Count = 0; }
    void add(CompactPtrT P) {
      DCHECK_LT(Count, MaxNumCached);
      Batch[Count++] = P;
    }
    void copyToArray(CompactPtrT *Array) const {
      memcpy(Array, Batch, sizeof(Batch[0]) * Count);
    }
    u32 getCount() const { return Count; }
    CompactPtrT get(u32 I) const {
      DCHECK_LT(I, Count);
      return Batch[I];
    }
    static u32 getMaxCached(uptr Size) {
      return Min(MaxNumCached, SizeClassMap::getMaxCachedHint(Size));
    }
    TransferBatch *Next;

  private:
    u32 Count;
    CompactPtrT Batch[MaxNumCached];
  };

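  // Two-phase initialization: initLinkerInitialized() assumes the object is
  // zero-initialized (e.g. static storage), while init() zeroes it first.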
  void initLinkerInitialized(GlobalStats *S, SizeClassAllocator *A) {
    Stats.initLinkerInitialized();
    if (LIKELY(S))
      S->link(&Stats);
    Allocator = A;
  }

  void init(GlobalStats *S, SizeClassAllocator *A) {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized(S, A);
  }

  void destroy(GlobalStats *S) {
    drain();
    if (LIKELY(S))
      S->unlink(&Stats);
  }

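  // Returns a chunk of the given class id, refilling the local list from the
  // backing allocator when it is empty; returns nullptr if the refill fails.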
  void *allocate(uptr ClassId) {
    DCHECK_LT(ClassId, NumClasses);
    PerClass *C = &PerClassArray[ClassId];
    if (C->Count == 0) {
      if (UNLIKELY(!refill(C, ClassId)))
        return nullptr;
      DCHECK_GT(C->Count, 0);
    }
    // We read ClassSize first before accessing Chunks because it's adjacent to
    // Count, while Chunks might be further off (depending on Count). That keeps
    // the memory accesses in close quarters.
    const uptr ClassSize = C->ClassSize;
    CompactPtrT CompactP = C->Chunks[--C->Count];
    Stats.add(StatAllocated, ClassSize);
    Stats.sub(StatFree, ClassSize);
    return Allocator->decompactPtr(ClassId, CompactP);
  }

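  // Stashes the chunk in the local list, first pushing half of the list back
  // to the backing allocator if it is full.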
  void deallocate(uptr ClassId, void *P) {
    CHECK_LT(ClassId, NumClasses);
    PerClass *C = &PerClassArray[ClassId];
    // We still have to initialize the cache in the event that the first heap
    // operation in a thread is a deallocation.
    initCacheMaybe(C);
    if (C->Count == C->MaxCount)
      drain(C, ClassId);
    // See comment in allocate() about memory accesses.
    const uptr ClassSize = C->ClassSize;
    C->Chunks[C->Count++] =
        Allocator->compactPtr(ClassId, reinterpret_cast<uptr>(P));
    Stats.sub(StatAllocated, ClassSize);
    Stats.add(StatFree, ClassSize);
  }

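  // Empties every per-class local list back into the backing allocator.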
  void drain() {
    for (uptr I = 0; I < NumClasses; I++) {
      PerClass *C = &PerClassArray[I];
      while (C->Count > 0)
        drain(C, I);
    }
  }

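  // Batch headers for regular classes are carved out of the dedicated batch
  // size class; for the batch class itself, the provided block B is reused as
  // the header, which avoids recursing into allocate().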
  TransferBatch *createBatch(uptr ClassId, void *B) {
    if (ClassId != SizeClassMap::BatchClassId)
      B = allocate(SizeClassMap::BatchClassId);
    return reinterpret_cast<TransferBatch *>(B);
  }

  LocalStats &getStats() { return Stats; }

private:
  static const uptr NumClasses = SizeClassMap::NumClasses;
  struct PerClass {
    u32 Count;
    u32 MaxCount;
    uptr ClassSize;
    CompactPtrT Chunks[2 * TransferBatch::MaxNumCached];
  };
  PerClass PerClassArray[NumClasses];
  LocalStats Stats;
  SizeClassAllocator *Allocator;

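  // The per-class limits are computed lazily on a thread's first heap
  // operation; MaxCount == 0 marks a cache that is not yet initialized.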
  ALWAYS_INLINE void initCacheMaybe(PerClass *C) {
    if (LIKELY(C->MaxCount))
      return;
    initCache();
    DCHECK_NE(C->MaxCount, 0U);
  }

  NOINLINE void initCache() {
    for (uptr I = 0; I < NumClasses; I++) {
      PerClass *P = &PerClassArray[I];
      const uptr Size = SizeClassAllocator::getSizeByClassId(I);
      P->MaxCount = 2 * TransferBatch::getMaxCached(Size);
      P->ClassSize = Size;
    }
  }

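  // Releases a batch header obtained from createBatch(). When refilling the
  // batch class itself, the header is one of the blocks just cached, so there
  // is nothing separate to free.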
  void destroyBatch(uptr ClassId, void *B) {
    if (ClassId != SizeClassMap::BatchClassId)
      deallocate(SizeClassMap::BatchClassId, B);
  }

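  // Pulls one TransferBatch from the backing allocator into the local list,
  // returning false if none could be obtained.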
  NOINLINE bool refill(PerClass *C, uptr ClassId) {
    initCacheMaybe(C);
    TransferBatch *B = Allocator->popBatch(this, ClassId);
    if (UNLIKELY(!B))
      return false;
    DCHECK_GT(B->getCount(), 0);
    C->Count = B->getCount();
    B->copyToArray(C->Chunks);
    B->clear();
    destroyBatch(ClassId, B);
    return true;
  }

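  // Pushes up to half of the class's maximum cache count back to the backing
  // allocator as a single TransferBatch, shifting the remaining chunks down
  // to the front of the list.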
  NOINLINE void drain(PerClass *C, uptr ClassId) {
    const u32 Count = Min(C->MaxCount / 2, C->Count);
    TransferBatch *B =
        createBatch(ClassId, Allocator->decompactPtr(ClassId, C->Chunks[0]));
    if (UNLIKELY(!B))
      reportOutOfMemory(
          SizeClassAllocator::getSizeByClassId(SizeClassMap::BatchClassId));
    B->setFromArray(&C->Chunks[0], Count);
    C->Count -= Count;
    for (uptr I = 0; I < C->Count; I++)
      C->Chunks[I] = C->Chunks[I + Count];
    Allocator->pushBatch(ClassId, B);
  }
};

} // namespace scudo

#endif // SCUDO_LOCAL_CACHE_H_