//===-- tsd_shared.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_TSD_SHARED_H_
#define SCUDO_TSD_SHARED_H_

#include "tsd.h"

#if SCUDO_HAS_PLATFORM_TLS_SLOT
// This is a platform-provided header that needs to be on the include path when
// Scudo is compiled. It must declare a function with the prototype:
//   uintptr_t *getPlatformAllocatorTlsSlot()
// that returns the address of a thread-local word of storage reserved for
// Scudo, which must be zero-initialized in newly created threads.
#include "scudo_platform_tls_slot.h"
#endif
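
// As a point of reference only (this snippet is not part of Scudo, and real
// platforms typically hand out a word carved from their own per-thread control
// block rather than C++ thread_local storage, which would defeat the purpose
// of the slot), a minimal header satisfying the contract could look like:
//
//   inline uintptr_t *getPlatformAllocatorTlsSlot() {
//     static thread_local uintptr_t Slot; // Zero-initialized in every thread.
//     return &Slot;
//   }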

namespace scudo {

template <class Allocator, u32 TSDsArraySize, u32 DefaultTSDCount>
struct TSDRegistrySharedT {
  void init(Allocator *Instance) {
    DCHECK(!Initialized);
    Instance->init();
    for (u32 I = 0; I < TSDsArraySize; I++)
      TSDs[I].init(Instance);
    const u32 NumberOfCPUs = getNumberOfCPUs();
    setNumberOfTSDs((NumberOfCPUs == 0) ? DefaultTSDCount
                                        : Min(NumberOfCPUs, DefaultTSDCount));
    Initialized = true;
  }

  void initOnceMaybe(Allocator *Instance) {
    ScopedLock L(Mutex);
    if (LIKELY(Initialized))
      return;
    init(Instance); // Sets Initialized.
  }

  void unmapTestOnly(Allocator *Instance) {
    for (u32 I = 0; I < TSDsArraySize; I++) {
      TSDs[I].commitBack(Instance);
      TSDs[I] = {};
    }
    setCurrentTSD(nullptr);
    Initialized = false;
  }

  ALWAYS_INLINE void initThreadMaybe(Allocator *Instance,
                                     UNUSED bool MinimalInit) {
    if (LIKELY(getCurrentTSD()))
      return;
    initThread(Instance);
  }

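  // Returns a TSD with its mutex held. The fast path relocks the context
  // already assigned to the calling thread; on contention, the slow path may
  // migrate the thread to a less contended context. *UnlockRequired is always
  // set to true in this registry: the caller must unlock the returned TSD.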
  ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
    TSD<Allocator> *TSD = getCurrentTSD();
    DCHECK(TSD);
    *UnlockRequired = true;
    // Try to lock the currently associated context.
    if (TSD->tryLock())
      return TSD;
    // If that fails, go down the slow path.
    if (TSDsArraySize == 1U) {
      // Only 1 TSD, no need to go any further.
      // The compiler will optimize this one way or the other.
      TSD->lock();
      return TSD;
    }
    return getTSDAndLockSlow(TSD);
  }

  void disable() {
    Mutex.lock();
    for (u32 I = 0; I < TSDsArraySize; I++)
      TSDs[I].lock();
  }

  void enable() {
    for (s32 I = static_cast<s32>(TSDsArraySize - 1); I >= 0; I--)
      TSDs[I].unlock();
    Mutex.unlock();
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::MaxTSDsCount)
      return setNumberOfTSDs(static_cast<u32>(Value));
    if (O == Option::ThreadDisableMemInit)
      setDisableMemInit(Value);
    // Not supported by the TSD Registry, but not an error either.
    return true;
  }

  bool getDisableMemInit() const { return *getTlsPtr() & 1; }

private:
  ALWAYS_INLINE uptr *getTlsPtr() const {
#if SCUDO_HAS_PLATFORM_TLS_SLOT
    return reinterpret_cast<uptr *>(getPlatformAllocatorTlsSlot());
#else
    static thread_local uptr ThreadTSD;
    return &ThreadTSD;
#endif
  }

  static_assert(alignof(TSD<Allocator>) >= 2, "");
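  // The per-thread TLS word is shared between two pieces of state: bit 0
  // holds the thread's "disable memory init" flag, and the remaining bits
  // hold the pointer to the TSD currently assigned to the thread. The
  // static_assert above guarantees that TSD pointers have their low bit
  // clear, so the two cannot collide.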

  ALWAYS_INLINE void setCurrentTSD(TSD<Allocator> *CurrentTSD) {
    *getTlsPtr() &= 1;
    *getTlsPtr() |= reinterpret_cast<uptr>(CurrentTSD);
  }

  ALWAYS_INLINE TSD<Allocator> *getCurrentTSD() {
    return reinterpret_cast<TSD<Allocator> *>(*getTlsPtr() & ~1ULL);
  }

  bool setNumberOfTSDs(u32 N) {
    ScopedLock L(MutexTSDs);
    if (N < NumberOfTSDs)
      return false;
    if (N > TSDsArraySize)
      N = TSDsArraySize;
    NumberOfTSDs = N;
    NumberOfCoPrimes = 0;
    // Compute all the coprimes of NumberOfTSDs. This will be used to walk the
    // array of TSDs in a random order. For details, see:
    // https://lemire.me/blog/2017/09/18/visiting-all-values-in-an-array-exactly-once-in-random-order/
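    // Stepping the index by a value coprime with NumberOfTSDs visits every
    // slot exactly once before cycling, so the walk in getTSDAndLockSlow()
    // amounts to a cheap pseudo-random permutation. For example, with
    // NumberOfTSDs == 6, the coprimes stored below are 1 and 5.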
    for (u32 I = 0; I < N; I++) {
      u32 A = I + 1;
      u32 B = N;
      // Find the GCD between I + 1 and N. If 1, they are coprime.
      while (B != 0) {
        const u32 T = A;
        A = B;
        B = T % B;
      }
      if (A == 1)
        CoPrimes[NumberOfCoPrimes++] = I + 1;
    }
    return true;
  }

  void setDisableMemInit(bool B) {
    *getTlsPtr() &= ~1ULL;
    *getTlsPtr() |= B;
  }

  NOINLINE void initThread(Allocator *Instance) {
    initOnceMaybe(Instance);
    // Initial context assignment is done in a plain round-robin fashion.
    const u32 Index = atomic_fetch_add(&CurrentIndex, 1U, memory_order_relaxed);
    setCurrentTSD(&TSDs[Index % NumberOfTSDs]);
    Instance->callPostInitCallback();
  }

  NOINLINE TSD<Allocator> *getTSDAndLockSlow(TSD<Allocator> *CurrentTSD) {
    // Use the Precedence of the current TSD as our random seed. Since we are
    // in the slow path, it means that tryLock failed, and as a result it's
    // very likely that said Precedence is non-zero.
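    // (The precedence value itself is maintained by the TSD, see tsd.h; this
    // registry only uses it as cheap entropy here and, below, as the key for
    // picking a fallback candidate.)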
    const u32 R = static_cast<u32>(CurrentTSD->getPrecedence());
    u32 N, Inc;
    {
      ScopedLock L(MutexTSDs);
      N = NumberOfTSDs;
      DCHECK_NE(NumberOfCoPrimes, 0U);
      Inc = CoPrimes[R % NumberOfCoPrimes];
    }
    if (N > 1U) {
      u32 Index = R % N;
      uptr LowestPrecedence = UINTPTR_MAX;
      TSD<Allocator> *CandidateTSD = nullptr;
      // Go randomly through at most 4 contexts and find a candidate.
      for (u32 I = 0; I < Min(4U, N); I++) {
        if (TSDs[Index].tryLock()) {
          setCurrentTSD(&TSDs[Index]);
          return &TSDs[Index];
        }
        const uptr Precedence = TSDs[Index].getPrecedence();
        // A 0 precedence here means another thread just locked this TSD.
        if (Precedence && Precedence < LowestPrecedence) {
          CandidateTSD = &TSDs[Index];
          LowestPrecedence = Precedence;
        }
        Index += Inc;
        if (Index >= N)
          Index -= N;
      }
      if (CandidateTSD) {
        CandidateTSD->lock();
        setCurrentTSD(CandidateTSD);
        return CandidateTSD;
      }
    }
    // Last resort, stick with the current one.
    CurrentTSD->lock();
    return CurrentTSD;
  }

  atomic_u32 CurrentIndex = {};
  u32 NumberOfTSDs = 0;
  u32 NumberOfCoPrimes = 0;
  u32 CoPrimes[TSDsArraySize] = {};
  bool Initialized = false;
  HybridMutex Mutex;
  HybridMutex MutexTSDs;
  TSD<Allocator> TSDs[TSDsArraySize];
};
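
// A configuration typically selects this registry by aliasing it with its
// allocator type and limits, for example (illustrative values only, the actual
// choices live in allocator_config.h):
//   template <class A> using TSDRegistryT = TSDRegistrySharedT<A, 8U, 2U>;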

} // namespace scudo

#endif // SCUDO_TSD_SHARED_H_