//===-- tsd_exclusive.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_TSD_EXCLUSIVE_H_
#define SCUDO_TSD_EXCLUSIVE_H_

#include "tsd.h"

namespace scudo {

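// Per-thread state for the exclusive registry. InitState tracks where the
// thread is in its lifetime, and DisableMemInit holds the per-thread value
// set through Option::ThreadDisableMemInit.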
struct ThreadState {
  bool DisableMemInit : 1;
  enum {
    NotInitialized = 0,
    Initialized,
    TornDown,
  } InitState : 2;
};

template <class Allocator> void teardownThread(void *Ptr);

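// The exclusive registry gives each thread its own thread_local TSD, reached
// without locking on the fast path. A single shared FallbackTSD, guarded by
// its own lock, serves minimally initialized threads, and every thread while
// the registry is disabled. An allocator config opts into this model with,
// for example:
//   template <class A> using TSDRegistryT = TSDRegistryExT<A>; // Exclusive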
template <class Allocator> struct TSDRegistryExT {
  void initLinkerInitialized(Allocator *Instance) {
    Instance->initLinkerInitialized();
    CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
    FallbackTSD.initLinkerInitialized(Instance);
    Initialized = true;
  }
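  // Full initialization zeroes the registry first, so init() is safe to call
  // on an instance that was not linker-initialized.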
  void init(Allocator *Instance) {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized(Instance);
  }

  void initOnceMaybe(Allocator *Instance) {
    ScopedLock L(Mutex);
    if (LIKELY(Initialized))
      return;
    initLinkerInitialized(Instance); // Sets Initialized.
  }

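  // Commits back the calling thread's TSD and resets its state; only meant
  // for use in tests.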
  void unmapTestOnly() {
    Allocator *Instance =
        reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey));
    if (!Instance)
      return;
    ThreadTSD.commitBack(Instance);
    State = {};
  }

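  // Called on the allocator's fast paths; only the first call on a given
  // thread does any real work.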
  ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
    if (LIKELY(State.InitState != ThreadState::NotInitialized))
      return;
    initThread(Instance, MinimalInit);
  }

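  // Fast path: the thread owns an initialized TSD and the registry is
  // enabled, so the thread_local TSD is returned with no locking. Otherwise
  // the shared FallbackTSD is locked and returned, and the caller must unlock
  // it. A typical calling sequence (sketch, mirroring how the combined
  // allocator uses the registry):
  //   bool UnlockRequired;
  //   TSD<Allocator> *TSD = Registry->getTSDAndLock(&UnlockRequired);
  //   ... use TSD->Cache ...
  //   if (UnlockRequired)
  //     TSD->unlock();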
  ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
    if (LIKELY(State.InitState == ThreadState::Initialized &&
               !atomic_load(&Disabled, memory_order_acquire))) {
      *UnlockRequired = false;
      return &ThreadTSD;
    }
    FallbackTSD.lock();
    *UnlockRequired = true;
    return &FallbackTSD;
  }

  // To disable the exclusive TSD registry, we effectively lock the fallback
  // TSD and force all threads to attempt to use it instead of their local one.
  void disable() {
    Mutex.lock();
    FallbackTSD.lock();
    atomic_store(&Disabled, 1U, memory_order_release);
  }

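  // Re-enables the registry, releasing the locks in the reverse of the order
  // disable() acquired them.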
  void enable() {
    atomic_store(&Disabled, 0U, memory_order_release);
    FallbackTSD.unlock();
    Mutex.unlock();
  }

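  // MaxTSDsCount only applies to the shared registry; with one TSD per
  // thread, the option is reported as unsupported.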
  bool setOption(Option O, sptr Value) {
    if (O == Option::ThreadDisableMemInit)
      State.DisableMemInit = Value;
    if (O == Option::MaxTSDsCount)
      return false;
    return true;
  }

  bool getDisableMemInit() { return State.DisableMemInit; }

private:
  // Using minimal initialization allows for global initialization while
  // keeping the thread-specific structure untouched; the fallback structure
  // will be used instead.
  NOINLINE void initThread(Allocator *Instance, bool MinimalInit) {
    initOnceMaybe(Instance);
    if (UNLIKELY(MinimalInit))
      return;
    CHECK_EQ(
        pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)), 0);
    ThreadTSD.initLinkerInitialized(Instance);
    State.InitState = ThreadState::Initialized;
    Instance->callPostInitCallback();
  }

  pthread_key_t PThreadKey;   // Its destructor runs teardownThread on exit.
  bool Initialized;           // Whether initLinkerInitialized has run.
  atomic_u8 Disabled;         // When set, all threads use FallbackTSD.
  TSD<Allocator> FallbackTSD;
  HybridMutex Mutex;
  static thread_local ThreadState State;
  static thread_local TSD<Allocator> ThreadTSD;

  friend void teardownThread<Allocator>(void *Ptr);
};

template <class Allocator>
thread_local TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
template <class Allocator>
thread_local ThreadState TSDRegistryExT<Allocator>::State;

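// Destructor registered with pthread_key_create; invoked by the threading
// library when an initialized thread exits.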
template <class Allocator> void teardownThread(void *Ptr) {
  typedef TSDRegistryExT<Allocator> TSDRegistryT;
  Allocator *Instance = reinterpret_cast<Allocator *>(Ptr);
  // The glibc POSIX thread-local-storage deallocation routine calls
  // user-provided destructors in a loop of up to
  // PTHREAD_DESTRUCTOR_ITERATIONS passes. We want to be called last, since
  // other destructors might call free and the like, so we wait until the
  // final pass before draining the quarantine and releasing the cache.
  if (TSDRegistryT::ThreadTSD.DestructorIterations > 1) {
    TSDRegistryT::ThreadTSD.DestructorIterations--;
    // If pthread_setspecific fails, we proceed with the teardown right away.
    if (LIKELY(pthread_setspecific(Instance->getTSDRegistry()->PThreadKey,
                                   Ptr) == 0))
      return;
  }
  TSDRegistryT::ThreadTSD.commitBack(Instance);
  TSDRegistryT::State.InitState = ThreadState::TornDown;
}


} // namespace scudo

#endif // SCUDO_TSD_EXCLUSIVE_H_