blob: bba0c277c6a7067931c4af72fe1720bfa708e97f [file] [log] [blame]
Dynamic Tools Team517193e2019-09-11 14:48:41 +00001//===-- tsd_exclusive.h -----------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#ifndef SCUDO_TSD_EXCLUSIVE_H_
10#define SCUDO_TSD_EXCLUSIVE_H_
11
12#include "tsd.h"
13
Dynamic Tools Team517193e2019-09-11 14:48:41 +000014namespace scudo {
15
// Per-thread state for the exclusive TSD registry, kept in a thread_local
// (see TSDRegistryExT::State). Bitfields keep the footprint minimal.
struct ThreadState {
  // Per-thread memory-init override; set via
  // TSDRegistryExT::setOption(Option::ThreadDisableMemInit).
  bool DisableMemInit : 1;
  enum {
    NotInitialized = 0, // initThread() has not run for this thread yet.
    Initialized,        // ThreadTSD is set up and usable.
    TornDown,           // pthread TLS destructor ran; ThreadTSD committed back.
  } InitState : 2;
};
24
// Forward declaration of the pthread TLS destructor (defined at the bottom of
// this file); it needs friend access to TSDRegistryExT's private members.
template <class Allocator> void teardownThread(void *Ptr);
26
27template <class Allocator> struct TSDRegistryExT {
Dynamic Tools Team517193e2019-09-11 14:48:41 +000028 void init(Allocator *Instance) {
Kostya Kortchinsky4a435d22021-05-25 15:00:58 -070029 DCHECK(!Initialized);
30 Instance->init();
31 CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
32 FallbackTSD.init(Instance);
33 Initialized = true;
Dynamic Tools Team517193e2019-09-11 14:48:41 +000034 }
35
Peter Collingbournecc3d4932020-12-21 18:39:03 -080036 void initOnceMaybe(Allocator *Instance) {
37 ScopedLock L(Mutex);
38 if (LIKELY(Initialized))
39 return;
Kostya Kortchinsky4a435d22021-05-25 15:00:58 -070040 init(Instance); // Sets Initialized.
Peter Collingbournecc3d4932020-12-21 18:39:03 -080041 }
42
Kostya Kortchinsky4a435d22021-05-25 15:00:58 -070043 void unmapTestOnly(Allocator *Instance) {
44 DCHECK(Instance);
45 if (reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey))) {
46 DCHECK_EQ(reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey)),
47 Instance);
48 ThreadTSD.commitBack(Instance);
49 ThreadTSD = {};
50 }
51 CHECK_EQ(pthread_key_delete(PThreadKey), 0);
52 PThreadKey = {};
53 FallbackTSD.commitBack(Instance);
54 FallbackTSD = {};
Vitaly Buka04571562021-04-01 12:40:28 -070055 State = {};
Kostya Kortchinsky4a435d22021-05-25 15:00:58 -070056 Initialized = false;
Vitaly Buka04571562021-04-01 12:40:28 -070057 }
Dynamic Tools Team517193e2019-09-11 14:48:41 +000058
59 ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
Peter Collingbourne33f8e1a2020-09-09 19:15:26 -070060 if (LIKELY(State.InitState != ThreadState::NotInitialized))
Dynamic Tools Team517193e2019-09-11 14:48:41 +000061 return;
62 initThread(Instance, MinimalInit);
63 }
64
65 ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
Peter Collingbourne33f8e1a2020-09-09 19:15:26 -070066 if (LIKELY(State.InitState == ThreadState::Initialized &&
Dynamic Tools Team2c0f7b62019-12-19 10:37:12 -080067 !atomic_load(&Disabled, memory_order_acquire))) {
Dynamic Tools Team517193e2019-09-11 14:48:41 +000068 *UnlockRequired = false;
69 return &ThreadTSD;
70 }
Dynamic Tools Team2e7fec22020-02-16 15:29:46 -080071 FallbackTSD.lock();
Dynamic Tools Team517193e2019-09-11 14:48:41 +000072 *UnlockRequired = true;
Dynamic Tools Team2e7fec22020-02-16 15:29:46 -080073 return &FallbackTSD;
Dynamic Tools Team517193e2019-09-11 14:48:41 +000074 }
75
Dynamic Tools Team2c0f7b62019-12-19 10:37:12 -080076 // To disable the exclusive TSD registry, we effectively lock the fallback TSD
77 // and force all threads to attempt to use it instead of their local one.
78 void disable() {
Dynamic Tools Team83eaa512020-01-09 11:43:16 -080079 Mutex.lock();
Dynamic Tools Team2e7fec22020-02-16 15:29:46 -080080 FallbackTSD.lock();
Dynamic Tools Team2c0f7b62019-12-19 10:37:12 -080081 atomic_store(&Disabled, 1U, memory_order_release);
82 }
83
84 void enable() {
85 atomic_store(&Disabled, 0U, memory_order_release);
Dynamic Tools Team2e7fec22020-02-16 15:29:46 -080086 FallbackTSD.unlock();
Dynamic Tools Team83eaa512020-01-09 11:43:16 -080087 Mutex.unlock();
Dynamic Tools Team2c0f7b62019-12-19 10:37:12 -080088 }
89
Kostya Kortchinskyc72ca562020-07-27 09:13:42 -070090 bool setOption(Option O, UNUSED sptr Value) {
Peter Collingbourne33f8e1a2020-09-09 19:15:26 -070091 if (O == Option::ThreadDisableMemInit)
92 State.DisableMemInit = Value;
Kostya Kortchinskyc72ca562020-07-27 09:13:42 -070093 if (O == Option::MaxTSDsCount)
94 return false;
95 return true;
96 }
97
Peter Collingbourne33f8e1a2020-09-09 19:15:26 -070098 bool getDisableMemInit() { return State.DisableMemInit; }
99
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000100private:
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000101 // Using minimal initialization allows for global initialization while keeping
102 // the thread specific structure untouched. The fallback structure will be
103 // used instead.
104 NOINLINE void initThread(Allocator *Instance, bool MinimalInit) {
105 initOnceMaybe(Instance);
106 if (UNLIKELY(MinimalInit))
107 return;
108 CHECK_EQ(
109 pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)), 0);
Kostya Kortchinsky4a435d22021-05-25 15:00:58 -0700110 ThreadTSD.init(Instance);
Peter Collingbourne33f8e1a2020-09-09 19:15:26 -0700111 State.InitState = ThreadState::Initialized;
Dynamic Tools Team83eaa512020-01-09 11:43:16 -0800112 Instance->callPostInitCallback();
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000113 }
114
Vitaly Buka5d3d7272021-04-29 01:19:51 -0700115 pthread_key_t PThreadKey = {};
116 bool Initialized = false;
117 atomic_u8 Disabled = {};
Dynamic Tools Team2e7fec22020-02-16 15:29:46 -0800118 TSD<Allocator> FallbackTSD;
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000119 HybridMutex Mutex;
Peter Collingbourne0d4ff652020-09-10 12:38:42 -0700120 static thread_local ThreadState State;
121 static thread_local TSD<Allocator> ThreadTSD;
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000122
123 friend void teardownThread<Allocator>(void *Ptr);
124};
125
// Out-of-line definitions for the per-thread static members declared in
// TSDRegistryExT (one instance of each per thread, per Allocator type).
template <class Allocator>
thread_local TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
template <class Allocator>
thread_local ThreadState TSDRegistryExT<Allocator>::State;
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000130
131template <class Allocator> void teardownThread(void *Ptr) {
132 typedef TSDRegistryExT<Allocator> TSDRegistryT;
133 Allocator *Instance = reinterpret_cast<Allocator *>(Ptr);
134 // The glibc POSIX thread-local-storage deallocation routine calls user
135 // provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
136 // We want to be called last since other destructors might call free and the
137 // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
138 // quarantine and swallowing the cache.
139 if (TSDRegistryT::ThreadTSD.DestructorIterations > 1) {
140 TSDRegistryT::ThreadTSD.DestructorIterations--;
141 // If pthread_setspecific fails, we will go ahead with the teardown.
142 if (LIKELY(pthread_setspecific(Instance->getTSDRegistry()->PThreadKey,
143 Ptr) == 0))
144 return;
145 }
146 TSDRegistryT::ThreadTSD.commitBack(Instance);
Peter Collingbourne33f8e1a2020-09-09 19:15:26 -0700147 TSDRegistryT::State.InitState = ThreadState::TornDown;
Dynamic Tools Team517193e2019-09-11 14:48:41 +0000148}
149
150} // namespace scudo
151
152#endif // SCUDO_TSD_EXCLUSIVE_H_