//===-- tsd_exclusive.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_TSD_EXCLUSIVE_H_
#define SCUDO_TSD_EXCLUSIVE_H_

#include "tsd.h"

namespace scudo {

struct ThreadState {
  bool DisableMemInit : 1;
  enum {
    NotInitialized = 0,
    Initialized,
    TornDown,
  } InitState : 2;
};
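
// Note: the three InitState values fit in the 2 bits declared above, so the
// whole struct is expected to pack into a single byte on common ABIs. A
// compile-time check could look like this (a sketch, not in the original):
//   static_assert(sizeof(ThreadState) == 1, "ThreadState should pack tightly");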

template <class Allocator> void teardownThread(void *Ptr);

template <class Allocator> struct TSDRegistryExT {
  void initLinkerInitialized(Allocator *Instance) {
    Instance->initLinkerInitialized();
    CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
    FallbackTSD.initLinkerInitialized(Instance);
    Initialized = true;
  }
  void init(Allocator *Instance) {
    memset(this, 0, sizeof(*this));
    initLinkerInitialized(Instance);
  }

  void initOnceMaybe(Allocator *Instance) {
    ScopedLock L(Mutex);
    if (LIKELY(Initialized))
      return;
    initLinkerInitialized(Instance); // Sets Initialized.
  }

  // Test-only helper: commits the local TSD back to the allocator and resets
  // the per-thread state so that a test can reinitialize from scratch.
  void unmapTestOnly() {
    Allocator *Instance =
        reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey));
    if (!Instance)
      return;
    ThreadTSD.commitBack(Instance);
    State = {};
  }

  ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
    if (LIKELY(State.InitState != ThreadState::NotInitialized))
      return;
    initThread(Instance, MinimalInit);
  }

  ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
    if (LIKELY(State.InitState == ThreadState::Initialized &&
               !atomic_load(&Disabled, memory_order_acquire))) {
      *UnlockRequired = false;
      return &ThreadTSD;
    }
    FallbackTSD.lock();
    *UnlockRequired = true;
    return &FallbackTSD;
  }
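
  // Illustrative caller pattern (a sketch, not part of this header): callers
  // are expected to pair getTSDAndLock() with a conditional unlock, since a
  // lock is only taken on the fallback path. `Registry` and the cache usage
  // below are hypothetical stand-ins for the combined allocator's code:
  //   bool UnlockRequired;
  //   TSD<Allocator> *TSD = Registry.getTSDAndLock(&UnlockRequired);
  //   void *P = TSD->Cache.allocate(ClassId);
  //   if (UnlockRequired)
  //     TSD->unlock();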

  // To disable the exclusive TSD registry, we effectively lock the fallback
  // TSD and force all threads to attempt to use it instead of their local
  // one.
  void disable() {
    Mutex.lock();
    FallbackTSD.lock();
    atomic_store(&Disabled, 1U, memory_order_release);
  }

  void enable() {
    atomic_store(&Disabled, 0U, memory_order_release);
    FallbackTSD.unlock();
    Mutex.unlock();
  }
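
  // Sketch of the intended pairing (not part of this file): disable() leaves
  // Mutex and FallbackTSD held, so it must be matched by enable(), typically
  // around fork(). `Instance` is a hypothetical allocator singleton:
  //   pthread_atfork([] { Instance.disable(); },  // prepare: lock everything
  //                  [] { Instance.enable(); },   // parent: unlock
  //                  [] { Instance.enable(); });  // child: unlock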

  bool setOption(Option O, sptr Value) {
    if (O == Option::ThreadDisableMemInit)
      State.DisableMemInit = Value;
    if (O == Option::MaxTSDsCount)
      return false;
    return true;
  }

  bool getDisableMemInit() { return State.DisableMemInit; }

private:
  // Using minimal initialization allows for global initialization while
  // keeping the thread-specific structure untouched. The fallback structure
  // will be used instead of the thread-local one.
  NOINLINE void initThread(Allocator *Instance, bool MinimalInit) {
    initOnceMaybe(Instance);
    if (UNLIKELY(MinimalInit))
      return;
    CHECK_EQ(
        pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)),
        0);
    ThreadTSD.initLinkerInitialized(Instance);
    State.InitState = ThreadState::Initialized;
    Instance->callPostInitCallback();
  }
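
  // Illustrative consequence of MinimalInit (a sketch of behavior, not code
  // from this file): after initThread(Instance, /*MinimalInit=*/true),
  // State.InitState is still NotInitialized, so getTSDAndLock() keeps routing
  // this thread to FallbackTSD until a full initialization happens.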

  pthread_key_t PThreadKey;
  bool Initialized;
  atomic_u8 Disabled;
  TSD<Allocator> FallbackTSD;
  HybridMutex Mutex;
  static thread_local ThreadState State;
  static thread_local TSD<Allocator> ThreadTSD;

  friend void teardownThread<Allocator>(void *Ptr);
};

template <class Allocator>
thread_local TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
template <class Allocator>
thread_local ThreadState TSDRegistryExT<Allocator>::State;

template <class Allocator> void teardownThread(void *Ptr) {
  typedef TSDRegistryExT<Allocator> TSDRegistryT;
  Allocator *Instance = reinterpret_cast<Allocator *>(Ptr);
  // The glibc POSIX thread-local-storage deallocation routine calls
  // user-provided destructors in a loop, up to PTHREAD_DESTRUCTOR_ITERATIONS
  // times. We want to be called last since other destructors might call free
  // and the like, so we re-register ourselves until the last iteration before
  // draining the quarantine and swallowing the cache.
  if (TSDRegistryT::ThreadTSD.DestructorIterations > 1) {
    TSDRegistryT::ThreadTSD.DestructorIterations--;
    // If pthread_setspecific fails, we will go ahead with the teardown.
    if (LIKELY(pthread_setspecific(Instance->getTSDRegistry()->PThreadKey,
                                   Ptr) == 0))
      return;
  }
  TSDRegistryT::ThreadTSD.commitBack(Instance);
  TSDRegistryT::State.InitState = ThreadState::TornDown;
}
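
// Illustrative timeline of the teardown above (a sketch; glibc defines
// PTHREAD_DESTRUCTOR_ITERATIONS as 4):
//   calls 1..3: DestructorIterations is decremented and the destructor
//               re-registers itself via pthread_setspecific(PThreadKey, Ptr);
//   call 4:     commitBack() drains the quarantine and local cache, and
//               InitState becomes TornDown.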

} // namespace scudo

#endif // SCUDO_TSD_EXCLUSIVE_H_