Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 1 | //===-- tsan_mutex.cc -------------------------------------------*- C++ -*-===// |
| 2 | // |
| 3 | // The LLVM Compiler Infrastructure |
| 4 | // |
| 5 | // This file is distributed under the University of Illinois Open Source |
| 6 | // License. See LICENSE.TXT for details. |
| 7 | // |
| 8 | //===----------------------------------------------------------------------===// |
| 9 | // |
| 10 | // This file is a part of ThreadSanitizer (TSan), a race detector. |
| 11 | // |
| 12 | //===----------------------------------------------------------------------===// |
| 13 | #include "tsan_mutex.h" |
| 14 | #include "tsan_platform.h" |
| 15 | #include "tsan_rtl.h" |
| 16 | |
| 17 | namespace __tsan { |
| 18 | |
| 19 | // Simple reader-writer spin-mutex. Optimized for not-so-contended case. |
// Readers have preference, can possibly starve writers.
| 21 | |
| 22 | // The table fixes what mutexes can be locked under what mutexes. |
| 23 | // E.g. if the row for MutexTypeThreads contains MutexTypeReport, |
| 24 | // then Report mutex can be locked while under Threads mutex. |
| 25 | // The leaf mutexes can be locked under any other mutexes. |
| 26 | // Recursive locking is not supported. |
// Sentinel pseudo-type: marks a row's mutex as a "leaf".
// A leaf can be locked under any other mutex, but nothing can be locked
// under it (enforced in InitializeMutex).
const MutexType MutexTypeLeaf = (MutexType)-1;
// Direct lock-order edges: row i lists the mutex types that may be
// acquired while mutex type i is already held.
static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
  /*0 MutexTypeInvalid*/     {},
  /*1 MutexTypeTrace*/       {MutexTypeLeaf},
  /*2 MutexTypeThreads*/     {MutexTypeReport},
  /*3 MutexTypeReport*/      {},
  /*4 MutexTypeSyncVar*/     {},
  /*5 MutexTypeSyncTab*/     {MutexTypeSyncVar},
  /*6 MutexTypeSlab*/        {MutexTypeLeaf},
  /*7 MutexTypeAnnotations*/ {},
  /*8 MutexTypeAtExit*/      {MutexTypeSyncTab},
};

// Adjacency matrix built from CanLockTab by InitializeMutex():
// CanLockAdj[i][j] == true iff mutex j may be locked while under mutex i.
static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
| 41 | |
| 42 | void InitializeMutex() { |
| 43 | // Build the "can lock" adjacency matrix. |
| 44 | // If [i][j]==true, then one can lock mutex j while under mutex i. |
| 45 | const int N = MutexTypeCount; |
| 46 | int cnt[N] = {}; |
| 47 | bool leaf[N] = {}; |
| 48 | for (int i = 1; i < N; i++) { |
| 49 | for (int j = 0; j < N; j++) { |
| 50 | int z = CanLockTab[i][j]; |
| 51 | if (z == MutexTypeInvalid) |
| 52 | continue; |
| 53 | if (z == MutexTypeLeaf) { |
| 54 | CHECK(!leaf[i]); |
| 55 | leaf[i] = true; |
| 56 | continue; |
| 57 | } |
| 58 | CHECK(!CanLockAdj[i][z]); |
| 59 | CanLockAdj[i][z] = true; |
| 60 | cnt[i]++; |
| 61 | } |
| 62 | } |
| 63 | for (int i = 0; i < N; i++) { |
| 64 | CHECK(!leaf[i] || cnt[i] == 0); |
| 65 | } |
| 66 | // Add leaf mutexes. |
| 67 | for (int i = 0; i < N; i++) { |
| 68 | if (!leaf[i]) |
| 69 | continue; |
| 70 | for (int j = 0; j < N; j++) { |
| 71 | if (i == j || leaf[j] || j == MutexTypeInvalid) |
| 72 | continue; |
| 73 | CHECK(!CanLockAdj[j][i]); |
| 74 | CanLockAdj[j][i] = true; |
| 75 | } |
| 76 | } |
| 77 | // Build the transitive closure. |
| 78 | bool CanLockAdj2[MutexTypeCount][MutexTypeCount]; |
| 79 | for (int i = 0; i < N; i++) { |
| 80 | for (int j = 0; j < N; j++) { |
| 81 | CanLockAdj2[i][j] = CanLockAdj[i][j]; |
| 82 | } |
| 83 | } |
| 84 | for (int k = 0; k < N; k++) { |
| 85 | for (int i = 0; i < N; i++) { |
| 86 | for (int j = 0; j < N; j++) { |
| 87 | if (CanLockAdj2[i][k] && CanLockAdj2[k][j]) { |
| 88 | CanLockAdj2[i][j] = true; |
| 89 | } |
| 90 | } |
| 91 | } |
| 92 | } |
| 93 | #if 0 |
| 94 | Printf("Can lock graph:\n"); |
| 95 | for (int i = 0; i < N; i++) { |
| 96 | for (int j = 0; j < N; j++) { |
| 97 | Printf("%d ", CanLockAdj[i][j]); |
| 98 | } |
| 99 | Printf("\n"); |
| 100 | } |
| 101 | Printf("Can lock graph closure:\n"); |
| 102 | for (int i = 0; i < N; i++) { |
| 103 | for (int j = 0; j < N; j++) { |
| 104 | Printf("%d ", CanLockAdj2[i][j]); |
| 105 | } |
| 106 | Printf("\n"); |
| 107 | } |
| 108 | #endif |
| 109 | // Verify that the graph is acyclic. |
| 110 | for (int i = 0; i < N; i++) { |
| 111 | if (CanLockAdj2[i][i]) { |
| 112 | Printf("Mutex %d participates in a cycle\n", i); |
| 113 | Die(); |
| 114 | } |
| 115 | } |
| 116 | } |
| 117 | |
DeadlockDetector::DeadlockDetector() {
  // Rely on zero initialization because some mutexes can be locked before ctor.
}
| 121 | |
// Records acquisition of mutex type t by the current thread and dies if
// acquiring t under the most recently taken held mutex violates the
// lock order encoded in CanLockAdj.
void DeadlockDetector::Lock(MutexType t) {
  // Printf("LOCK %d @%llu\n", t, seq_ + 1);
  // Find the most recently acquired mutex that is still held.
  // locked_[i] stores the acquisition sequence number (0 == not held).
  u64 max_seq = 0;
  u64 max_idx = MutexTypeInvalid;
  for (int i = 0; i != MutexTypeCount; i++) {
    if (locked_[i] == 0)
      continue;
    // Sequence numbers are unique, so ties are impossible.
    CHECK_NE(locked_[i], max_seq);
    if (max_seq < locked_[i]) {
      max_seq = locked_[i];
      max_idx = i;
    }
  }
  // Record t only after the scan above, so t is not compared against
  // itself (recursive locking is not supported).
  locked_[t] = ++seq_;
  if (max_idx == MutexTypeInvalid)
    return;  // No other mutex is held; nothing to check.
  // Printf("  last %d @%llu\n", max_idx, max_seq);
  if (!CanLockAdj[max_idx][t]) {
    Printf("ThreadSanitizer: internal deadlock detected\n");
    Printf("ThreadSanitizer: can't lock %d while under %llu\n", t, max_idx);
    Die();
  }
}
| 145 | |
// Records release of mutex type t; t must currently be marked as held.
void DeadlockDetector::Unlock(MutexType t) {
  // Printf("UNLO %d @%llu #%llu\n", t, seq_, locked_[t]);
  CHECK(locked_[t]);
  locked_[t] = 0;
}
| 151 | |
// Mutex state encoding: bit 0 is the writer flag; each reader adds
// kReadLock (2), so the remaining bits hold the reader count.
const uptr kUnlocked = 0;
const uptr kWriteLock = 1;
const uptr kReadLock = 2;
| 155 | |
| 156 | class Backoff { |
| 157 | public: |
| 158 | Backoff() |
| 159 | : iter_() { |
| 160 | } |
| 161 | |
| 162 | bool Do() { |
| 163 | if (iter_++ < kActiveSpinIters) |
| 164 | proc_yield(kActiveSpinCnt); |
| 165 | else |
Dmitry Vyukov | 15710c9 | 2012-05-22 11:33:03 +0000 | [diff] [blame^] | 166 | internal_yield(); |
Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 167 | return true; |
| 168 | } |
| 169 | |
| 170 | u64 Contention() const { |
| 171 | u64 active = iter_ % kActiveSpinIters; |
| 172 | u64 passive = iter_ - active; |
| 173 | return active + 10 * passive; |
| 174 | } |
| 175 | |
| 176 | private: |
| 177 | int iter_; |
| 178 | static const int kActiveSpinIters = 10; |
| 179 | static const int kActiveSpinCnt = 20; |
| 180 | }; |
| 181 | |
// Constructs an unlocked mutex. 'type' identifies this mutex in the
// deadlock detector's lock-order graph (debug builds only); 'stat_type'
// is the statistic bumped on contended acquisition (stats builds only).
Mutex::Mutex(MutexType type, StatType stat_type) {
  CHECK_GT(type, MutexTypeInvalid);
  CHECK_LT(type, MutexTypeCount);
#if TSAN_DEBUG
  type_ = type;
#endif
#if TSAN_COLLECT_STATS
  stat_type_ = stat_type;
#endif
  atomic_store(&state_, kUnlocked, memory_order_relaxed);
}
| 193 | |
// Destroying a mutex that is still locked is a usage bug.
Mutex::~Mutex() {
  CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
}
| 197 | |
| 198 | void Mutex::Lock() { |
| 199 | #if TSAN_DEBUG |
| 200 | cur_thread()->deadlock_detector.Lock(type_); |
| 201 | #endif |
| 202 | uptr cmp = kUnlocked; |
| 203 | if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock, |
| 204 | memory_order_acquire)) |
| 205 | return; |
| 206 | for (Backoff backoff; backoff.Do();) { |
| 207 | if (atomic_load(&state_, memory_order_relaxed) == kUnlocked) { |
| 208 | cmp = kUnlocked; |
| 209 | if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock, |
| 210 | memory_order_acquire)) { |
| 211 | #if TSAN_COLLECT_STATS |
| 212 | StatInc(cur_thread(), stat_type_, backoff.Contention()); |
| 213 | #endif |
| 214 | return; |
| 215 | } |
| 216 | } |
| 217 | } |
| 218 | } |
| 219 | |
// Releases the write lock by subtracting kWriteLock (clears bit 0).
void Mutex::Unlock() {
  uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
  (void)prev;
  // The write bit must have been set, i.e. we actually held the lock.
  DCHECK_NE(prev & kWriteLock, 0);
#if TSAN_DEBUG
  cur_thread()->deadlock_detector.Unlock(type_);
#endif
}
| 228 | |
// Acquires the mutex for reading. Readers register optimistically, which
// gives them preference over writers (see the comment at the top).
void Mutex::ReadLock() {
#if TSAN_DEBUG
  cur_thread()->deadlock_detector.Lock(type_);
#endif
  // Optimistically bump the reader count; if no writer held the mutex,
  // the acquisition is already complete.
  uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
  if ((prev & kWriteLock) == 0)
    return;
  // A writer holds the mutex. Our reader count is already registered,
  // so just spin with backoff until the write bit clears.
  for (Backoff backoff; backoff.Do();) {
    prev = atomic_load(&state_, memory_order_acquire);
    if ((prev & kWriteLock) == 0) {
#if TSAN_COLLECT_STATS
      StatInc(cur_thread(), stat_type_, backoff.Contention());
#endif
      return;
    }
  }
}
| 246 | |
// Releases a read lock by decrementing the reader count.
void Mutex::ReadUnlock() {
  uptr prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
  (void)prev;
  // No writer may hold the mutex while readers do...
  DCHECK_EQ(prev & kWriteLock, 0);
  // ...and the reader count must have been non-zero.
  DCHECK_GT(prev & ~kWriteLock, 0);
#if TSAN_DEBUG
  cur_thread()->deadlock_detector.Unlock(type_);
#endif
}
| 256 | |
// RAII writer lock: acquires 'm' for writing on construction and
// releases it on destruction.
Lock::Lock(Mutex *m)
  : m_(m) {
  m_->Lock();
}

Lock::~Lock() {
  m_->Unlock();
}
| 265 | |
// RAII reader lock: acquires 'm' for reading on construction and
// releases it on destruction.
ReadLock::ReadLock(Mutex *m)
  : m_(m) {
  m_->ReadLock();
}

ReadLock::~ReadLock() {
  m_->ReadUnlock();
}
| 274 | |
| 275 | } // namespace __tsan |