Alexey Samsonov | 3b2f9f4 | 2012-06-04 13:55:19 +0000 | [diff] [blame] | 1 | //===-- tsan_mutex.cc -----------------------------------------------------===// |
Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 2 | // |
| 3 | // The LLVM Compiler Infrastructure |
| 4 | // |
| 5 | // This file is distributed under the University of Illinois Open Source |
| 6 | // License. See LICENSE.TXT for details. |
| 7 | // |
| 8 | //===----------------------------------------------------------------------===// |
| 9 | // |
| 10 | // This file is a part of ThreadSanitizer (TSan), a race detector. |
| 11 | // |
| 12 | //===----------------------------------------------------------------------===// |
Alexey Samsonov | 58a3c58 | 2012-06-18 08:44:30 +0000 | [diff] [blame] | 13 | #include "sanitizer_common/sanitizer_libc.h" |
Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 14 | #include "tsan_mutex.h" |
| 15 | #include "tsan_platform.h" |
| 16 | #include "tsan_rtl.h" |
| 17 | |
| 18 | namespace __tsan { |
| 19 | |
| 20 | // Simple reader-writer spin-mutex. Optimized for not-so-contended case. |
 | 21 | // Readers have preference, can possibly starve writers. |
| 22 | |
| 23 | // The table fixes what mutexes can be locked under what mutexes. |
| 24 | // E.g. if the row for MutexTypeThreads contains MutexTypeReport, |
| 25 | // then Report mutex can be locked while under Threads mutex. |
| 26 | // The leaf mutexes can be locked under any other mutexes. |
| 27 | // Recursive locking is not supported. |
// Sentinel value used inside CanLockTab rows: marks that mutex type as a
// "leaf" (it may be locked under any other mutex, and must have no edges
// of its own).
const MutexType MutexTypeLeaf = (MutexType)-1;
// Row i lists the mutex types that may be acquired while already holding a
// mutex of type i (see the comment block above for details).
static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
  /*0 MutexTypeInvalid*/ {},
  /*1 MutexTypeTrace*/ {MutexTypeLeaf},
  /*2 MutexTypeThreads*/ {MutexTypeReport},
  /*3 MutexTypeReport*/ {},
  /*4 MutexTypeSyncVar*/ {},
  /*5 MutexTypeSyncTab*/ {MutexTypeSyncVar},
  /*6 MutexTypeSlab*/ {MutexTypeLeaf},
  /*7 MutexTypeAnnotations*/ {},
  /*8 MutexTypeAtExit*/ {MutexTypeSyncTab},
};

// Direct "can lock" adjacency matrix (table edges plus edges into every
// leaf), built once by InitializeMutex() and consulted by
// DeadlockDetector::Lock().
static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
| 42 | |
| 43 | void InitializeMutex() { |
| 44 | // Build the "can lock" adjacency matrix. |
| 45 | // If [i][j]==true, then one can lock mutex j while under mutex i. |
| 46 | const int N = MutexTypeCount; |
| 47 | int cnt[N] = {}; |
| 48 | bool leaf[N] = {}; |
| 49 | for (int i = 1; i < N; i++) { |
| 50 | for (int j = 0; j < N; j++) { |
Alexey Samsonov | 046248c | 2012-09-13 11:54:41 +0000 | [diff] [blame] | 51 | MutexType z = CanLockTab[i][j]; |
Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 52 | if (z == MutexTypeInvalid) |
| 53 | continue; |
| 54 | if (z == MutexTypeLeaf) { |
| 55 | CHECK(!leaf[i]); |
| 56 | leaf[i] = true; |
| 57 | continue; |
| 58 | } |
Alexey Samsonov | 046248c | 2012-09-13 11:54:41 +0000 | [diff] [blame] | 59 | CHECK(!CanLockAdj[i][(int)z]); |
| 60 | CanLockAdj[i][(int)z] = true; |
Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 61 | cnt[i]++; |
| 62 | } |
| 63 | } |
| 64 | for (int i = 0; i < N; i++) { |
| 65 | CHECK(!leaf[i] || cnt[i] == 0); |
| 66 | } |
| 67 | // Add leaf mutexes. |
| 68 | for (int i = 0; i < N; i++) { |
| 69 | if (!leaf[i]) |
| 70 | continue; |
| 71 | for (int j = 0; j < N; j++) { |
| 72 | if (i == j || leaf[j] || j == MutexTypeInvalid) |
| 73 | continue; |
| 74 | CHECK(!CanLockAdj[j][i]); |
| 75 | CanLockAdj[j][i] = true; |
| 76 | } |
| 77 | } |
| 78 | // Build the transitive closure. |
| 79 | bool CanLockAdj2[MutexTypeCount][MutexTypeCount]; |
| 80 | for (int i = 0; i < N; i++) { |
| 81 | for (int j = 0; j < N; j++) { |
| 82 | CanLockAdj2[i][j] = CanLockAdj[i][j]; |
| 83 | } |
| 84 | } |
| 85 | for (int k = 0; k < N; k++) { |
| 86 | for (int i = 0; i < N; i++) { |
| 87 | for (int j = 0; j < N; j++) { |
| 88 | if (CanLockAdj2[i][k] && CanLockAdj2[k][j]) { |
| 89 | CanLockAdj2[i][j] = true; |
| 90 | } |
| 91 | } |
| 92 | } |
| 93 | } |
| 94 | #if 0 |
Alexey Samsonov | ad9d65f | 2012-11-02 12:17:51 +0000 | [diff] [blame^] | 95 | Printf("Can lock graph:\n"); |
Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 96 | for (int i = 0; i < N; i++) { |
| 97 | for (int j = 0; j < N; j++) { |
Alexey Samsonov | ad9d65f | 2012-11-02 12:17:51 +0000 | [diff] [blame^] | 98 | Printf("%d ", CanLockAdj[i][j]); |
Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 99 | } |
Alexey Samsonov | ad9d65f | 2012-11-02 12:17:51 +0000 | [diff] [blame^] | 100 | Printf("\n"); |
Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 101 | } |
Alexey Samsonov | ad9d65f | 2012-11-02 12:17:51 +0000 | [diff] [blame^] | 102 | Printf("Can lock graph closure:\n"); |
Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 103 | for (int i = 0; i < N; i++) { |
| 104 | for (int j = 0; j < N; j++) { |
Alexey Samsonov | ad9d65f | 2012-11-02 12:17:51 +0000 | [diff] [blame^] | 105 | Printf("%d ", CanLockAdj2[i][j]); |
Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 106 | } |
Alexey Samsonov | ad9d65f | 2012-11-02 12:17:51 +0000 | [diff] [blame^] | 107 | Printf("\n"); |
Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 108 | } |
| 109 | #endif |
| 110 | // Verify that the graph is acyclic. |
| 111 | for (int i = 0; i < N; i++) { |
| 112 | if (CanLockAdj2[i][i]) { |
Alexey Samsonov | ad9d65f | 2012-11-02 12:17:51 +0000 | [diff] [blame^] | 113 | Printf("Mutex %d participates in a cycle\n", i); |
Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 114 | Die(); |
| 115 | } |
| 116 | } |
| 117 | } |
| 118 | |
// Intentionally empty: the per-thread detector state (locked_, seq_) must
// already be usable before construction runs.
DeadlockDetector::DeadlockDetector() {
  // Rely on zero initialization because some mutexes can be locked before ctor.
}
| 122 | |
| 123 | void DeadlockDetector::Lock(MutexType t) { |
Alexey Samsonov | ad9d65f | 2012-11-02 12:17:51 +0000 | [diff] [blame^] | 124 | // Printf("LOCK %d @%zu\n", t, seq_ + 1); |
Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 125 | u64 max_seq = 0; |
| 126 | u64 max_idx = MutexTypeInvalid; |
| 127 | for (int i = 0; i != MutexTypeCount; i++) { |
| 128 | if (locked_[i] == 0) |
| 129 | continue; |
| 130 | CHECK_NE(locked_[i], max_seq); |
| 131 | if (max_seq < locked_[i]) { |
| 132 | max_seq = locked_[i]; |
| 133 | max_idx = i; |
| 134 | } |
| 135 | } |
| 136 | locked_[t] = ++seq_; |
| 137 | if (max_idx == MutexTypeInvalid) |
| 138 | return; |
Alexey Samsonov | ad9d65f | 2012-11-02 12:17:51 +0000 | [diff] [blame^] | 139 | // Printf(" last %d @%zu\n", max_idx, max_seq); |
Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 140 | if (!CanLockAdj[max_idx][t]) { |
Alexey Samsonov | ad9d65f | 2012-11-02 12:17:51 +0000 | [diff] [blame^] | 141 | Printf("ThreadSanitizer: internal deadlock detected\n"); |
| 142 | Printf("ThreadSanitizer: can't lock %d while under %zu\n", |
Alexey Samsonov | 51ae983 | 2012-06-06 13:11:29 +0000 | [diff] [blame] | 143 | t, (uptr)max_idx); |
Dmitry Vyukov | 191f2f7 | 2012-08-30 13:02:30 +0000 | [diff] [blame] | 144 | CHECK(0); |
Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 145 | } |
| 146 | } |
| 147 | |
| 148 | void DeadlockDetector::Unlock(MutexType t) { |
Alexey Samsonov | ad9d65f | 2012-11-02 12:17:51 +0000 | [diff] [blame^] | 149 | // Printf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]); |
Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 150 | CHECK(locked_[t]); |
| 151 | locked_[t] = 0; |
| 152 | } |
| 153 | |
// Encoding of Mutex::state_: bit 0 (kWriteLock) is the writer flag; each
// active reader adds kReadLock (2) to the value; 0 means unlocked.
const uptr kUnlocked = 0;
const uptr kWriteLock = 1;
const uptr kReadLock = 2;
| 157 | |
| 158 | class Backoff { |
| 159 | public: |
| 160 | Backoff() |
| 161 | : iter_() { |
| 162 | } |
| 163 | |
| 164 | bool Do() { |
| 165 | if (iter_++ < kActiveSpinIters) |
| 166 | proc_yield(kActiveSpinCnt); |
| 167 | else |
Alexey Samsonov | 58a3c58 | 2012-06-18 08:44:30 +0000 | [diff] [blame] | 168 | internal_sched_yield(); |
Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 169 | return true; |
| 170 | } |
| 171 | |
| 172 | u64 Contention() const { |
| 173 | u64 active = iter_ % kActiveSpinIters; |
| 174 | u64 passive = iter_ - active; |
| 175 | return active + 10 * passive; |
| 176 | } |
| 177 | |
| 178 | private: |
| 179 | int iter_; |
| 180 | static const int kActiveSpinIters = 10; |
| 181 | static const int kActiveSpinCnt = 20; |
| 182 | }; |
| 183 | |
// Constructs an unlocked mutex of the given type; type_ is kept only in
// debug builds (for the deadlock detector) and stat_type_ only when stats
// collection is compiled in.
Mutex::Mutex(MutexType type, StatType stat_type) {
  CHECK_GT(type, MutexTypeInvalid);
  CHECK_LT(type, MutexTypeCount);
#if TSAN_DEBUG
  type_ = type;
#endif
#if TSAN_COLLECT_STATS
  stat_type_ = stat_type;
#endif
  atomic_store(&state_, kUnlocked, memory_order_relaxed);
}
| 195 | |
// A mutex must not be destroyed while held (by a writer or any reader).
Mutex::~Mutex() {
  CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
}
| 199 | |
| 200 | void Mutex::Lock() { |
Dmitry Vyukov | 03d32ec | 2012-07-05 16:18:28 +0000 | [diff] [blame] | 201 | #if TSAN_DEBUG && !TSAN_GO |
Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 202 | cur_thread()->deadlock_detector.Lock(type_); |
| 203 | #endif |
| 204 | uptr cmp = kUnlocked; |
| 205 | if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock, |
| 206 | memory_order_acquire)) |
| 207 | return; |
| 208 | for (Backoff backoff; backoff.Do();) { |
| 209 | if (atomic_load(&state_, memory_order_relaxed) == kUnlocked) { |
| 210 | cmp = kUnlocked; |
| 211 | if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock, |
| 212 | memory_order_acquire)) { |
| 213 | #if TSAN_COLLECT_STATS |
| 214 | StatInc(cur_thread(), stat_type_, backoff.Contention()); |
| 215 | #endif |
| 216 | return; |
| 217 | } |
| 218 | } |
| 219 | } |
| 220 | } |
| 221 | |
| 222 | void Mutex::Unlock() { |
| 223 | uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release); |
| 224 | (void)prev; |
| 225 | DCHECK_NE(prev & kWriteLock, 0); |
Dmitry Vyukov | 03d32ec | 2012-07-05 16:18:28 +0000 | [diff] [blame] | 226 | #if TSAN_DEBUG && !TSAN_GO |
Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 227 | cur_thread()->deadlock_detector.Unlock(type_); |
| 228 | #endif |
| 229 | } |
| 230 | |
| 231 | void Mutex::ReadLock() { |
Dmitry Vyukov | 03d32ec | 2012-07-05 16:18:28 +0000 | [diff] [blame] | 232 | #if TSAN_DEBUG && !TSAN_GO |
Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 233 | cur_thread()->deadlock_detector.Lock(type_); |
| 234 | #endif |
| 235 | uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire); |
| 236 | if ((prev & kWriteLock) == 0) |
| 237 | return; |
| 238 | for (Backoff backoff; backoff.Do();) { |
| 239 | prev = atomic_load(&state_, memory_order_acquire); |
| 240 | if ((prev & kWriteLock) == 0) { |
| 241 | #if TSAN_COLLECT_STATS |
| 242 | StatInc(cur_thread(), stat_type_, backoff.Contention()); |
| 243 | #endif |
| 244 | return; |
| 245 | } |
| 246 | } |
| 247 | } |
| 248 | |
| 249 | void Mutex::ReadUnlock() { |
| 250 | uptr prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release); |
| 251 | (void)prev; |
| 252 | DCHECK_EQ(prev & kWriteLock, 0); |
| 253 | DCHECK_GT(prev & ~kWriteLock, 0); |
Dmitry Vyukov | 03d32ec | 2012-07-05 16:18:28 +0000 | [diff] [blame] | 254 | #if TSAN_DEBUG && !TSAN_GO |
Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 255 | cur_thread()->deadlock_detector.Unlock(type_); |
| 256 | #endif |
| 257 | } |
| 258 | |
Dmitry Vyukov | 191f2f7 | 2012-08-30 13:02:30 +0000 | [diff] [blame] | 259 | void Mutex::CheckLocked() { |
| 260 | CHECK_NE(atomic_load(&state_, memory_order_relaxed), 0); |
| 261 | } |
| 262 | |
Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 263 | } // namespace __tsan |