blob: dc5a462a8081a88236285b7a32d260af3c372956 [file] [log] [blame]
//===-- tsan_mutex.cc -----------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
Alexey Samsonov0969bcf2012-06-18 08:44:30 +000013#include "sanitizer_common/sanitizer_libc.h"
Kostya Serebryany7ac41482012-05-10 13:48:04 +000014#include "tsan_mutex.h"
15#include "tsan_platform.h"
16#include "tsan_rtl.h"
17
18namespace __tsan {
19
// Simple reader-writer spin-mutex. Optimized for the not-so-contended case.
// Readers have preference and can potentially starve writers.

// The table fixes which mutexes can be locked under which mutexes.
// E.g. if the row for MutexTypeThreads contains MutexTypeReport,
// then the Report mutex can be locked while under the Threads mutex.
// The leaf mutexes can be locked under any other mutex.
// Recursive locking is not supported.
#if SANITIZER_DEBUG && !SANITIZER_GO
// Sentinel value: marks a mutex type as a "leaf", i.e. lockable while
// holding any other (non-leaf) mutex.
const MutexType MutexTypeLeaf = (MutexType)-1;
// Row i lists the mutex types that may be acquired while mutex i is held.
// InitializeMutex() expands this into CanLockAdj and verifies acyclicity.
static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
  /*0  MutexTypeInvalid*/     {},
  /*1  MutexTypeTrace*/       {MutexTypeLeaf},
  /*2  MutexTypeThreads*/     {MutexTypeReport},
  /*3  MutexTypeReport*/      {MutexTypeSyncVar,
                               MutexTypeMBlock, MutexTypeJavaMBlock},
  /*4  MutexTypeSyncVar*/     {MutexTypeDDetector},
  /*5  MutexTypeSyncTab*/     {},  // unused
  /*6  MutexTypeSlab*/        {MutexTypeLeaf},
  /*7  MutexTypeAnnotations*/ {},
  /*8  MutexTypeAtExit*/      {MutexTypeSyncVar},
  /*9  MutexTypeMBlock*/      {MutexTypeSyncVar},
  /*10 MutexTypeJavaMBlock*/  {MutexTypeSyncVar},
  /*11 MutexTypeDDetector*/   {},
};

// Adjacency matrix derived from CanLockTab (with leaf edges expanded);
// filled in by InitializeMutex(). CanLockAdj[i][j] means "j lockable under i".
static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
#endif
Kostya Serebryany7ac41482012-05-10 13:48:04 +000048
// Precomputes the lock-order matrix used by InternalDeadlockDetector
// and verifies that the declared lock-order graph is acyclic.
// Debug builds only; a no-op otherwise.
void InitializeMutex() {
#if SANITIZER_DEBUG && !SANITIZER_GO
  // Build the "can lock" adjacency matrix:
  // CanLockAdj[i][j] == true means mutex j may be locked while under mutex i.
  const int N = MutexTypeCount;
  int edges[N] = {};     // explicit outgoing edges per mutex type
  bool is_leaf[N] = {};  // rows marked with MutexTypeLeaf in CanLockTab
  for (int row = 1; row < N; row++) {
    for (int col = 0; col < N; col++) {
      MutexType t = CanLockTab[row][col];
      if (t == MutexTypeInvalid)
        continue;
      if (t == MutexTypeLeaf) {
        CHECK(!is_leaf[row]);
        is_leaf[row] = true;
        continue;
      }
      CHECK(!CanLockAdj[row][(int)t]);
      CanLockAdj[row][(int)t] = true;
      edges[row]++;
    }
  }
  // A leaf mutex must not declare explicit successors.
  for (int i = 0; i < N; i++) {
    CHECK(!is_leaf[i] || edges[i] == 0);
  }
  // A leaf mutex can be locked under any non-leaf mutex.
  for (int leaf = 0; leaf < N; leaf++) {
    if (!is_leaf[leaf])
      continue;
    for (int other = 0; other < N; other++) {
      if (other == leaf || is_leaf[other] || other == MutexTypeInvalid)
        continue;
      CHECK(!CanLockAdj[other][leaf]);
      CanLockAdj[other][leaf] = true;
    }
  }
  // Compute the transitive closure (Floyd-Warshall style reachability).
  bool closure[MutexTypeCount][MutexTypeCount];
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++)
      closure[i][j] = CanLockAdj[i][j];
  }
  for (int via = 0; via < N; via++) {
    for (int i = 0; i < N; i++) {
      for (int j = 0; j < N; j++) {
        if (closure[i][via] && closure[via][j])
          closure[i][j] = true;
      }
    }
  }
#if 0
  // Debug dump of both matrices.
  Printf("Can lock graph:\n");
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++)
      Printf("%d ", CanLockAdj[i][j]);
    Printf("\n");
  }
  Printf("Can lock graph closure:\n");
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++)
      Printf("%d ", closure[i][j]);
    Printf("\n");
  }
#endif
  // Verify acyclicity: a self-edge in the closure means some mutex can
  // (transitively) be locked while it is already held.
  for (int i = 0; i < N; i++) {
    if (closure[i][i]) {
      Printf("Mutex %d participates in a cycle\n", i);
      Die();
    }
  }
#endif
}
126
// Intentionally empty: the detector must be usable before its constructor
// runs (some mutexes are locked during early startup), so all state
// relies on zero initialization of static storage.
InternalDeadlockDetector::InternalDeadlockDetector() {
  // Rely on zero initialization because some mutexes can be locked before ctor.
}
130
Stephen Hines86277eb2015-03-23 12:06:32 -0700131#if SANITIZER_DEBUG && !SANITIZER_GO
Stephen Hines2d1fdb22014-05-28 23:58:16 -0700132void InternalDeadlockDetector::Lock(MutexType t) {
Alexey Samsonovb1fe3022012-11-02 12:17:51 +0000133 // Printf("LOCK %d @%zu\n", t, seq_ + 1);
Dmitry Vyukovad9da372012-12-06 12:16:15 +0000134 CHECK_GT(t, MutexTypeInvalid);
135 CHECK_LT(t, MutexTypeCount);
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000136 u64 max_seq = 0;
137 u64 max_idx = MutexTypeInvalid;
138 for (int i = 0; i != MutexTypeCount; i++) {
139 if (locked_[i] == 0)
140 continue;
141 CHECK_NE(locked_[i], max_seq);
142 if (max_seq < locked_[i]) {
143 max_seq = locked_[i];
144 max_idx = i;
145 }
146 }
147 locked_[t] = ++seq_;
148 if (max_idx == MutexTypeInvalid)
149 return;
Alexey Samsonovb1fe3022012-11-02 12:17:51 +0000150 // Printf(" last %d @%zu\n", max_idx, max_seq);
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000151 if (!CanLockAdj[max_idx][t]) {
Alexey Samsonovb1fe3022012-11-02 12:17:51 +0000152 Printf("ThreadSanitizer: internal deadlock detected\n");
153 Printf("ThreadSanitizer: can't lock %d while under %zu\n",
Alexey Samsonove9541012012-06-06 13:11:29 +0000154 t, (uptr)max_idx);
Dmitry Vyukovff35f1d2012-08-30 13:02:30 +0000155 CHECK(0);
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000156 }
157}
158
Stephen Hines2d1fdb22014-05-28 23:58:16 -0700159void InternalDeadlockDetector::Unlock(MutexType t) {
Alexey Samsonovb1fe3022012-11-02 12:17:51 +0000160 // Printf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]);
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000161 CHECK(locked_[t]);
162 locked_[t] = 0;
163}
Stephen Hines6a211c52014-07-21 00:49:56 -0700164
165void InternalDeadlockDetector::CheckNoLocks() {
166 for (int i = 0; i != MutexTypeCount; i++) {
167 CHECK_EQ(locked_[i], 0);
168 }
169}
Dmitry Vyukovd6223792012-12-13 09:22:11 +0000170#endif
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000171
// Debug-build assertion that the given thread holds no internal mutexes;
// compiles to a no-op in non-debug or Go builds.
void CheckNoLocks(ThreadState *thr) {
#if SANITIZER_DEBUG && !SANITIZER_GO
  thr->internal_deadlock_detector.CheckNoLocks();
#endif
}
177
// Encoding of Mutex::state_: 0 means unlocked, the low bit marks the
// writer, and each reader adds kReadLock (so readers are counted in the
// bits above the write bit).
const uptr kUnlocked = 0;
const uptr kWriteLock = 1;
const uptr kReadLock = 2;
181
182class Backoff {
183 public:
184 Backoff()
185 : iter_() {
186 }
187
188 bool Do() {
189 if (iter_++ < kActiveSpinIters)
190 proc_yield(kActiveSpinCnt);
191 else
Alexey Samsonov0969bcf2012-06-18 08:44:30 +0000192 internal_sched_yield();
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000193 return true;
194 }
195
196 u64 Contention() const {
197 u64 active = iter_ % kActiveSpinIters;
198 u64 passive = iter_ - active;
199 return active + 10 * passive;
200 }
201
202 private:
203 int iter_;
204 static const int kActiveSpinIters = 10;
205 static const int kActiveSpinCnt = 20;
206};
207
// Constructs an unlocked mutex. The type is kept only in debug builds
// (for deadlock detection); the stat type only when stats are collected.
Mutex::Mutex(MutexType type, StatType stat_type) {
  CHECK_GT(type, MutexTypeInvalid);
  CHECK_LT(type, MutexTypeCount);
#if SANITIZER_DEBUG
  type_ = type;
#endif
#if TSAN_COLLECT_STATS
  stat_type_ = stat_type;
#endif
  atomic_store(&state_, kUnlocked, memory_order_relaxed);
}
219
// A mutex must not be destroyed while held (by a writer or any reader).
Mutex::~Mutex() {
  CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
}
223
224void Mutex::Lock() {
Stephen Hines86277eb2015-03-23 12:06:32 -0700225#if SANITIZER_DEBUG && !SANITIZER_GO
Stephen Hines2d1fdb22014-05-28 23:58:16 -0700226 cur_thread()->internal_deadlock_detector.Lock(type_);
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000227#endif
228 uptr cmp = kUnlocked;
229 if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
230 memory_order_acquire))
231 return;
232 for (Backoff backoff; backoff.Do();) {
233 if (atomic_load(&state_, memory_order_relaxed) == kUnlocked) {
234 cmp = kUnlocked;
235 if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
236 memory_order_acquire)) {
Stephen Hines86277eb2015-03-23 12:06:32 -0700237#if TSAN_COLLECT_STATS && !SANITIZER_GO
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000238 StatInc(cur_thread(), stat_type_, backoff.Contention());
239#endif
240 return;
241 }
242 }
243 }
244}
245
246void Mutex::Unlock() {
247 uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
248 (void)prev;
249 DCHECK_NE(prev & kWriteLock, 0);
Stephen Hines86277eb2015-03-23 12:06:32 -0700250#if SANITIZER_DEBUG && !SANITIZER_GO
Stephen Hines2d1fdb22014-05-28 23:58:16 -0700251 cur_thread()->internal_deadlock_detector.Unlock(type_);
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000252#endif
253}
254
255void Mutex::ReadLock() {
Stephen Hines86277eb2015-03-23 12:06:32 -0700256#if SANITIZER_DEBUG && !SANITIZER_GO
Stephen Hines2d1fdb22014-05-28 23:58:16 -0700257 cur_thread()->internal_deadlock_detector.Lock(type_);
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000258#endif
259 uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
260 if ((prev & kWriteLock) == 0)
261 return;
262 for (Backoff backoff; backoff.Do();) {
263 prev = atomic_load(&state_, memory_order_acquire);
264 if ((prev & kWriteLock) == 0) {
Stephen Hines86277eb2015-03-23 12:06:32 -0700265#if TSAN_COLLECT_STATS && !SANITIZER_GO
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000266 StatInc(cur_thread(), stat_type_, backoff.Contention());
267#endif
268 return;
269 }
270 }
271}
272
273void Mutex::ReadUnlock() {
274 uptr prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
275 (void)prev;
276 DCHECK_EQ(prev & kWriteLock, 0);
277 DCHECK_GT(prev & ~kWriteLock, 0);
Stephen Hines86277eb2015-03-23 12:06:32 -0700278#if SANITIZER_DEBUG && !SANITIZER_GO
Stephen Hines2d1fdb22014-05-28 23:58:16 -0700279 cur_thread()->internal_deadlock_detector.Unlock(type_);
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000280#endif
281}
282
Dmitry Vyukovff35f1d2012-08-30 13:02:30 +0000283void Mutex::CheckLocked() {
284 CHECK_NE(atomic_load(&state_, memory_order_relaxed), 0);
285}
286
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000287} // namespace __tsan