blob: 9dd24803b1836a705c8020f3def4b78412a64523 [file] [log] [blame]
Alexey Samsonov603c4be2012-06-04 13:55:19 +00001//===-- tsan_mutex.cc -----------------------------------------------------===//
Kostya Serebryany7ac41482012-05-10 13:48:04 +00002//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file is a part of ThreadSanitizer (TSan), a race detector.
11//
12//===----------------------------------------------------------------------===//
Alexey Samsonov0969bcf2012-06-18 08:44:30 +000013#include "sanitizer_common/sanitizer_libc.h"
Kostya Serebryany7ac41482012-05-10 13:48:04 +000014#include "tsan_mutex.h"
15#include "tsan_platform.h"
16#include "tsan_rtl.h"
17
18namespace __tsan {
19
// Simple reader-writer spin-mutex. Optimized for not-so-contended case.
// Readers have preference, can possibly starve writers.
22
23// The table fixes what mutexes can be locked under what mutexes.
24// E.g. if the row for MutexTypeThreads contains MutexTypeReport,
25// then Report mutex can be locked while under Threads mutex.
26// The leaf mutexes can be locked under any other mutexes.
27// Recursive locking is not supported.
Stephen Hines86277eb2015-03-23 12:06:32 -070028#if SANITIZER_DEBUG && !SANITIZER_GO
Kostya Serebryany7ac41482012-05-10 13:48:04 +000029const MutexType MutexTypeLeaf = (MutexType)-1;
30static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
Dmitry Vyukovf4e4f932012-12-21 11:30:14 +000031 /*0 MutexTypeInvalid*/ {},
32 /*1 MutexTypeTrace*/ {MutexTypeLeaf},
33 /*2 MutexTypeThreads*/ {MutexTypeReport},
Stephen Hines6a211c52014-07-21 00:49:56 -070034 /*3 MutexTypeReport*/ {MutexTypeSyncVar,
Dmitry Vyukov94d8b442013-04-30 12:00:40 +000035 MutexTypeMBlock, MutexTypeJavaMBlock},
Stephen Hines2d1fdb22014-05-28 23:58:16 -070036 /*4 MutexTypeSyncVar*/ {MutexTypeDDetector},
Stephen Hines6a211c52014-07-21 00:49:56 -070037 /*5 MutexTypeSyncTab*/ {}, // unused
Dmitry Vyukovf4e4f932012-12-21 11:30:14 +000038 /*6 MutexTypeSlab*/ {MutexTypeLeaf},
39 /*7 MutexTypeAnnotations*/ {},
Stephen Hines6a211c52014-07-21 00:49:56 -070040 /*8 MutexTypeAtExit*/ {MutexTypeSyncVar},
Dmitry Vyukovf4e4f932012-12-21 11:30:14 +000041 /*9 MutexTypeMBlock*/ {MutexTypeSyncVar},
42 /*10 MutexTypeJavaMBlock*/ {MutexTypeSyncVar},
Stephen Hines2d1fdb22014-05-28 23:58:16 -070043 /*11 MutexTypeDDetector*/ {},
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -080044 /*12 MutexTypeFired*/ {MutexTypeLeaf},
45 /*13 MutexTypeRacy*/ {MutexTypeLeaf},
Kostya Serebryany7ac41482012-05-10 13:48:04 +000046};
47
48static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
Dmitry Vyukovd6223792012-12-13 09:22:11 +000049#endif
Kostya Serebryany7ac41482012-05-10 13:48:04 +000050
// Builds the debug-only lock-order checking machinery from CanLockTab:
// expands leaf entries, computes the transitive closure of the "can lock
// under" relation, and dies if the resulting graph contains a cycle
// (which would make the lock-order rules self-contradictory).
// No-op in release and Go builds.
void InitializeMutex() {
#if SANITIZER_DEBUG && !SANITIZER_GO
  // Build the "can lock" adjacency matrix.
  // If [i][j]==true, then one can lock mutex j while under mutex i.
  const int N = MutexTypeCount;
  int cnt[N] = {};   // number of explicit (non-leaf) successors of row i
  bool leaf[N] = {}; // row i declared itself a leaf via MutexTypeLeaf
  for (int i = 1; i < N; i++) {
    for (int j = 0; j < N; j++) {
      MutexType z = CanLockTab[i][j];
      if (z == MutexTypeInvalid)
        continue;
      if (z == MutexTypeLeaf) {
        // A row may declare itself a leaf at most once.
        CHECK(!leaf[i]);
        leaf[i] = true;
        continue;
      }
      // Duplicate entries in a row indicate a typo in CanLockTab.
      CHECK(!CanLockAdj[i][(int)z]);
      CanLockAdj[i][(int)z] = true;
      cnt[i]++;
    }
  }
  // A leaf mutex must not list any other successors.
  for (int i = 0; i < N; i++) {
    CHECK(!leaf[i] || cnt[i] == 0);
  }
  // Add leaf mutexes: a leaf can be locked under any non-leaf mutex.
  for (int i = 0; i < N; i++) {
    if (!leaf[i])
      continue;
    for (int j = 0; j < N; j++) {
      if (i == j || leaf[j] || j == MutexTypeInvalid)
        continue;
      CHECK(!CanLockAdj[j][i]);
      CanLockAdj[j][i] = true;
    }
  }
  // Build the transitive closure (Warshall's algorithm over booleans).
  bool CanLockAdj2[MutexTypeCount][MutexTypeCount];
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      CanLockAdj2[i][j] = CanLockAdj[i][j];
    }
  }
  for (int k = 0; k < N; k++) {
    for (int i = 0; i < N; i++) {
      for (int j = 0; j < N; j++) {
        if (CanLockAdj2[i][k] && CanLockAdj2[k][j]) {
          CanLockAdj2[i][j] = true;
        }
      }
    }
  }
#if 0
  // Debugging aid: dump both matrices.
  Printf("Can lock graph:\n");
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      Printf("%d ", CanLockAdj[i][j]);
    }
    Printf("\n");
  }
  Printf("Can lock graph closure:\n");
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      Printf("%d ", CanLockAdj2[i][j]);
    }
    Printf("\n");
  }
#endif
  // Verify that the graph is acyclic: a self-edge in the closure means
  // some mutex can (transitively) be locked under itself.
  for (int i = 0; i < N; i++) {
    if (CanLockAdj2[i][i]) {
      Printf("Mutex %d participates in a cycle\n", i);
      Die();
    }
  }
#endif
}
128
// Intentionally empty: the detector must already be usable for mutexes
// locked during early startup, before this constructor runs.
InternalDeadlockDetector::InternalDeadlockDetector() {
  // Rely on zero initialization because some mutexes can be locked before ctor.
}
132
Stephen Hines86277eb2015-03-23 12:06:32 -0700133#if SANITIZER_DEBUG && !SANITIZER_GO
// Records acquisition of a mutex of type |t| by the current thread and
// verifies it against the precomputed CanLockAdj lock-order matrix.
// Dies (CHECK failure) if |t| may not be acquired under the most recently
// acquired mutex still held.
void InternalDeadlockDetector::Lock(MutexType t) {
  // Printf("LOCK %d @%zu\n", t, seq_ + 1);
  CHECK_GT(t, MutexTypeInvalid);
  CHECK_LT(t, MutexTypeCount);
  // Find the most recently locked mutex still held (largest seq number).
  u64 max_seq = 0;
  u64 max_idx = MutexTypeInvalid;
  for (int i = 0; i != MutexTypeCount; i++) {
    if (locked_[i] == 0)
      continue;
    // Sequence numbers are assigned from the monotonically increasing seq_,
    // so two held mutexes should never share one.
    CHECK_NE(locked_[i], max_seq);
    if (max_seq < locked_[i]) {
      max_seq = locked_[i];
      max_idx = i;
    }
  }
  // Record the acquisition before the order check so Unlock stays balanced.
  locked_[t] = ++seq_;
  if (max_idx == MutexTypeInvalid)
    return;  // no other mutex held, nothing to verify
  // Printf("  last %d @%zu\n", max_idx, max_seq);
  if (!CanLockAdj[max_idx][t]) {
    Printf("ThreadSanitizer: internal deadlock detected\n");
    Printf("ThreadSanitizer: can't lock %d while under %zu\n",
           t, (uptr)max_idx);
    CHECK(0);
  }
}
160
// Records release of a mutex of type |t|; it must have been registered
// via a prior Lock(t).
void InternalDeadlockDetector::Unlock(MutexType t) {
  // Printf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]);
  CHECK(locked_[t]);  // must be currently held
  locked_[t] = 0;
}
Stephen Hines6a211c52014-07-21 00:49:56 -0700166
167void InternalDeadlockDetector::CheckNoLocks() {
168 for (int i = 0; i != MutexTypeCount; i++) {
169 CHECK_EQ(locked_[i], 0);
170 }
171}
Dmitry Vyukovd6223792012-12-13 09:22:11 +0000172#endif
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000173
// Debug-only assertion that |thr| holds no internal runtime mutexes.
// Compiles to a no-op in release and Go builds.
void CheckNoLocks(ThreadState *thr) {
#if SANITIZER_DEBUG && !SANITIZER_GO
  thr->internal_deadlock_detector.CheckNoLocks();
#endif
}
179
// Mutex state encoding: bit 0 is the writer lock; each reader adds
// kReadLock (2), so the reader count occupies the remaining bits.
const uptr kUnlocked = 0;
const uptr kWriteLock = 1;
const uptr kReadLock = 2;
183
184class Backoff {
185 public:
186 Backoff()
187 : iter_() {
188 }
189
190 bool Do() {
191 if (iter_++ < kActiveSpinIters)
192 proc_yield(kActiveSpinCnt);
193 else
Alexey Samsonov0969bcf2012-06-18 08:44:30 +0000194 internal_sched_yield();
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000195 return true;
196 }
197
198 u64 Contention() const {
199 u64 active = iter_ % kActiveSpinIters;
200 u64 passive = iter_ - active;
201 return active + 10 * passive;
202 }
203
204 private:
205 int iter_;
206 static const int kActiveSpinIters = 10;
207 static const int kActiveSpinCnt = 20;
208};
209
// Creates an unlocked mutex. |type| feeds the debug-build lock-order
// checker; |stat_type| selects the contention statistic bucket when
// TSAN_COLLECT_STATS is enabled.
Mutex::Mutex(MutexType type, StatType stat_type) {
  CHECK_GT(type, MutexTypeInvalid);
  CHECK_LT(type, MutexTypeCount);
#if SANITIZER_DEBUG
  type_ = type;
#endif
#if TSAN_COLLECT_STATS
  stat_type_ = stat_type;
#endif
  atomic_store(&state_, kUnlocked, memory_order_relaxed);
}
221
222Mutex::~Mutex() {
223 CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
224}
225
226void Mutex::Lock() {
Stephen Hines86277eb2015-03-23 12:06:32 -0700227#if SANITIZER_DEBUG && !SANITIZER_GO
Stephen Hines2d1fdb22014-05-28 23:58:16 -0700228 cur_thread()->internal_deadlock_detector.Lock(type_);
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000229#endif
230 uptr cmp = kUnlocked;
231 if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
232 memory_order_acquire))
233 return;
234 for (Backoff backoff; backoff.Do();) {
235 if (atomic_load(&state_, memory_order_relaxed) == kUnlocked) {
236 cmp = kUnlocked;
237 if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
238 memory_order_acquire)) {
Stephen Hines86277eb2015-03-23 12:06:32 -0700239#if TSAN_COLLECT_STATS && !SANITIZER_GO
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000240 StatInc(cur_thread(), stat_type_, backoff.Contention());
241#endif
242 return;
243 }
244 }
245 }
246}
247
248void Mutex::Unlock() {
249 uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
250 (void)prev;
251 DCHECK_NE(prev & kWriteLock, 0);
Stephen Hines86277eb2015-03-23 12:06:32 -0700252#if SANITIZER_DEBUG && !SANITIZER_GO
Stephen Hines2d1fdb22014-05-28 23:58:16 -0700253 cur_thread()->internal_deadlock_detector.Unlock(type_);
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000254#endif
255}
256
257void Mutex::ReadLock() {
Stephen Hines86277eb2015-03-23 12:06:32 -0700258#if SANITIZER_DEBUG && !SANITIZER_GO
Stephen Hines2d1fdb22014-05-28 23:58:16 -0700259 cur_thread()->internal_deadlock_detector.Lock(type_);
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000260#endif
261 uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
262 if ((prev & kWriteLock) == 0)
263 return;
264 for (Backoff backoff; backoff.Do();) {
265 prev = atomic_load(&state_, memory_order_acquire);
266 if ((prev & kWriteLock) == 0) {
Stephen Hines86277eb2015-03-23 12:06:32 -0700267#if TSAN_COLLECT_STATS && !SANITIZER_GO
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000268 StatInc(cur_thread(), stat_type_, backoff.Contention());
269#endif
270 return;
271 }
272 }
273}
274
275void Mutex::ReadUnlock() {
276 uptr prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
277 (void)prev;
278 DCHECK_EQ(prev & kWriteLock, 0);
279 DCHECK_GT(prev & ~kWriteLock, 0);
Stephen Hines86277eb2015-03-23 12:06:32 -0700280#if SANITIZER_DEBUG && !SANITIZER_GO
Stephen Hines2d1fdb22014-05-28 23:58:16 -0700281 cur_thread()->internal_deadlock_detector.Unlock(type_);
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000282#endif
283}
284
Dmitry Vyukovff35f1d2012-08-30 13:02:30 +0000285void Mutex::CheckLocked() {
286 CHECK_NE(atomic_load(&state_, memory_order_relaxed), 0);
287}
288
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000289} // namespace __tsan