blob: 5d71f9ff4aa788b0ade0d56ce558a7a7c219338e [file] [log] [blame]
Alexey Samsonov3b2f9f42012-06-04 13:55:19 +00001//===-- tsan_sync.cc ------------------------------------------------------===//
Kostya Serebryany4ad375f2012-05-10 13:48:04 +00002//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file is a part of ThreadSanitizer (TSan), a race detector.
11//
12//===----------------------------------------------------------------------===//
Alexey Samsonov8bd90982012-06-07 09:50:16 +000013#include "sanitizer_common/sanitizer_placement_new.h"
Kostya Serebryany4ad375f2012-05-10 13:48:04 +000014#include "tsan_sync.h"
Kostya Serebryany4ad375f2012-05-10 13:48:04 +000015#include "tsan_rtl.h"
16#include "tsan_mman.h"
17
18namespace __tsan {
19
Dmitry Vyukov9b410fb2014-03-05 13:41:21 +000020void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);
21
// Constructs a synchronization object descriptor for user address 'addr'.
// 'uid' is a unique generation id used to detect stale references after
// the var is destroyed and the address is reused.
// All state fields are value-initialized; owner_tid starts as kInvalidTid
// because no thread holds the mutex yet.
SyncVar::SyncVar(uptr addr, u64 uid)
    : mtx(MutexTypeSyncVar, StatMtxSyncVar)
    , addr(addr)
    , uid(uid)
    , creation_stack_id()
    , owner_tid(kInvalidTid)
    , last_lock()
    , recursion()
    , is_rw()
    , is_recursive()
    , is_broken()
    , is_linker_init() {
}
35
// One shard of the sync-var hash table: its own mutex plus the head of a
// singly-linked list of SyncVars ('val' starts empty).
SyncTab::Part::Part()
    : mtx(MutexTypeSyncTab, StatMtxSyncTab)
    , val() {
}
40
// Nothing to do: the Part array members initialize themselves.
SyncTab::SyncTab() {
}
43
44SyncTab::~SyncTab() {
45 for (int i = 0; i < kPartCount; i++) {
46 while (tab_[i].val) {
47 SyncVar *tmp = tab_[i].val;
48 tab_[i].val = tmp->next;
49 DestroyAndFree(tmp);
50 }
51 }
52}
53
// Returns the SyncVar for 'addr', creating it if it does not exist yet.
// The returned var's own mutex is held: write-locked if 'write_lock',
// read-locked otherwise.
SyncVar* SyncTab::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                                     uptr addr, bool write_lock) {
  return GetAndLock(thr, pc, addr, write_lock, true);
}
58
59SyncVar* SyncTab::GetIfExistsAndLock(uptr addr, bool write_lock) {
60 return GetAndLock(0, 0, addr, write_lock, false);
61}
62
Dmitry Vyukov2547ac62012-12-20 17:29:34 +000063SyncVar* SyncTab::Create(ThreadState *thr, uptr pc, uptr addr) {
64 StatInc(thr, StatSyncCreated);
65 void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
66 const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
67 SyncVar *res = new(mem) SyncVar(addr, uid);
Dmitry Vyukov6e255772014-03-20 10:19:02 +000068 res->creation_stack_id = 0;
69 if (!kGoMode) // Go does not use them
70 res->creation_stack_id = CurrentStackId(thr, pc);
Dmitry Vyukov6cfab722014-02-28 10:48:13 +000071 if (flags()->detect_deadlocks)
Dmitry Vyukov9b410fb2014-03-05 13:41:21 +000072 DDMutexInit(thr, pc, res);
Dmitry Vyukov2547ac62012-12-20 17:29:34 +000073 return res;
74}
75
// Core lookup: finds (or, if 'create', creates) the SyncVar for 'addr'
// and returns it with its own mutex held (write-locked if 'write_lock',
// else read-locked).  Returns 0 only when !create and no var exists.
// Tries three stores in order: Java heap, primary-allocator MBlock list,
// and finally the sharded hash table.
SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock, bool create) {
#ifndef TSAN_GO
  { // NOLINT
    // Java-heap addresses are handled by the dedicated Java sync store.
    SyncVar *res = GetJavaSync(thr, pc, addr, write_lock, create);
    if (res)
      return res;
  }

  // Here we ask only PrimaryAllocator, because
  // SecondaryAllocator::PointerIsMine() is slow and we have fallback on
  // the hashmap anyway.
  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
    // Heap objects keep their sync vars chained off the owning MBlock.
    MBlock *b = user_mblock(thr, (void*)addr);
    CHECK_NE(b, 0);
    MBlock::ScopedLock l(b);
    SyncVar *res = 0;
    for (res = b->ListHead(); res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      if (!create)
        return 0;
      res = Create(thr, pc, addr);
      b->ListPush(res);
    }
    // Lock the var while still holding the block lock so it cannot be
    // removed between lookup and lock.
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
#endif

  Part *p = &tab_[PartIdx(addr)];
  {
    // Fast path: read-lock the shard, hoping the var already exists.
    ReadLock l(&p->mtx);
    for (SyncVar *res = p->val; res; res = res->next) {
      if (res->addr == addr) {
        if (write_lock)
          res->mtx.Lock();
        else
          res->mtx.ReadLock();
        return res;
      }
    }
  }
  if (!create)
    return 0;
  {
    // Slow path: write-lock the shard and re-scan, since another thread
    // may have inserted the var after we dropped the read lock.
    Lock l(&p->mtx);
    SyncVar *res = p->val;
    for (; res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      res = Create(thr, pc, addr);
      res->next = p->val;
      p->val = res;
    }
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
}
145
// Unlinks and returns the SyncVar for 'addr' (e.g. on memory free or
// mutex destruction), or 0 if none exists or the var is linker-initialized
// (linker-init vars live forever).  The caller owns the returned var and
// is expected to free it.
SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
#ifndef TSAN_GO
  { // NOLINT
    SyncVar *res = GetAndRemoveJavaSync(thr, pc, addr);
    if (res)
      return res;
  }
  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
    // Heap objects: unlink from the owning MBlock's list.
    MBlock *b = user_mblock(thr, (void*)addr);
    CHECK_NE(b, 0);
    SyncVar *res = 0;
    {
      MBlock::ScopedLock l(b);
      res = b->ListHead();
      if (res) {
        if (res->addr == addr) {
          if (res->is_linker_init)
            return 0;
          b->ListPop();
        } else {
          // Walk the rest of the list, tracking the link to patch.
          SyncVar **prev = &res->next;
          res = *prev;
          while (res) {
            if (res->addr == addr) {
              if (res->is_linker_init)
                return 0;
              *prev = res->next;
              break;
            }
            prev = &res->next;
            res = *prev;
          }
        }
        if (res) {
          StatInc(thr, StatSyncDestroyed);
          // Lock/unlock waits out any thread still holding the var's
          // mutex before the caller destroys it.
          res->mtx.Lock();
          res->mtx.Unlock();
        }
      }
    }
    return res;
  }
#endif

  // Hash-table path: unlink under the shard's write lock.
  Part *p = &tab_[PartIdx(addr)];
  SyncVar *res = 0;
  {
    Lock l(&p->mtx);
    SyncVar **prev = &p->val;
    res = *prev;
    while (res) {
      if (res->addr == addr) {
        if (res->is_linker_init)
          return 0;
        *prev = res->next;
        break;
      }
      prev = &res->next;
      res = *prev;
    }
  }
  if (res) {
    StatInc(thr, StatSyncDestroyed);
    // Wait out any thread still holding the var's mutex (see above).
    res->mtx.Lock();
    res->mtx.Unlock();
  }
  return res;
}
214
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000215int SyncTab::PartIdx(uptr addr) {
216 return (addr >> 3) % kPartCount;
217}
218
// Empty trace that owns no storage; a buffer is heap-allocated lazily
// on first Init()/ObtainCurrent().
StackTrace::StackTrace()
    : n_()
    , s_()
    , c_() {
}
224
// Trace backed by a caller-provided buffer of 'cnt' entries.  A non-zero
// c_ marks the storage as external, so Reset() will not free it.
StackTrace::StackTrace(uptr *buf, uptr cnt)
    : n_()
    , s_(buf)
    , c_(cnt) {
  CHECK_NE(buf, 0);
  CHECK_NE(cnt, 0);
}
232
// Releases owned storage (if any) via Reset().
StackTrace::~StackTrace() {
  Reset();
}
236
237void StackTrace::Reset() {
Dmitry Vyukovde1fd1c2012-06-22 11:08:55 +0000238 if (s_ && !c_) {
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000239 CHECK_NE(n_, 0);
240 internal_free(s_);
241 s_ = 0;
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000242 }
Dmitry Vyukovde1fd1c2012-06-22 11:08:55 +0000243 n_ = 0;
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000244}
245
246void StackTrace::Init(const uptr *pcs, uptr cnt) {
247 Reset();
248 if (cnt == 0)
249 return;
Dmitry Vyukovde1fd1c2012-06-22 11:08:55 +0000250 if (c_) {
251 CHECK_NE(s_, 0);
252 CHECK_LE(cnt, c_);
253 } else {
254 s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
255 }
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000256 n_ = cnt;
Dmitry Vyukovde1fd1c2012-06-22 11:08:55 +0000257 internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000258}
259
// Captures the calling thread's current shadow stack into this trace,
// optionally appending 'toppc' as the topmost frame (toppc == 0 means
// "no extra frame").  If capacity is limited, the oldest (bottom) frames
// are dropped so the newest ones are kept.
void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
  Reset();
  n_ = thr->shadow_stack_pos - thr->shadow_stack;
  // !!toppc is 1 iff an extra top frame will be appended.
  if (n_ + !!toppc == 0)
    return;
  uptr start = 0;  // First shadow-stack index to copy.
  if (c_) {
    // External buffer: truncate from the bottom to fit capacity c_.
    CHECK_NE(s_, 0);
    if (n_ + !!toppc > c_) {
      start = n_ - c_ + !!toppc;
      n_ = c_ - !!toppc;
    }
  } else {
    // Cap potentially huge stacks.
    if (n_ + !!toppc > kTraceStackSize) {
      start = n_ - kTraceStackSize + !!toppc;
      n_ = kTraceStackSize - !!toppc;
    }
    s_ = (uptr*)internal_alloc(MBlockStackTrace,
                               (n_ + !!toppc) * sizeof(s_[0]));
  }
  for (uptr i = 0; i < n_; i++)
    s_[i] = thr->shadow_stack[start + i];
  if (toppc) {
    s_[n_] = toppc;
    n_++;
  }
}
288
289void StackTrace::CopyFrom(const StackTrace& other) {
290 Reset();
291 Init(other.Begin(), other.Size());
292}
293
294bool StackTrace::IsEmpty() const {
295 return n_ == 0;
296}
297
// Number of recorded frames.
uptr StackTrace::Size() const {
  return n_;
}
301
// Returns frame 'i'; bounds-checked against the recorded count.
uptr StackTrace::Get(uptr i) const {
  CHECK_LT(i, n_);
  return s_[i];
}
306
// Pointer to the first frame (0 if the trace has no storage).
const uptr *StackTrace::Begin() const {
  return s_;
}
310
311} // namespace __tsan