//===-- tsan_sync.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {

SyncVar::SyncVar(uptr addr, u64 uid)
  : mtx(MutexTypeSyncVar, StatMtxSyncVar)
  , addr(addr)
  , uid(uid)
  , owner_tid(kInvalidTid)
  , last_lock()
  , recursion()
  , is_rw()
  , is_recursive()
  , is_broken()
  , is_linker_init() {
}

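// The sync table is split into kPartCount independent parts to reduce
// contention: each part holds a singly-linked list of SyncVars guarded by
// its own mutex.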
SyncTab::Part::Part()
  : mtx(MutexTypeSyncTab, StatMtxSyncTab)
  , val() {
}

SyncTab::SyncTab() {
}

SyncTab::~SyncTab() {
  for (int i = 0; i < kPartCount; i++) {
    while (tab_[i].val) {
      SyncVar *tmp = tab_[i].val;
      tab_[i].val = tmp->next;
      DestroyAndFree(tmp);
    }
  }
}

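// Returns the SyncVar for addr, creating it if it does not exist yet, with
// its mutex held (write-locked if write_lock is true, read-locked otherwise).
// A call site elsewhere in the runtime (e.g. the mutex event handlers) looks
// roughly like:
//   SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
//   ... update s ...
//   s->mtx.Unlock();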
SyncVar* SyncTab::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                                     uptr addr, bool write_lock) {
  return GetAndLock(thr, pc, addr, write_lock, true);
}

SyncVar* SyncTab::GetIfExistsAndLock(uptr addr, bool write_lock) {
  return GetAndLock(0, 0, addr, write_lock, false);
}

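// Allocates a fresh SyncVar from the internal allocator, stamps it with a
// unique id and, when deadlock detection is enabled, registers it with the
// deadlock detector.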
SyncVar* SyncTab::Create(ThreadState *thr, uptr pc, uptr addr) {
  Context *ctx = CTX();
  StatInc(thr, StatSyncCreated);
  void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
  const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
  SyncVar *res = new(mem) SyncVar(addr, uid);
#ifndef TSAN_GO
  res->creation_stack_id = CurrentStackId(thr, pc);
#endif
  if (flags()->detect_deadlocks)
    ctx->dd->MutexInit(&res->dd, res->creation_stack_id, res->GetId());
  return res;
}

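// Looks the SyncVar up in three places, in order: the Java heap, the list
// attached to the owning heap block (for addresses in the primary
// allocator), and finally the address-hashed table of parts.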
SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock, bool create) {
#ifndef TSAN_GO
  { // NOLINT
    SyncVar *res = GetJavaSync(thr, pc, addr, write_lock, create);
    if (res)
      return res;
  }

  // Here we ask only PrimaryAllocator, because
  // SecondaryAllocator::PointerIsMine() is slow and we have fallback on
  // the hashmap anyway.
  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
    MBlock *b = user_mblock(thr, (void*)addr);
    CHECK_NE(b, 0);
    MBlock::ScopedLock l(b);
    SyncVar *res = 0;
    for (res = b->ListHead(); res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      if (!create)
        return 0;
      res = Create(thr, pc, addr);
      b->ListPush(res);
    }
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
#endif

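  // Fallback path: optimistic lookup under a read lock first; if the SyncVar
  // is missing and create is set, retry under a write lock and insert it.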
  Part *p = &tab_[PartIdx(addr)];
  {
    ReadLock l(&p->mtx);
    for (SyncVar *res = p->val; res; res = res->next) {
      if (res->addr == addr) {
        if (write_lock)
          res->mtx.Lock();
        else
          res->mtx.ReadLock();
        return res;
      }
    }
  }
  if (!create)
    return 0;
  {
    Lock l(&p->mtx);
    SyncVar *res = p->val;
    for (; res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      res = Create(thr, pc, addr);
      res->next = p->val;
      p->val = res;
    }
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
}

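// Unlinks and returns the SyncVar for addr so the caller can destroy it
// (e.g. when the underlying memory is freed). Linker-initialized mutexes are
// never removed. The Lock/Unlock pair below waits for any thread still
// holding the SyncVar's mutex before the object is handed back for
// destruction.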
SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
#ifndef TSAN_GO
  { // NOLINT
    SyncVar *res = GetAndRemoveJavaSync(thr, pc, addr);
    if (res)
      return res;
  }
  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
    MBlock *b = user_mblock(thr, (void*)addr);
    CHECK_NE(b, 0);
    SyncVar *res = 0;
    {
      MBlock::ScopedLock l(b);
      res = b->ListHead();
      if (res) {
        if (res->addr == addr) {
          if (res->is_linker_init)
            return 0;
          b->ListPop();
        } else {
          SyncVar **prev = &res->next;
          res = *prev;
          while (res) {
            if (res->addr == addr) {
              if (res->is_linker_init)
                return 0;
              *prev = res->next;
              break;
            }
            prev = &res->next;
            res = *prev;
          }
        }
        if (res) {
          StatInc(thr, StatSyncDestroyed);
          res->mtx.Lock();
          res->mtx.Unlock();
        }
      }
    }
    return res;
  }
#endif

  Part *p = &tab_[PartIdx(addr)];
  SyncVar *res = 0;
  {
    Lock l(&p->mtx);
    SyncVar **prev = &p->val;
    res = *prev;
    while (res) {
      if (res->addr == addr) {
        if (res->is_linker_init)
          return 0;
        *prev = res->next;
        break;
      }
      prev = &res->next;
      res = *prev;
    }
  }
  if (res) {
    StatInc(thr, StatSyncDestroyed);
    res->mtx.Lock();
    res->mtx.Unlock();
  }
  return res;
}

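// Maps an address to one of the kPartCount parts. The low 3 bits are
// dropped since synchronization objects are normally word-aligned, so those
// bits carry little information for the hash.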
int SyncTab::PartIdx(uptr addr) {
  return (addr >> 3) % kPartCount;
}

StackTrace::StackTrace()
    : n_()
    , s_()
    , c_() {
}

StackTrace::StackTrace(uptr *buf, uptr cnt)
    : n_()
    , s_(buf)
    , c_(cnt) {
  CHECK_NE(buf, 0);
  CHECK_NE(cnt, 0);
}

StackTrace::~StackTrace() {
  Reset();
}

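// c_ != 0 means s_ is a caller-provided buffer of capacity c_ that
// StackTrace does not own; c_ == 0 means s_ (if set) was allocated
// internally and must be freed here.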
void StackTrace::Reset() {
  if (s_ && !c_) {
    CHECK_NE(n_, 0);
    internal_free(s_);
    s_ = 0;
  }
  n_ = 0;
}

void StackTrace::Init(const uptr *pcs, uptr cnt) {
  Reset();
  if (cnt == 0)
    return;
  if (c_) {
    CHECK_NE(s_, 0);
    CHECK_LE(cnt, c_);
  } else {
    s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
  }
  n_ = cnt;
  internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
}

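// Captures the current shadow stack, optionally appending toppc as the
// top-most frame; !!toppc (0 or 1) reserves one extra slot for it in all
// size calculations. If the stack does not fit, the oldest frames are
// dropped.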
void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
  Reset();
  n_ = thr->shadow_stack_pos - thr->shadow_stack;
  if (n_ + !!toppc == 0)
    return;
  uptr start = 0;
  if (c_) {
    CHECK_NE(s_, 0);
    if (n_ + !!toppc > c_) {
      start = n_ - c_ + !!toppc;
      n_ = c_ - !!toppc;
    }
  } else {
    // Cap potentially huge stacks.
    if (n_ + !!toppc > kTraceStackSize) {
      start = n_ - kTraceStackSize + !!toppc;
      n_ = kTraceStackSize - !!toppc;
    }
    s_ = (uptr*)internal_alloc(MBlockStackTrace,
                               (n_ + !!toppc) * sizeof(s_[0]));
  }
  for (uptr i = 0; i < n_; i++)
    s_[i] = thr->shadow_stack[start + i];
  if (toppc) {
    s_[n_] = toppc;
    n_++;
  }
}

void StackTrace::CopyFrom(const StackTrace& other) {
  Reset();
  Init(other.Begin(), other.Size());
}

bool StackTrace::IsEmpty() const {
  return n_ == 0;
}

uptr StackTrace::Size() const {
  return n_;
}

uptr StackTrace::Get(uptr i) const {
  CHECK_LT(i, n_);
  return s_[i];
}

const uptr *StackTrace::Begin() const {
  return s_;
}

}  // namespace __tsan