//===-- tsan_sync.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {

SyncVar::SyncVar(uptr addr, u64 uid)
  : mtx(MutexTypeSyncVar, StatMtxSyncVar)
  , addr(addr)
  , uid(uid)
  , owner_tid(kInvalidTid)
  , last_lock()
  , recursion()
  , is_rw()
  , is_recursive()
  , is_broken()
  , is_linker_init() {
}

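// Part is one bucket of the sync table. Each part has its own mutex, so
// accesses to different buckets do not contend.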
SyncTab::Part::Part()
  : mtx(MutexTypeSyncTab, StatMtxSyncTab)
  , val() {
}

SyncTab::SyncTab() {
}

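// At shutdown, walk every part and free all sync objects that were never
// explicitly destroyed.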
SyncTab::~SyncTab() {
  for (int i = 0; i < kPartCount; i++) {
    while (tab_[i].val) {
      SyncVar *tmp = tab_[i].val;
      tab_[i].val = tmp->next;
      DestroyAndFree(tmp);
    }
  }
}

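// Returns the sync object for addr, creating one if it does not exist yet.
// The object is returned with its mutex held (write- or read-locked,
// depending on write_lock).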
SyncVar* SyncTab::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                                     uptr addr, bool write_lock) {
  return GetAndLock(thr, pc, addr, write_lock, true);
}

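// Like GetOrCreateAndLock(), but returns 0 if no sync object exists for
// addr. thr/pc are not needed because the lookup never allocates.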
SyncVar* SyncTab::GetIfExistsAndLock(uptr addr, bool write_lock) {
  return GetAndLock(0, 0, addr, write_lock, false);
}

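// Allocates a fresh SyncVar tagged with a unique id from uid_gen_ and,
// in C++ mode, records the stack at which the object was created.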
SyncVar* SyncTab::Create(ThreadState *thr, uptr pc, uptr addr) {
  StatInc(thr, StatSyncCreated);
  void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
  const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
  SyncVar *res = new(mem) SyncVar(addr, uid);
#ifndef TSAN_GO
  res->creation_stack.ObtainCurrent(thr, pc);
#endif
  return res;
}

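// Central lookup routine. Tries, in order: the Java sync objects, the
// per-memory-block list for addresses owned by the primary allocator, and
// finally the hash table. The hash table path takes a read lock first and
// retries under a write lock only if a new object must be inserted.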
SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock, bool create) {
#ifndef TSAN_GO
  {  // NOLINT
    SyncVar *res = GetJavaSync(thr, pc, addr, write_lock, create);
    if (res)
      return res;
  }

  // Here we ask only PrimaryAllocator, because
  // SecondaryAllocator::PointerIsMine() is slow and we fall back to
  // the hashmap anyway.
  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
    MBlock *b = user_mblock(thr, (void*)addr);
    Lock l(&b->mtx);
    SyncVar *res = 0;
    for (res = b->head; res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      if (!create)
        return 0;
      res = Create(thr, pc, addr);
      res->next = b->head;
      b->head = res;
    }
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
#endif

  Part *p = &tab_[PartIdx(addr)];
  {
    ReadLock l(&p->mtx);
    for (SyncVar *res = p->val; res; res = res->next) {
      if (res->addr == addr) {
        if (write_lock)
          res->mtx.Lock();
        else
          res->mtx.ReadLock();
        return res;
      }
    }
  }
  if (!create)
    return 0;
  {
    Lock l(&p->mtx);
    SyncVar *res = p->val;
    for (; res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      res = Create(thr, pc, addr);
      res->next = p->val;
      p->val = res;
    }
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
}

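// Unlinks and returns the sync object for addr, e.g. when the application
// destroys a mutex. Linker-initialized objects are never removed, since a
// statically initialized mutex may legitimately be used again after
// pthread_mutex_destroy(). The final Lock/Unlock pair lets any current
// users of the object drain before the caller frees it.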
SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
#ifndef TSAN_GO
  {  // NOLINT
    SyncVar *res = GetAndRemoveJavaSync(thr, pc, addr);
    if (res)
      return res;
  }
  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
    MBlock *b = user_mblock(thr, (void*)addr);
    SyncVar *res = 0;
    {
      Lock l(&b->mtx);
      SyncVar **prev = &b->head;
      res = *prev;
      while (res) {
        if (res->addr == addr) {
          if (res->is_linker_init)
            return 0;
          *prev = res->next;
          break;
        }
        prev = &res->next;
        res = *prev;
      }
    }
    if (res) {
      StatInc(thr, StatSyncDestroyed);
      res->mtx.Lock();
      res->mtx.Unlock();
    }
    return res;
  }
#endif

  Part *p = &tab_[PartIdx(addr)];
  SyncVar *res = 0;
  {
    Lock l(&p->mtx);
    SyncVar **prev = &p->val;
    res = *prev;
    while (res) {
      if (res->addr == addr) {
        if (res->is_linker_init)
          return 0;
        *prev = res->next;
        break;
      }
      prev = &res->next;
      res = *prev;
    }
  }
  if (res) {
    StatInc(thr, StatSyncDestroyed);
    res->mtx.Lock();
    res->mtx.Unlock();
  }
  return res;
}

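// Approximates per-object memory usage: the object itself plus its two
// vector clocks and the creation stack.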
uptr SyncVar::GetMemoryConsumption() {
  return sizeof(*this)
      + clock.size() * sizeof(u64)
      + read_clock.size() * sizeof(u64)
      + creation_stack.Size() * sizeof(uptr);
}

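// Sums memory usage over all sync objects in the table and increments
// *nsync once per object (the caller is expected to zero-initialize it).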
uptr SyncTab::GetMemoryConsumption(uptr *nsync) {
  uptr mem = 0;
  for (int i = 0; i < kPartCount; i++) {
    Part *p = &tab_[i];
    Lock l(&p->mtx);
    for (SyncVar *s = p->val; s; s = s->next) {
      *nsync += 1;
      mem += s->GetMemoryConsumption();
    }
  }
  return mem;
}

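// Maps an address to its table part. The low 3 bits are discarded first,
// presumably because sync addresses are at least 8-byte aligned.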
int SyncTab::PartIdx(uptr addr) {
  return (addr >> 3) % kPartCount;
}

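// A StackTrace either owns a heap-allocated buffer (default constructor,
// c_ == 0) or wraps a caller-provided buffer of fixed capacity cnt.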
StackTrace::StackTrace()
  : n_()
  , s_()
  , c_() {
}

StackTrace::StackTrace(uptr *buf, uptr cnt)
  : n_()
  , s_(buf)
  , c_(cnt) {
  CHECK_NE(buf, 0);
  CHECK_NE(cnt, 0);
}

StackTrace::~StackTrace() {
  Reset();
}

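// Frees the buffer only if it is heap-allocated and owned (c_ == 0);
// a caller-provided fixed buffer is kept for reuse.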
void StackTrace::Reset() {
  if (s_ && !c_) {
    CHECK_NE(n_, 0);
    internal_free(s_);
    s_ = 0;
  }
  n_ = 0;
}

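// Copies cnt pcs into the trace, allocating a buffer unless the trace
// already wraps a fixed-capacity one.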
void StackTrace::Init(const uptr *pcs, uptr cnt) {
  Reset();
  if (cnt == 0)
    return;
  if (c_) {
    CHECK_NE(s_, 0);
    CHECK_LE(cnt, c_);
  } else {
    s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
  }
  n_ = cnt;
  internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
}

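// Captures the current shadow stack of thr, optionally appending toppc as
// the topmost frame. If a fixed-capacity buffer is too small, the oldest
// (bottom-most) frames are dropped so that the most recent ones are kept.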
void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
  Reset();
  n_ = thr->shadow_stack_pos - thr->shadow_stack;
  if (n_ + !!toppc == 0)
    return;
  uptr start = 0;
  if (c_) {
    CHECK_NE(s_, 0);
    if (n_ + !!toppc > c_) {
      start = n_ - c_ + !!toppc;
      n_ = c_ - !!toppc;
    }
  } else {
    s_ = (uptr*)internal_alloc(MBlockStackTrace,
                               (n_ + !!toppc) * sizeof(s_[0]));
  }
  for (uptr i = 0; i < n_; i++)
    s_[i] = thr->shadow_stack[start + i];
  if (toppc) {
    s_[n_] = toppc;
    n_++;
  }
}

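// Deep-copies another trace into this one.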
void StackTrace::CopyFrom(const StackTrace& other) {
  Reset();
  Init(other.Begin(), other.Size());
}

bool StackTrace::IsEmpty() const {
  return n_ == 0;
}

uptr StackTrace::Size() const {
  return n_;
}

uptr StackTrace::Get(uptr i) const {
  CHECK_LT(i, n_);
  return s_[i];
}

const uptr *StackTrace::Begin() const {
  return s_;
}

}  // namespace __tsan