//===-- tsan_sync.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {

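// A SyncVar describes the state of a single user synchronization object:
// its address, owning thread, recursion count, clocks (clock, read_clock)
// and the stack at which it was created.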
SyncVar::SyncVar(uptr addr, u64 uid)
  : mtx(MutexTypeSyncVar, StatMtxSyncVar)
  , addr(addr)
  , uid(uid)
  , owner_tid(kInvalidTid)
  , last_lock()
  , recursion()
  , is_rw()
  , is_recursive()
  , is_broken()
  , is_linker_init() {
}

SyncTab::Part::Part()
  : mtx(MutexTypeSyncTab, StatMtxSyncTab)
  , val() {
}

SyncTab::SyncTab() {
}

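// On shutdown, drain every part's list, destroying and freeing the
// remaining sync objects.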
SyncTab::~SyncTab() {
  for (int i = 0; i < kPartCount; i++) {
    while (tab_[i].val) {
      SyncVar *tmp = tab_[i].val;
      tab_[i].val = tmp->next;
      DestroyAndFree(tmp);
    }
  }
}

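// Both entry points are thin wrappers over GetAndLock: the first creates
// the sync object if it is missing, the second returns 0 instead.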
SyncVar* SyncTab::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                                     uptr addr, bool write_lock) {
  return GetAndLock(thr, pc, addr, write_lock, true);
}

SyncVar* SyncTab::GetIfExistsAndLock(uptr addr, bool write_lock) {
  return GetAndLock(0, 0, addr, write_lock, false);
}

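// Looks up (and optionally creates) the SyncVar for addr and returns it
// with its mutex held (read- or write-locked as requested). For heap
// addresses the object lives on the list of its owning memory block;
// everything else goes through the partitioned hash table below.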
SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock, bool create) {
#ifndef TSAN_GO
  // Here we ask only PrimaryAllocator, because
  // SecondaryAllocator::PointerIsMine() is slow and we fall back to
  // the hashmap anyway.
  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
    MBlock *b = user_mblock(thr, (void*)addr);
    Lock l(&b->mtx);
    SyncVar *res = 0;
    for (res = b->head; res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      if (!create)
        return 0;
      StatInc(thr, StatSyncCreated);
      void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
      const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
      res = new(mem) SyncVar(addr, uid);
      res->creation_stack.ObtainCurrent(thr, pc);
      res->next = b->head;
      b->head = res;
    }
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
#endif

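  // The address is not in a heap block (or this is the Go runtime):
  // use the partitioned hash table. Try a read lock on the part first and
  // take the write lock only if the object has to be created.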
  Part *p = &tab_[PartIdx(addr)];
  {
    ReadLock l(&p->mtx);
    for (SyncVar *res = p->val; res; res = res->next) {
      if (res->addr == addr) {
        if (write_lock)
          res->mtx.Lock();
        else
          res->mtx.ReadLock();
        return res;
      }
    }
  }
  if (!create)
    return 0;
  {
    Lock l(&p->mtx);
    SyncVar *res = p->val;
    for (; res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      StatInc(thr, StatSyncCreated);
      void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
      const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
      res = new(mem) SyncVar(addr, uid);
#ifndef TSAN_GO
      res->creation_stack.ObtainCurrent(thr, pc);
#endif
      res->next = p->val;
      p->val = res;
    }
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
}

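// Detaches the SyncVar for addr from its container (memory block list or
// hash table part) and returns it, or returns 0 if it does not exist or
// is linker-initialized and therefore must not be removed.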
SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
#ifndef TSAN_GO
  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
    MBlock *b = user_mblock(thr, (void*)addr);
    SyncVar *res = 0;
    {
      Lock l(&b->mtx);
      SyncVar **prev = &b->head;
      res = *prev;
      while (res) {
        if (res->addr == addr) {
          if (res->is_linker_init)
            return 0;
          *prev = res->next;
          break;
        }
        prev = &res->next;
        res = *prev;
      }
    }
    if (res) {
      StatInc(thr, StatSyncDestroyed);
      res->mtx.Lock();
      res->mtx.Unlock();
    }
    return res;
  }
#endif

  Part *p = &tab_[PartIdx(addr)];
  SyncVar *res = 0;
  {
    Lock l(&p->mtx);
    SyncVar **prev = &p->val;
    res = *prev;
    while (res) {
      if (res->addr == addr) {
        if (res->is_linker_init)
          return 0;
        *prev = res->next;
        break;
      }
      prev = &res->next;
      res = *prev;
    }
  }
  if (res) {
    StatInc(thr, StatSyncDestroyed);
    res->mtx.Lock();
    res->mtx.Unlock();
  }
  return res;
}

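// Approximate memory used by one sync object, including its clocks and
// creation stack.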
uptr SyncVar::GetMemoryConsumption() {
  return sizeof(*this)
      + clock.size() * sizeof(u64)
      + read_clock.size() * sizeof(u64)
      + creation_stack.Size() * sizeof(uptr);
}

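// Sums consumption over all parts and counts the sync objects into *nsync.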
uptr SyncTab::GetMemoryConsumption(uptr *nsync) {
  uptr mem = 0;
  for (int i = 0; i < kPartCount; i++) {
    Part *p = &tab_[i];
    Lock l(&p->mtx);
    for (SyncVar *s = p->val; s; s = s->next) {
      *nsync += 1;
      mem += s->GetMemoryConsumption();
    }
  }
  return mem;
}

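// Maps an address to one of the kPartCount parts. The low 3 bits are
// dropped, presumably because sync objects are normally at least
// word-aligned, so those bits carry little information.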
int SyncTab::PartIdx(uptr addr) {
  return (addr >> 3) % kPartCount;
}

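// A StackTrace either owns its storage (allocated lazily with
// internal_alloc, c_ == 0) or borrows a caller-supplied buffer of c_
// entries passed to the second constructor.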
StackTrace::StackTrace()
  : n_()
  , s_()
  , c_() {
}

StackTrace::StackTrace(uptr *buf, uptr cnt)
  : n_()
  , s_(buf)
  , c_(cnt) {
  CHECK_NE(buf, 0);
  CHECK_NE(cnt, 0);
}

StackTrace::~StackTrace() {
  Reset();
}

void StackTrace::Reset() {
  if (s_ && !c_) {
    CHECK_NE(n_, 0);
    internal_free(s_);
    s_ = 0;
  }
  n_ = 0;
}

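// Copies an already captured trace; reuses the borrowed buffer if one is
// present, otherwise allocates exactly cnt entries.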
void StackTrace::Init(const uptr *pcs, uptr cnt) {
  Reset();
  if (cnt == 0)
    return;
  if (c_) {
    CHECK_NE(s_, 0);
    CHECK_LE(cnt, c_);
  } else {
    s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
  }
  n_ = cnt;
  internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
}

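// Captures the current shadow stack of thr, optionally appending toppc.
// If a fixed-size buffer is used and the stack does not fit, the oldest
// frames are dropped.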
void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
  Reset();
  n_ = thr->shadow_stack_pos - thr->shadow_stack;
  if (n_ + !!toppc == 0)
    return;
  uptr start = 0;
  if (c_) {
    CHECK_NE(s_, 0);
    if (n_ + !!toppc > c_) {
      start = n_ - c_ + !!toppc;
      n_ = c_ - !!toppc;
    }
  } else {
    s_ = (uptr*)internal_alloc(MBlockStackTrace,
                               (n_ + !!toppc) * sizeof(s_[0]));
  }
  for (uptr i = 0; i < n_; i++)
    s_[i] = thr->shadow_stack[start + i];
  if (toppc) {
    s_[n_] = toppc;
    n_++;
  }
}

void StackTrace::CopyFrom(const StackTrace& other) {
  Reset();
  Init(other.Begin(), other.Size());
}

bool StackTrace::IsEmpty() const {
  return n_ == 0;
}

uptr StackTrace::Size() const {
  return n_;
}

uptr StackTrace::Get(uptr i) const {
  CHECK_LT(i, n_);
  return s_[i];
}

const uptr *StackTrace::Begin() const {
  return s_;
}

}  // namespace __tsan