//===-- tsan_sync.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {

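// SyncVar describes the state of a single synchronization object (a mutex,
// or any address used for acquire/release operations): its vector clocks,
// owner thread, recursion count and flags.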
SyncVar::SyncVar(uptr addr, u64 uid)
  : mtx(MutexTypeSyncVar, StatMtxSyncVar)
  , addr(addr)
  , uid(uid)
  , owner_tid(kInvalidTid)
  , last_lock()
  , recursion()
  , is_rw()
  , is_recursive()
  , is_broken()
  , is_linker_init() {
}

SyncTab::Part::Part()
  : mtx(MutexTypeSyncTab, StatMtxSyncTab)
  , val() {
}

SyncTab::SyncTab() {
}

SyncTab::~SyncTab() {
  for (int i = 0; i < kPartCount; i++) {
    while (tab_[i].val) {
      SyncVar *tmp = tab_[i].val;
      tab_[i].val = tmp->next;
      DestroyAndFree(tmp);
    }
  }
}

SyncVar* SyncTab::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                                     uptr addr, bool write_lock) {
  return GetAndLock(thr, pc, addr, write_lock, true);
}

SyncVar* SyncTab::GetIfExistsAndLock(uptr addr, bool write_lock) {
  return GetAndLock(0, 0, addr, write_lock, false);
}

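// Typical caller pattern (a minimal sketch; the real callers are the mutex
// event handlers in tsan_rtl_mutex.cc and may differ in detail):
//
//   SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
//   // ... update s->owner_tid, s->recursion, vector clocks ...
//   s->mtx.Unlock();  // Get*AndLock() returns s with s->mtx held.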
SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock, bool create) {
#ifndef TSAN_GO
  // Fast path: for heap addresses the SyncVar is chained directly off the
  // MBlock (allocator metadata) of the owning allocation, bypassing the
  // sharded hash table.
  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
    MBlock *b = user_mblock(thr, (void*)addr);
    Lock l(&b->mtx);
    SyncVar *res = 0;
    for (res = b->head; res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      if (!create)
        return 0;
      StatInc(thr, StatSyncCreated);
      void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
      const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
      res = new(mem) SyncVar(addr, uid);
      res->creation_stack.ObtainCurrent(thr, pc);
      res->next = b->head;
      b->head = res;
    }
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
#endif

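  // Slow path: addresses not owned by the primary allocator (e.g. globals or
  // stack variables) are kept in a hash table sharded into kPartCount parts.
  // First try an optimistic lookup under a read lock.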
  Part *p = &tab_[PartIdx(addr)];
  {
    ReadLock l(&p->mtx);
    for (SyncVar *res = p->val; res; res = res->next) {
      if (res->addr == addr) {
        if (write_lock)
          res->mtx.Lock();
        else
          res->mtx.ReadLock();
        return res;
      }
    }
  }
  if (!create)
    return 0;
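  // Not found under the read lock: retry under the write lock, since another
  // thread may have inserted the SyncVar after the read lock was released.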
  {
    Lock l(&p->mtx);
    SyncVar *res = p->val;
    for (; res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      StatInc(thr, StatSyncCreated);
      void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
      const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
      res = new(mem) SyncVar(addr, uid);
#ifndef TSAN_GO
      res->creation_stack.ObtainCurrent(thr, pc);
#endif
      res->next = p->val;
      p->val = res;
    }
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
}

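// Removes the SyncVar for addr from the table (e.g. on mutex destruction)
// and returns it unlocked, so that the caller can dispose of it. Returns 0
// for linker-initialized mutexes, which live in static storage and are
// deliberately kept alive. The brief Lock/Unlock below waits for threads
// that still hold the SyncVar's mutex to drain before handing it back.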
SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
#ifndef TSAN_GO
  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
    MBlock *b = user_mblock(thr, (void*)addr);
    SyncVar *res = 0;
    {
      Lock l(&b->mtx);
      SyncVar **prev = &b->head;
      res = *prev;
      while (res) {
        if (res->addr == addr) {
          if (res->is_linker_init)
            return 0;
          *prev = res->next;
          break;
        }
        prev = &res->next;
        res = *prev;
      }
    }
    if (res) {
      StatInc(thr, StatSyncDestroyed);
      res->mtx.Lock();
      res->mtx.Unlock();
    }
    return res;
  }
#endif

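  // The same removal protocol, this time against the sharded hash table for
  // addresses not owned by the primary allocator.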
  Part *p = &tab_[PartIdx(addr)];
  SyncVar *res = 0;
  {
    Lock l(&p->mtx);
    SyncVar **prev = &p->val;
    res = *prev;
    while (res) {
      if (res->addr == addr) {
        if (res->is_linker_init)
          return 0;
        *prev = res->next;
        break;
      }
      prev = &res->next;
      res = *prev;
    }
  }
  if (res) {
    StatInc(thr, StatSyncDestroyed);
    res->mtx.Lock();
    res->mtx.Unlock();
  }
  return res;
}

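// Rough memory accounting, used by the runtime's memory profiling. Note
// that *nsync is only incremented here; the caller is expected to zero it.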
uptr SyncVar::GetMemoryConsumption() {
  return sizeof(*this)
      + clock.size() * sizeof(u64)
      + read_clock.size() * sizeof(u64)
      + creation_stack.Size() * sizeof(uptr);
}

uptr SyncTab::GetMemoryConsumption(uptr *nsync) {
  uptr mem = 0;
  for (int i = 0; i < kPartCount; i++) {
    Part *p = &tab_[i];
    Lock l(&p->mtx);
    for (SyncVar *s = p->val; s; s = s->next) {
      *nsync += 1;
      mem += s->GetMemoryConsumption();
    }
  }
  return mem;
}

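// Sync objects are in practice at least 8-byte aligned, so the low 3 address
// bits carry no information; shift them out before picking a shard.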
int SyncTab::PartIdx(uptr addr) {
  return (addr >> 3) % kPartCount;
}

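// StackTrace operates in one of two modes: if constructed with a
// caller-provided buffer (c_ != 0), it writes into that fixed storage and
// never allocates; otherwise it owns a heap buffer allocated on demand.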
StackTrace::StackTrace()
  : n_()
  , s_()
  , c_() {
}

StackTrace::StackTrace(uptr *buf, uptr cnt)
  : n_()
  , s_(buf)
  , c_(cnt) {
  CHECK_NE(buf, 0);
  CHECK_NE(cnt, 0);
}

StackTrace::~StackTrace() {
  Reset();
}

void StackTrace::Reset() {
  // Free the buffer only if it is owned (heap-allocated), i.e. when no
  // caller-provided storage was supplied.
  if (s_ && !c_) {
    CHECK_NE(n_, 0);
    internal_free(s_);
    s_ = 0;
  }
  n_ = 0;
}

void StackTrace::Init(const uptr *pcs, uptr cnt) {
  Reset();
  if (cnt == 0)
    return;
  if (c_) {
    CHECK_NE(s_, 0);
    CHECK_LE(cnt, c_);
  } else {
    s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
  }
  n_ = cnt;
  internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
}

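// Captures the current shadow call stack of thr, optionally appending toppc
// (the PC of the operation being recorded) on top; "!!toppc" accounts for
// that extra slot. If the trace does not fit into a fixed buffer, the
// outermost frames are dropped and the innermost ones are kept.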
void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
  Reset();
  n_ = thr->shadow_stack_pos - thr->shadow_stack;
  if (n_ + !!toppc == 0)
    return;
  uptr start = 0;
  if (c_) {
    CHECK_NE(s_, 0);
    if (n_ + !!toppc > c_) {
      start = n_ - c_ + !!toppc;
      n_ = c_ - !!toppc;
    }
  } else {
    s_ = (uptr*)internal_alloc(MBlockStackTrace,
                               (n_ + !!toppc) * sizeof(s_[0]));
  }
  for (uptr i = 0; i < n_; i++)
    s_[i] = thr->shadow_stack[start + i];
  if (toppc) {
    s_[n_] = toppc;
    n_++;
  }
}

void StackTrace::CopyFrom(const StackTrace& other) {
  Reset();
  Init(other.Begin(), other.Size());
}

bool StackTrace::IsEmpty() const {
  return n_ == 0;
}

uptr StackTrace::Size() const {
  return n_;
}

uptr StackTrace::Get(uptr i) const {
  CHECK_LT(i, n_);
  return s_[i];
}

const uptr *StackTrace::Begin() const {
  return s_;
}

}  // namespace __tsan