blob: 86265bb816167449699a4a857f975b25029c40d3 [file] [log] [blame]
//===-- tsan_sync.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
Alexey Samsonov8bd90982012-06-07 09:50:16 +000013#include "sanitizer_common/sanitizer_placement_new.h"
Kostya Serebryany4ad375f2012-05-10 13:48:04 +000014#include "tsan_sync.h"
Kostya Serebryany4ad375f2012-05-10 13:48:04 +000015#include "tsan_rtl.h"
16#include "tsan_mman.h"
17
18namespace __tsan {
19
// Constructs the sync object for address 'addr': no owner thread
// (kInvalidTid), zero recursion and all flags cleared.
SyncVar::SyncVar(uptr addr)
  : mtx(MutexTypeSyncVar, StatMtxSyncVar)
  , addr(addr)
  , owner_tid(kInvalidTid)
  , last_lock()
  , recursion()
  , is_rw()
  , is_recursive()
  , is_broken()
  , is_linker_init() {
}
31
// A table part: mutex-protected head of a SyncVar list, initially empty.
SyncTab::Part::Part()
  : mtx(MutexTypeSyncTab, StatMtxSyncTab)
  , val() {
}
36
// All parts are default-constructed; nothing else to initialize.
SyncTab::SyncTab() {
}
39
40SyncTab::~SyncTab() {
41 for (int i = 0; i < kPartCount; i++) {
42 while (tab_[i].val) {
43 SyncVar *tmp = tab_[i].val;
44 tab_[i].val = tmp->next;
45 DestroyAndFree(tmp);
46 }
47 }
48}
49
// Returns the SyncVar for 'addr', creating it on first use, with the var's
// own mutex already acquired (write- or read-locked per 'write_lock').
// The caller is responsible for releasing res->mtx.
SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock) {
#ifndef TSAN_GO
  // Fast path: for addresses owned by the primary allocator, sync objects
  // live on a per-memory-block list guarded by the block's own mutex,
  // which avoids contention on the global table.
  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
    MBlock *b = user_mblock(thr, (void*)addr);
    Lock l(&b->mtx);
    SyncVar *res = 0;
    for (res = b->head; res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      // First synchronization on this address: allocate and link a new var.
      StatInc(thr, StatSyncCreated);
      void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
      res = new(mem) SyncVar(addr);
      res->creation_stack.ObtainCurrent(thr, pc);
      res->next = b->head;
      b->head = res;
    }
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
#endif

  Part *p = &tab_[PartIdx(addr)];
  {
    // Optimistic pass: look up under a read lock -- the common case once
    // the SyncVar already exists.
    ReadLock l(&p->mtx);
    for (SyncVar *res = p->val; res; res = res->next) {
      if (res->addr == addr) {
        if (write_lock)
          res->mtx.Lock();
        else
          res->mtx.ReadLock();
        return res;
      }
    }
  }
  {
    // Not found: retry under the write lock. Re-scan first, because another
    // thread may have inserted the var between the two passes.
    Lock l(&p->mtx);
    SyncVar *res = p->val;
    for (; res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      StatInc(thr, StatSyncCreated);
      void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
      res = new(mem) SyncVar(addr);
#ifndef TSAN_GO
      // Creation stacks are not collected in the Go runtime build.
      res->creation_stack.ObtainCurrent(thr, pc);
#endif
      res->next = p->val;
      p->val = res;
    }
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
}
114
// Unlinks and returns the SyncVar for 'addr'; returns 0 if there is none,
// or if the var is linker-initialized (those are never removed).
// Ownership of the returned var passes to the caller.
SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
#ifndef TSAN_GO
  // Fast path: vars for primary-allocator memory live on the owning
  // memory block's list (mirrors the lookup in GetAndLock).
  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
    MBlock *b = user_mblock(thr, (void*)addr);
    SyncVar *res = 0;
    {
      Lock l(&b->mtx);
      SyncVar **prev = &b->head;
      res = *prev;
      while (res) {
        if (res->addr == addr) {
          if (res->is_linker_init)
            return 0;
          *prev = res->next;
          break;
        }
        prev = &res->next;
        res = *prev;
      }
    }
    if (res) {
      StatInc(thr, StatSyncDestroyed);
      // NOTE(review): the lock/unlock pair presumably lets in-flight users
      // of the var drain before the caller destroys it -- confirm.
      res->mtx.Lock();
      res->mtx.Unlock();
    }
    return res;
  }
#endif

  Part *p = &tab_[PartIdx(addr)];
  SyncVar *res = 0;
  {
    Lock l(&p->mtx);
    SyncVar **prev = &p->val;
    res = *prev;
    while (res) {
      if (res->addr == addr) {
        if (res->is_linker_init)
          return 0;
        *prev = res->next;
        break;
      }
      prev = &res->next;
      res = *prev;
    }
  }
  if (res) {
    StatInc(thr, StatSyncDestroyed);
    // NOTE(review): same drain idiom as the fast path above -- confirm.
    res->mtx.Lock();
    res->mtx.Unlock();
  }
  return res;
}
168
Dmitry Vyukov15710c92012-05-22 11:33:03 +0000169uptr SyncVar::GetMemoryConsumption() {
170 return sizeof(*this)
171 + clock.size() * sizeof(u64)
172 + read_clock.size() * sizeof(u64)
173 + creation_stack.Size() * sizeof(uptr);
174}
175
// Returns total memory consumed by all SyncVars in the table and counts
// them into '*nsync'.
// NOTE(review): '*nsync' is only incremented, never zeroed here -- the
// caller is expected to initialize it before the call; verify at call site.
uptr SyncTab::GetMemoryConsumption(uptr *nsync) {
  uptr mem = 0;
  for (int i = 0; i < kPartCount; i++) {
    Part *p = &tab_[i];
    // Hold the part lock so the list cannot change mid-walk.
    Lock l(&p->mtx);
    for (SyncVar *s = p->val; s; s = s->next) {
      *nsync += 1;
      mem += s->GetMemoryConsumption();
    }
  }
  return mem;
}
188
// Maps an address to its table part. The low 3 bits are dropped before the
// modulo (presumably because sync addresses are at least 8-byte aligned,
// so those bits carry no entropy -- confirm).
int SyncTab::PartIdx(uptr addr) {
  return (addr >> 3) % kPartCount;
}
192
// Creates an empty trace. n_ = frame count, s_ = frame buffer,
// c_ = capacity of a caller-supplied static buffer (0 means the buffer,
// when needed, is heap-allocated and owned by this object).
StackTrace::StackTrace()
  : n_()
  , s_()
  , c_() {
}
198
// Creates a trace backed by a caller-provided static buffer of 'cnt'
// entries; Reset() will not free such a buffer. Both arguments must be
// non-zero.
StackTrace::StackTrace(uptr *buf, uptr cnt)
  : n_()
  , s_(buf)
  , c_(cnt) {
  CHECK_NE(buf, 0);
  CHECK_NE(cnt, 0);
}
206
// Releases the trace buffer if this object owns it (see Reset()).
StackTrace::~StackTrace() {
  Reset();
}
210
211void StackTrace::Reset() {
Dmitry Vyukovde1fd1c2012-06-22 11:08:55 +0000212 if (s_ && !c_) {
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000213 CHECK_NE(n_, 0);
214 internal_free(s_);
215 s_ = 0;
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000216 }
Dmitry Vyukovde1fd1c2012-06-22 11:08:55 +0000217 n_ = 0;
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000218}
219
220void StackTrace::Init(const uptr *pcs, uptr cnt) {
221 Reset();
222 if (cnt == 0)
223 return;
Dmitry Vyukovde1fd1c2012-06-22 11:08:55 +0000224 if (c_) {
225 CHECK_NE(s_, 0);
226 CHECK_LE(cnt, c_);
227 } else {
228 s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
229 }
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000230 n_ = cnt;
Dmitry Vyukovde1fd1c2012-06-22 11:08:55 +0000231 internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000232}
233
// Captures the current thread's shadow call stack into this trace,
// appending 'toppc' (the current pc) as the topmost frame if non-zero.
void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
  Reset();
  n_ = thr->shadow_stack_pos - thr->shadow_stack;
  // '!!toppc' contributes one extra slot iff toppc != 0; bail out if the
  // resulting trace would be empty.
  if (n_ + !!toppc == 0)
    return;
  if (c_) {
    // Caller-supplied static buffer: must exist and fit the whole trace.
    CHECK_NE(s_, 0);
    CHECK_LE(n_ + !!toppc, c_);
  } else {
    s_ = (uptr*)internal_alloc(MBlockStackTrace,
                               (n_ + !!toppc) * sizeof(s_[0]));
  }
  for (uptr i = 0; i < n_; i++)
    s_[i] = thr->shadow_stack[i];
  if (toppc) {
    s_[n_] = toppc;
    n_++;
  }
}
253
254void StackTrace::CopyFrom(const StackTrace& other) {
255 Reset();
256 Init(other.Begin(), other.Size());
257}
258
// True iff no frames have been captured.
bool StackTrace::IsEmpty() const {
  return n_ == 0;
}
262
// Number of captured frames.
uptr StackTrace::Size() const {
  return n_;
}
266
// Returns frame 'i'; bounds-checked against the frame count.
uptr StackTrace::Get(uptr i) const {
  CHECK_LT(i, n_);
  return s_[i];
}
271
// Pointer to the first frame (0 if no buffer has been allocated).
const uptr *StackTrace::Begin() const {
  return s_;
}
275
276} // namespace __tsan