//===-- tsan_sync.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
Alexey Samsonov8bd90982012-06-07 09:50:16 +000013#include "sanitizer_common/sanitizer_placement_new.h"
Kostya Serebryany4ad375f2012-05-10 13:48:04 +000014#include "tsan_sync.h"
Kostya Serebryany4ad375f2012-05-10 13:48:04 +000015#include "tsan_rtl.h"
16#include "tsan_mman.h"
17
18namespace __tsan {
19
20SyncVar::SyncVar(uptr addr)
21 : mtx(MutexTypeSyncVar, StatMtxSyncVar)
22 , addr(addr)
23 , owner_tid(kInvalidTid)
24 , recursion()
25 , is_rw()
26 , is_recursive()
27 , is_broken() {
28}
29
30SyncTab::Part::Part()
31 : mtx(MutexTypeSyncTab, StatMtxSyncTab)
32 , val() {
33}
34
// Nothing to do: the partitions initialize themselves via Part::Part().
SyncTab::SyncTab() {
}
37
38SyncTab::~SyncTab() {
39 for (int i = 0; i < kPartCount; i++) {
40 while (tab_[i].val) {
41 SyncVar *tmp = tab_[i].val;
42 tab_[i].val = tmp->next;
43 DestroyAndFree(tmp);
44 }
45 }
46}
47
// Returns the SyncVar associated with 'addr', creating it on first
// use.  The variable's own mutex is acquired (write or read, per
// 'write_lock') before returning, so the caller must release it.
SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock) {
#ifndef TSAN_GO
  // Fast path for heap addresses: sync variables for an allocator block
  // are chained off the block's MBlock header instead of the global
  // table, so lookup contends only on that block's mutex.
  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
    MBlock *b = user_mblock(thr, (void*)addr);
    Lock l(&b->mtx);
    SyncVar *res = 0;
    for (res = b->head; res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      // Not found: allocate, record the creation stack, and prepend.
      StatInc(thr, StatSyncCreated);
      void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
      res = new(mem) SyncVar(addr);
      res->creation_stack.ObtainCurrent(thr, pc);
      res->next = b->head;
      b->head = res;
    }
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
#endif

  Part *p = &tab_[PartIdx(addr)];
  {
    // Optimistic pass under the partition read lock: most calls find an
    // existing variable and never need the exclusive lock.
    ReadLock l(&p->mtx);
    for (SyncVar *res = p->val; res; res = res->next) {
      if (res->addr == addr) {
        if (write_lock)
          res->mtx.Lock();
        else
          res->mtx.ReadLock();
        return res;
      }
    }
  }
  {
    // Slow pass under the exclusive lock.  The list is re-scanned
    // because another thread may have inserted the variable between
    // dropping the read lock and acquiring the write lock.
    Lock l(&p->mtx);
    SyncVar *res = p->val;
    for (; res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      StatInc(thr, StatSyncCreated);
      void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
      res = new(mem) SyncVar(addr);
#ifndef TSAN_GO
      // Creation stacks are not collected in the Go runtime build.
      res->creation_stack.ObtainCurrent(thr, pc);
#endif
      res->next = p->val;
      p->val = res;
    }
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
}
112
// Unlinks the SyncVar for 'addr' from its container (MBlock list for
// heap addresses, global table otherwise) and returns it, or 0 if no
// such variable exists.  The returned object is no longer reachable
// from the table, so the caller is responsible for destroying it.
// 'pc' is currently unused.
SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
#ifndef TSAN_GO
  // Heap addresses: unlink from the owning allocator block's list.
  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
    MBlock *b = user_mblock(thr, (void*)addr);
    SyncVar *res = 0;
    {
      Lock l(&b->mtx);
      // Walk with a pointer-to-pointer so removing the head needs no
      // special case.
      SyncVar **prev = &b->head;
      res = *prev;
      while (res) {
        if (res->addr == addr) {
          *prev = res->next;
          break;
        }
        prev = &res->next;
        res = *prev;
      }
    }
    if (res) {
      StatInc(thr, StatSyncDestroyed);
      // Lock/unlock cycle: presumably lets threads that acquired the
      // variable before it was unlinked drain out before the caller
      // destroys it — NOTE(review): verify no new acquirers can appear
      // after the unlink above.
      res->mtx.Lock();
      res->mtx.Unlock();
    }
    return res;
  }
#endif

  Part *p = &tab_[PartIdx(addr)];
  SyncVar *res = 0;
  {
    Lock l(&p->mtx);
    SyncVar **prev = &p->val;
    res = *prev;
    while (res) {
      if (res->addr == addr) {
        *prev = res->next;
        break;
      }
      prev = &res->next;
      res = *prev;
    }
  }
  if (res) {
    StatInc(thr, StatSyncDestroyed);
    // Same drain cycle as the heap path above.
    res->mtx.Lock();
    res->mtx.Unlock();
  }
  return res;
}
162
Dmitry Vyukov15710c92012-05-22 11:33:03 +0000163uptr SyncVar::GetMemoryConsumption() {
164 return sizeof(*this)
165 + clock.size() * sizeof(u64)
166 + read_clock.size() * sizeof(u64)
167 + creation_stack.Size() * sizeof(uptr);
168}
169
170uptr SyncTab::GetMemoryConsumption(uptr *nsync) {
171 uptr mem = 0;
172 for (int i = 0; i < kPartCount; i++) {
173 Part *p = &tab_[i];
174 Lock l(&p->mtx);
175 for (SyncVar *s = p->val; s; s = s->next) {
176 *nsync += 1;
177 mem += s->GetMemoryConsumption();
178 }
179 }
180 return mem;
181}
182
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000183int SyncTab::PartIdx(uptr addr) {
184 return (addr >> 3) % kPartCount;
185}
186
187StackTrace::StackTrace()
188 : n_()
Dmitry Vyukovde1fd1c2012-06-22 11:08:55 +0000189 , s_()
190 , c_() {
191}
192
193StackTrace::StackTrace(uptr *buf, uptr cnt)
194 : n_()
195 , s_(buf)
196 , c_(cnt) {
197 CHECK_NE(buf, 0);
198 CHECK_NE(cnt, 0);
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000199}
200
// Releases any internally allocated buffer via Reset().
StackTrace::~StackTrace() {
  Reset();
}
204
205void StackTrace::Reset() {
Dmitry Vyukovde1fd1c2012-06-22 11:08:55 +0000206 if (s_ && !c_) {
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000207 CHECK_NE(n_, 0);
208 internal_free(s_);
209 s_ = 0;
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000210 }
Dmitry Vyukovde1fd1c2012-06-22 11:08:55 +0000211 n_ = 0;
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000212}
213
214void StackTrace::Init(const uptr *pcs, uptr cnt) {
215 Reset();
216 if (cnt == 0)
217 return;
Dmitry Vyukovde1fd1c2012-06-22 11:08:55 +0000218 if (c_) {
219 CHECK_NE(s_, 0);
220 CHECK_LE(cnt, c_);
221 } else {
222 s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
223 }
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000224 n_ = cnt;
Dmitry Vyukovde1fd1c2012-06-22 11:08:55 +0000225 internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000226}
227
228void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
229 Reset();
Dmitry Vyukov5bfac972012-07-16 16:44:47 +0000230 n_ = thr->shadow_stack_pos - thr->shadow_stack;
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000231 if (n_ + !!toppc == 0)
232 return;
Dmitry Vyukovde1fd1c2012-06-22 11:08:55 +0000233 if (c_) {
234 CHECK_NE(s_, 0);
235 CHECK_LE(n_ + !!toppc, c_);
236 } else {
237 s_ = (uptr*)internal_alloc(MBlockStackTrace,
238 (n_ + !!toppc) * sizeof(s_[0]));
239 }
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000240 for (uptr i = 0; i < n_; i++)
241 s_[i] = thr->shadow_stack[i];
242 if (toppc) {
243 s_[n_] = toppc;
244 n_++;
245 }
246}
247
// Deep-copies another trace's frames into this one.  Init() performs
// its own Reset(), so the explicit call here is redundant but harmless.
void StackTrace::CopyFrom(const StackTrace& other) {
  Reset();
  Init(other.Begin(), other.Size());
}
252
253bool StackTrace::IsEmpty() const {
254 return n_ == 0;
255}
256
257uptr StackTrace::Size() const {
258 return n_;
259}
260
261uptr StackTrace::Get(uptr i) const {
262 CHECK_LT(i, n_);
263 return s_[i];
264}
265
266const uptr *StackTrace::Begin() const {
267 return s_;
268}
269
270} // namespace __tsan