//===-- tsan_sync.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
Alexey Samsonov8bd90982012-06-07 09:50:16 +000013#include "sanitizer_common/sanitizer_placement_new.h"
Kostya Serebryany4ad375f2012-05-10 13:48:04 +000014#include "tsan_sync.h"
Kostya Serebryany4ad375f2012-05-10 13:48:04 +000015#include "tsan_rtl.h"
16#include "tsan_mman.h"
17
18namespace __tsan {
19
// Descriptor of a single user synchronization object (mutex/atomic/etc.)
// living at user address 'addr'. All state fields start out zeroed;
// owner_tid starts as kInvalidTid (no owning thread recorded yet).
SyncVar::SyncVar(uptr addr)
  : mtx(MutexTypeSyncVar, StatMtxSyncVar)
  , addr(addr)
  , owner_tid(kInvalidTid)
  , recursion()
  , is_rw()
  , is_recursive()
  , is_broken()
  , is_linker_init() {
}
30
// One shard ("part") of the sync-object hash table: a mutex plus the head
// of a singly-linked SyncVar chain (val starts empty).
SyncTab::Part::Part()
  : mtx(MutexTypeSyncTab, StatMtxSyncTab)
  , val() {
}
35
// Nothing to do: the Part array members initialize themselves.
SyncTab::SyncTab() {
}
38
39SyncTab::~SyncTab() {
40 for (int i = 0; i < kPartCount; i++) {
41 while (tab_[i].val) {
42 SyncVar *tmp = tab_[i].val;
43 tab_[i].val = tmp->next;
44 DestroyAndFree(tmp);
45 }
46 }
47}
48
// Returns the SyncVar describing user address 'addr', creating it on first
// use, with its mutex already acquired (write-locked if write_lock, else
// read-locked). The caller is responsible for the matching unlock.
SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock) {
#ifndef TSAN_GO
  // Fast path for heap objects owned by the primary allocator: the SyncVar
  // chain hangs directly off the owning memory block, avoiding the global
  // hash table entirely.
  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
    MBlock *b = user_mblock(thr, (void*)addr);
    Lock l(&b->mtx);
    SyncVar *res = 0;
    for (res = b->head; res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      // First access to this sync object: allocate, record creation stack,
      // and prepend to the block's chain.
      StatInc(thr, StatSyncCreated);
      void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
      res = new(mem) SyncVar(addr);
      res->creation_stack.ObtainCurrent(thr, pc);
      res->next = b->head;
      b->head = res;
    }
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
#endif

  Part *p = &tab_[PartIdx(addr)];
  {
    // Optimistic pass under the part's read lock: most lookups find an
    // existing SyncVar and never need exclusive access to the chain.
    ReadLock l(&p->mtx);
    for (SyncVar *res = p->val; res; res = res->next) {
      if (res->addr == addr) {
        if (write_lock)
          res->mtx.Lock();
        else
          res->mtx.ReadLock();
        return res;
      }
    }
  }
  {
    // Slow pass under the exclusive lock. The chain must be re-scanned:
    // another thread may have inserted the SyncVar between the two passes.
    Lock l(&p->mtx);
    SyncVar *res = p->val;
    for (; res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      StatInc(thr, StatSyncCreated);
      void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
      res = new(mem) SyncVar(addr);
#ifndef TSAN_GO
      // Creation stacks are only collected in the C++ runtime build.
      res->creation_stack.ObtainCurrent(thr, pc);
#endif
      res->next = p->val;
      p->val = res;
    }
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
}
113
// Unlinks and returns the SyncVar for 'addr' (0 if none exists), e.g. when
// the underlying object is destroyed/freed. After unlinking, the Lock()/
// Unlock() pair on the SyncVar's own mutex waits out any thread that found
// the object before removal, so the caller can safely destroy it.
SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
#ifndef TSAN_GO
  // Heap objects: the SyncVar chain hangs off the owning memory block
  // (mirrors the fast path in GetAndLock).
  if (PrimaryAllocator::PointerIsMine((void*)addr)) {
    MBlock *b = user_mblock(thr, (void*)addr);
    SyncVar *res = 0;
    {
      Lock l(&b->mtx);
      // Walk with a pointer-to-link so the match can be spliced out
      // without special-casing the head.
      SyncVar **prev = &b->head;
      res = *prev;
      while (res) {
        if (res->addr == addr) {
          *prev = res->next;
          break;
        }
        prev = &res->next;
        res = *prev;
      }
    }
    if (res) {
      StatInc(thr, StatSyncDestroyed);
      // Drain concurrent users: anyone holding res->mtx finishes first.
      res->mtx.Lock();
      res->mtx.Unlock();
    }
    return res;
  }
#endif

  Part *p = &tab_[PartIdx(addr)];
  SyncVar *res = 0;
  {
    Lock l(&p->mtx);
    SyncVar **prev = &p->val;
    res = *prev;
    while (res) {
      if (res->addr == addr) {
        *prev = res->next;
        break;
      }
      prev = &res->next;
      res = *prev;
    }
  }
  if (res) {
    StatInc(thr, StatSyncDestroyed);
    // Same drain as above: ensure no thread still holds the mutex.
    res->mtx.Lock();
    res->mtx.Unlock();
  }
  return res;
}
163
Dmitry Vyukov15710c92012-05-22 11:33:03 +0000164uptr SyncVar::GetMemoryConsumption() {
165 return sizeof(*this)
166 + clock.size() * sizeof(u64)
167 + read_clock.size() * sizeof(u64)
168 + creation_stack.Size() * sizeof(uptr);
169}
170
171uptr SyncTab::GetMemoryConsumption(uptr *nsync) {
172 uptr mem = 0;
173 for (int i = 0; i < kPartCount; i++) {
174 Part *p = &tab_[i];
175 Lock l(&p->mtx);
176 for (SyncVar *s = p->val; s; s = s->next) {
177 *nsync += 1;
178 mem += s->GetMemoryConsumption();
179 }
180 }
181 return mem;
182}
183
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000184int SyncTab::PartIdx(uptr addr) {
185 return (addr >> 3) % kPartCount;
186}
187
// Empty trace that owns its storage: buffer (s_), count (n_) and capacity
// (c_) all start zero; the buffer is heap-allocated on demand and freed by
// Reset() (c_ == 0 marks heap ownership).
StackTrace::StackTrace()
  : n_()
  , s_()
  , c_() {
}
193
// Trace backed by a caller-provided buffer of capacity 'cnt' slots.
// A non-zero c_ tells Reset()/Init() never to allocate or free: the
// buffer's lifetime is managed by the caller.
StackTrace::StackTrace(uptr *buf, uptr cnt)
  : n_()
  , s_(buf)
  , c_(cnt) {
  CHECK_NE(buf, 0);
  CHECK_NE(cnt, 0);
}
201
// Releases the heap buffer (if this trace owns one) via Reset().
StackTrace::~StackTrace() {
  Reset();
}
205
206void StackTrace::Reset() {
Dmitry Vyukovde1fd1c2012-06-22 11:08:55 +0000207 if (s_ && !c_) {
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000208 CHECK_NE(n_, 0);
209 internal_free(s_);
210 s_ = 0;
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000211 }
Dmitry Vyukovde1fd1c2012-06-22 11:08:55 +0000212 n_ = 0;
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000213}
214
215void StackTrace::Init(const uptr *pcs, uptr cnt) {
216 Reset();
217 if (cnt == 0)
218 return;
Dmitry Vyukovde1fd1c2012-06-22 11:08:55 +0000219 if (c_) {
220 CHECK_NE(s_, 0);
221 CHECK_LE(cnt, c_);
222 } else {
223 s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
224 }
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000225 n_ = cnt;
Dmitry Vyukovde1fd1c2012-06-22 11:08:55 +0000226 internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000227}
228
// Captures the thread's current shadow call stack, optionally appending
// 'toppc' as the topmost frame. Throughout, "!!toppc" is the 0/1 extra
// slot needed for that optional frame.
void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
  Reset();
  // Depth of the shadow stack = distance between base and current position.
  n_ = thr->shadow_stack_pos - thr->shadow_stack;
  if (n_ + !!toppc == 0)
    return;
  if (c_) {
    // Preallocated buffer: it must exist and fit depth + optional top frame.
    CHECK_NE(s_, 0);
    CHECK_LE(n_ + !!toppc, c_);
  } else {
    // Allocate exactly the required number of slots.
    s_ = (uptr*)internal_alloc(MBlockStackTrace,
                               (n_ + !!toppc) * sizeof(s_[0]));
  }
  for (uptr i = 0; i < n_; i++)
    s_[i] = thr->shadow_stack[i];
  if (toppc) {
    s_[n_] = toppc;
    n_++;
  }
}
248
// Deep-copies another trace into this one. The leading Reset() is NOT
// redundant with the Reset() inside Init(): it runs before other.Begin()/
// other.Size() are evaluated, so a self-copy safely degenerates to
// clearing the trace instead of copying from a just-freed buffer.
void StackTrace::CopyFrom(const StackTrace& other) {
  Reset();
  Init(other.Begin(), other.Size());
}
253
// True if no frames have been recorded.
bool StackTrace::IsEmpty() const {
  return n_ == 0;
}
257
// Number of recorded frames.
uptr StackTrace::Size() const {
  return n_;
}
261
262uptr StackTrace::Get(uptr i) const {
263 CHECK_LT(i, n_);
264 return s_[i];
265}
266
// Raw pointer to the first frame (0 if no buffer has been set up).
const uptr *StackTrace::Begin() const {
  return s_;
}
270
271} // namespace __tsan