//===-- tsan_sync.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {

20SyncVar::SyncVar(uptr addr)
21 : mtx(MutexTypeSyncVar, StatMtxSyncVar)
22 , addr(addr)
23 , owner_tid(kInvalidTid)
24 , recursion()
25 , is_rw()
26 , is_recursive()
27 , is_broken() {
28}
29
30SyncTab::Part::Part()
31 : mtx(MutexTypeSyncTab, StatMtxSyncTab)
32 , val() {
33}
34
35SyncTab::SyncTab() {
36}
37
38SyncTab::~SyncTab() {
39 for (int i = 0; i < kPartCount; i++) {
40 while (tab_[i].val) {
41 SyncVar *tmp = tab_[i].val;
42 tab_[i].val = tmp->next;
43 DestroyAndFree(tmp);
44 }
45 }
46}
47
48SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
49 uptr addr, bool write_lock) {
50 Part *p = &tab_[PartIdx(addr)];
51 {
52 ReadLock l(&p->mtx);
53 for (SyncVar *res = p->val; res; res = res->next) {
54 if (res->addr == addr) {
55 if (write_lock)
56 res->mtx.Lock();
57 else
58 res->mtx.ReadLock();
59 return res;
60 }
61 }
62 }
63 {
64 Lock l(&p->mtx);
65 SyncVar *res = p->val;
66 for (; res; res = res->next) {
67 if (res->addr == addr)
68 break;
69 }
70 if (res == 0) {
71 StatInc(thr, StatSyncCreated);
72 void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
73 res = new(mem) SyncVar(addr);
Dmitry Vyukov536551d2012-07-27 13:21:33 +000074#ifndef TSAN_GO
Kostya Serebryany4ad375f2012-05-10 13:48:04 +000075 res->creation_stack.ObtainCurrent(thr, pc);
Dmitry Vyukov536551d2012-07-27 13:21:33 +000076#endif
Kostya Serebryany4ad375f2012-05-10 13:48:04 +000077 res->next = p->val;
78 p->val = res;
79 }
80 if (write_lock)
81 res->mtx.Lock();
82 else
83 res->mtx.ReadLock();
84 return res;
85 }
86}
87
88SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
89 Part *p = &tab_[PartIdx(addr)];
90 SyncVar *res = 0;
91 {
92 Lock l(&p->mtx);
93 SyncVar **prev = &p->val;
94 res = *prev;
95 while (res) {
96 if (res->addr == addr) {
97 *prev = res->next;
98 break;
99 }
100 prev = &res->next;
101 res = *prev;
102 }
103 }
104 if (res) {
105 StatInc(thr, StatSyncDestroyed);
106 res->mtx.Lock();
107 res->mtx.Unlock();
108 }
109 return res;
110}
111
Dmitry Vyukov15710c92012-05-22 11:33:03 +0000112uptr SyncVar::GetMemoryConsumption() {
113 return sizeof(*this)
114 + clock.size() * sizeof(u64)
115 + read_clock.size() * sizeof(u64)
116 + creation_stack.Size() * sizeof(uptr);
117}
118
119uptr SyncTab::GetMemoryConsumption(uptr *nsync) {
120 uptr mem = 0;
121 for (int i = 0; i < kPartCount; i++) {
122 Part *p = &tab_[i];
123 Lock l(&p->mtx);
124 for (SyncVar *s = p->val; s; s = s->next) {
125 *nsync += 1;
126 mem += s->GetMemoryConsumption();
127 }
128 }
129 return mem;
130}
131
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000132int SyncTab::PartIdx(uptr addr) {
133 return (addr >> 3) % kPartCount;
134}
135
136StackTrace::StackTrace()
137 : n_()
Dmitry Vyukovde1fd1c2012-06-22 11:08:55 +0000138 , s_()
139 , c_() {
140}
141
142StackTrace::StackTrace(uptr *buf, uptr cnt)
143 : n_()
144 , s_(buf)
145 , c_(cnt) {
146 CHECK_NE(buf, 0);
147 CHECK_NE(cnt, 0);
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000148}
149
150StackTrace::~StackTrace() {
151 Reset();
152}
153
154void StackTrace::Reset() {
Dmitry Vyukovde1fd1c2012-06-22 11:08:55 +0000155 if (s_ && !c_) {
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000156 CHECK_NE(n_, 0);
157 internal_free(s_);
158 s_ = 0;
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000159 }
Dmitry Vyukovde1fd1c2012-06-22 11:08:55 +0000160 n_ = 0;
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000161}
162
163void StackTrace::Init(const uptr *pcs, uptr cnt) {
164 Reset();
165 if (cnt == 0)
166 return;
Dmitry Vyukovde1fd1c2012-06-22 11:08:55 +0000167 if (c_) {
168 CHECK_NE(s_, 0);
169 CHECK_LE(cnt, c_);
170 } else {
171 s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
172 }
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000173 n_ = cnt;
Dmitry Vyukovde1fd1c2012-06-22 11:08:55 +0000174 internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000175}
176
177void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
178 Reset();
Dmitry Vyukov5bfac972012-07-16 16:44:47 +0000179 n_ = thr->shadow_stack_pos - thr->shadow_stack;
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000180 if (n_ + !!toppc == 0)
181 return;
Dmitry Vyukovde1fd1c2012-06-22 11:08:55 +0000182 if (c_) {
183 CHECK_NE(s_, 0);
184 CHECK_LE(n_ + !!toppc, c_);
185 } else {
186 s_ = (uptr*)internal_alloc(MBlockStackTrace,
187 (n_ + !!toppc) * sizeof(s_[0]));
188 }
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000189 for (uptr i = 0; i < n_; i++)
190 s_[i] = thr->shadow_stack[i];
191 if (toppc) {
192 s_[n_] = toppc;
193 n_++;
194 }
195}
196
197void StackTrace::CopyFrom(const StackTrace& other) {
198 Reset();
199 Init(other.Begin(), other.Size());
200}
201
202bool StackTrace::IsEmpty() const {
203 return n_ == 0;
204}
205
206uptr StackTrace::Size() const {
207 return n_;
208}
209
210uptr StackTrace::Get(uptr i) const {
211 CHECK_LT(i, n_);
212 return s_[i];
213}
214
215const uptr *StackTrace::Begin() const {
216 return s_;
217}
218
}  // namespace __tsan