//===-- tsan_sync.cc --------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
13#include "tsan_sync.h"
14#include "tsan_placement_new.h"
15#include "tsan_rtl.h"
16#include "tsan_mman.h"
17
18namespace __tsan {
19
// A SyncVar describes the runtime state of one user-level synchronization
// object (e.g. a mutex) identified by its address.
SyncVar::SyncVar(uptr addr)
  : mtx(MutexTypeSyncVar, StatMtxSyncVar)  // internal lock guarding this record
  , addr(addr)          // address of the user synchronization object
  , owner_tid(kInvalidTid)  // no owner thread until first acquisition
  , recursion()         // zero-initialized recursion depth
  , is_rw()             // zero-initialized flags
  , is_recursive()
  , is_broken() {
}
29
// One hash-table partition: a mutex protecting an intrusive singly-linked
// list of SyncVars whose addresses map to this partition.
SyncTab::Part::Part()
  : mtx(MutexTypeSyncTab, StatMtxSyncTab)
  , val() {  // empty list
}
34
// Nothing to do: each Part initializes itself via its own constructor.
SyncTab::SyncTab() {
}
37
38SyncTab::~SyncTab() {
39 for (int i = 0; i < kPartCount; i++) {
40 while (tab_[i].val) {
41 SyncVar *tmp = tab_[i].val;
42 tab_[i].val = tmp->next;
43 DestroyAndFree(tmp);
44 }
45 }
46}
47
// Returns the SyncVar associated with 'addr', creating it on first use.
// The returned SyncVar's own mutex is acquired (exclusively if 'write_lock',
// shared otherwise); the caller is responsible for releasing it.
SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock) {
  Part *p = &tab_[PartIdx(addr)];
  {
    // Fast path: look up the existing SyncVar under the partition read lock.
    ReadLock l(&p->mtx);
    for (SyncVar *res = p->val; res; res = res->next) {
      if (res->addr == addr) {
        if (write_lock)
          res->mtx.Lock();
        else
          res->mtx.ReadLock();
        return res;
      }
    }
  }
  {
    // Slow path: retake the partition lock exclusively and re-scan, because
    // another thread may have inserted the SyncVar between the two lockings.
    Lock l(&p->mtx);
    SyncVar *res = p->val;
    for (; res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      // Still absent: allocate a new SyncVar, record where it was created,
      // and prepend it to the partition's list.
      StatInc(thr, StatSyncCreated);
      void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
      res = new(mem) SyncVar(addr);
      res->creation_stack.ObtainCurrent(thr, pc);
      res->next = p->val;
      p->val = res;
    }
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
}
85
// Unlinks and returns the SyncVar for 'addr' (0 if none exists), e.g. when
// the user mutex is destroyed.  Ownership of the returned object passes to
// the caller.
SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
  Part *p = &tab_[PartIdx(addr)];
  SyncVar *res = 0;
  {
    Lock l(&p->mtx);
    // Walk the list keeping a pointer to the previous link field so a
    // match can be unlinked in place.
    SyncVar **prev = &p->val;
    res = *prev;
    while (res) {
      if (res->addr == addr) {
        *prev = res->next;
        break;
      }
      prev = &res->next;
      res = *prev;
    }
  }
  if (res) {
    StatInc(thr, StatSyncDestroyed);
    // Lock/unlock the SyncVar's own mutex to wait out any thread that
    // looked it up before it was unlinked above.
    res->mtx.Lock();
    res->mtx.Unlock();
  }
  return res;
}
109
110int SyncTab::PartIdx(uptr addr) {
111 return (addr >> 3) % kPartCount;
112}
113
// Constructs an empty stack trace (no frames, no heap allocation).
StackTrace::StackTrace()
  : n_()   // frame count
  , s_() { // heap-allocated pc array, 0 when empty
}
118
StackTrace::~StackTrace() {
  // Frees the pc array, if one was allocated.
  Reset();
}
122
123void StackTrace::Reset() {
124 if (s_) {
125 CHECK_NE(n_, 0);
126 internal_free(s_);
127 s_ = 0;
128 n_ = 0;
129 }
130}
131
132void StackTrace::Init(const uptr *pcs, uptr cnt) {
133 Reset();
134 if (cnt == 0)
135 return;
136 n_ = cnt;
137 s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
138 internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
139}
140
141void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
142 Reset();
143 n_ = thr->shadow_stack_pos - &thr->shadow_stack[0];
144 if (n_ + !!toppc == 0)
145 return;
146 s_ = (uptr*)internal_alloc(MBlockStackTrace, (n_ + !!toppc) * sizeof(s_[0]));
147 for (uptr i = 0; i < n_; i++)
148 s_[i] = thr->shadow_stack[i];
149 if (toppc) {
150 s_[n_] = toppc;
151 n_++;
152 }
153}
154
155void StackTrace::CopyFrom(const StackTrace& other) {
156 Reset();
157 Init(other.Begin(), other.Size());
158}
159
160bool StackTrace::IsEmpty() const {
161 return n_ == 0;
162}
163
// Returns the number of frames in the trace.
uptr StackTrace::Size() const {
  return n_;
}
167
// Returns the i-th pc; i must be less than Size().
uptr StackTrace::Get(uptr i) const {
  CHECK_LT(i, n_);
  return s_[i];
}
172
// Returns a pointer to the first stored pc (0 when the trace is empty).
const uptr *StackTrace::Begin() const {
  return s_;
}
176
177} // namespace __tsan