//===-- tsan_sync.cc ------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {

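// A SyncVar describes a single user synchronization object (e.g. a mutex)
// identified by its address. The constructor only zero-initializes the
// bookkeeping fields; the vector clocks start out empty.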
SyncVar::SyncVar(uptr addr)
  : mtx(MutexTypeSyncVar, StatMtxSyncVar)
  , addr(addr)
  , owner_tid(kInvalidTid)
  , recursion()
  , is_rw()
  , is_recursive()
  , is_broken() {
}

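// The sync table is split into kPartCount independent parts, each with its
// own mutex and list of SyncVars, so that unrelated addresses do not contend
// on a single table lock.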
SyncTab::Part::Part()
  : mtx(MutexTypeSyncTab, StatMtxSyncTab)
  , val() {
}

SyncTab::SyncTab() {
}

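// Destroys and frees every SyncVar still linked into the table.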
SyncTab::~SyncTab() {
  for (int i = 0; i < kPartCount; i++) {
    while (tab_[i].val) {
      SyncVar *tmp = tab_[i].val;
      tab_[i].val = tmp->next;
      DestroyAndFree(tmp);
    }
  }
}

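// Returns the SyncVar for 'addr', creating it if it does not exist yet.
// Fast path: take the part mutex in read mode and search the list.
// Slow path: retake the mutex in write mode, re-check (another thread may
// have inserted the variable in between), and insert a new SyncVar if needed.
// The returned SyncVar is locked (read- or write-locked per 'write_lock').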
SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock) {
  Part *p = &tab_[PartIdx(addr)];
  {
    ReadLock l(&p->mtx);
    for (SyncVar *res = p->val; res; res = res->next) {
      if (res->addr == addr) {
        if (write_lock)
          res->mtx.Lock();
        else
          res->mtx.ReadLock();
        return res;
      }
    }
  }
  {
    Lock l(&p->mtx);
    SyncVar *res = p->val;
    for (; res; res = res->next) {
      if (res->addr == addr)
        break;
    }
    if (res == 0) {
      StatInc(thr, StatSyncCreated);
      void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
      res = new(mem) SyncVar(addr);
#ifndef TSAN_GO
      res->creation_stack.ObtainCurrent(thr, pc);
#endif
      res->next = p->val;
      p->val = res;
    }
    if (write_lock)
      res->mtx.Lock();
    else
      res->mtx.ReadLock();
    return res;
  }
}

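// Unlinks the SyncVar for 'addr' from the table (typically when the
// underlying object is destroyed) and returns it, or returns 0 if it was
// never created. The lock/unlock pair waits for any operation that still
// holds the SyncVar's mutex to drain before the caller frees it.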
SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
  Part *p = &tab_[PartIdx(addr)];
  SyncVar *res = 0;
  {
    Lock l(&p->mtx);
    SyncVar **prev = &p->val;
    res = *prev;
    while (res) {
      if (res->addr == addr) {
        *prev = res->next;
        break;
      }
      prev = &res->next;
      res = *prev;
    }
  }
  if (res) {
    StatInc(thr, StatSyncDestroyed);
    res->mtx.Lock();
    res->mtx.Unlock();
  }
  return res;
}

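// Approximates the memory held by one SyncVar: the object itself plus its
// two vector clocks and the creation stack.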
uptr SyncVar::GetMemoryConsumption() {
  return sizeof(*this)
      + clock.size() * sizeof(u64)
      + read_clock.size() * sizeof(u64)
      + creation_stack.Size() * sizeof(uptr);
}

uptr SyncTab::GetMemoryConsumption(uptr *nsync) {
  uptr mem = 0;
  for (int i = 0; i < kPartCount; i++) {
    Part *p = &tab_[i];
    Lock l(&p->mtx);
    for (SyncVar *s = p->val; s; s = s->next) {
      *nsync += 1;
      mem += s->GetMemoryConsumption();
    }
  }
  return mem;
}

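// Maps an address to a table part. The low 3 bits are dropped first; sync
// objects are normally at least word-aligned, so those bits carry little
// information.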
int SyncTab::PartIdx(uptr addr) {
  return (addr >> 3) % kPartCount;
}

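// A StackTrace either owns a heap-allocated buffer (c_ == 0) or wraps a
// caller-provided buffer of capacity c_ that it must not free.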
StackTrace::StackTrace()
  : n_()
  , s_()
  , c_() {
}

StackTrace::StackTrace(uptr *buf, uptr cnt)
  : n_()
  , s_(buf)
  , c_(cnt) {
  CHECK_NE(buf, 0);
  CHECK_NE(cnt, 0);
}

StackTrace::~StackTrace() {
  Reset();
}

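// Releases the trace. The buffer is freed only if it was allocated here;
// a caller-provided buffer (c_ != 0) is left untouched.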
void StackTrace::Reset() {
  if (s_ && !c_) {
    CHECK_NE(n_, 0);
    internal_free(s_);
    s_ = 0;
  }
  n_ = 0;
}

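// Copies 'cnt' program counters from 'pcs' into the trace, allocating a
// buffer if the trace does not wrap a caller-provided one.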
void StackTrace::Init(const uptr *pcs, uptr cnt) {
  Reset();
  if (cnt == 0)
    return;
  if (c_) {
    CHECK_NE(s_, 0);
    CHECK_LE(cnt, c_);
  } else {
    s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
  }
  n_ = cnt;
  internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
}

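// Captures the calling thread's current shadow (call) stack and, if 'toppc'
// is non-zero, appends it as the topmost frame.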
void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
  Reset();
  n_ = thr->shadow_stack_pos - thr->shadow_stack;
  if (n_ + !!toppc == 0)
    return;
  if (c_) {
    CHECK_NE(s_, 0);
    CHECK_LE(n_ + !!toppc, c_);
  } else {
    s_ = (uptr*)internal_alloc(MBlockStackTrace,
                               (n_ + !!toppc) * sizeof(s_[0]));
  }
  for (uptr i = 0; i < n_; i++)
    s_[i] = thr->shadow_stack[i];
  if (toppc) {
    s_[n_] = toppc;
    n_++;
  }
}

void StackTrace::CopyFrom(const StackTrace& other) {
  Reset();
  Init(other.Begin(), other.Size());
}

bool StackTrace::IsEmpty() const {
  return n_ == 0;
}

uptr StackTrace::Size() const {
  return n_;
}

uptr StackTrace::Get(uptr i) const {
  CHECK_LT(i, n_);
  return s_[i];
}

const uptr *StackTrace::Begin() const {
  return s_;
}

}  // namespace __tsan