//===-- tsan_sync.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {

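// Deadlock-detector hook invoked when a new sync object is initialized;
// defined in tsan_rtl_mutex.cc.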
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);

SyncVar::SyncVar()
    : mtx(MutexTypeSyncVar, StatMtxSyncVar) {
  Reset();
}

void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
  this->addr = addr;
  this->uid = uid;
  this->next = 0;

  creation_stack_id = 0;
  if (kCppMode)  // Go does not use them
    creation_stack_id = CurrentStackId(thr, pc);
  if (flags()->detect_deadlocks)
    DDMutexInit(thr, pc, this);
}

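// Returns the object to a pristine state; called from the constructor and
// before the object is recycled back into the allocator.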
void SyncVar::Reset() {
  uid = 0;
  creation_stack_id = 0;
  owner_tid = kInvalidTid;
  last_lock = 0;
  recursion = 0;
  is_rw = 0;
  is_recursive = 0;
  is_broken = 0;
  is_linker_init = 0;

  clock.Zero();
  read_clock.Reset();
}

MetaMap::MetaMap() {
  atomic_store(&uid_gen_, 0, memory_order_relaxed);
}

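// Records a heap block of sz bytes at address p by storing the block
// descriptor index in the meta shadow cell that covers p.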
void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  u32 idx = block_alloc_.Alloc(&thr->block_cache);
  MBlock *b = block_alloc_.Map(idx);
  b->siz = sz;
  b->tid = thr->tid;
  b->stk = CurrentStackId(thr, pc);
  u32 *meta = MemToMeta(p);
  DCHECK_EQ(*meta, 0);
  *meta = idx | kFlagBlock;
}

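// Frees the block starting at p together with any sync objects in its range.
// Returns the size of the reset range, or 0 if no block is mapped at p.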
uptr MetaMap::FreeBlock(ThreadState *thr, uptr pc, uptr p) {
  MBlock *b = GetBlock(p);
  if (b == 0)
    return 0;
  uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
  FreeRange(thr, pc, p, sz);
  return sz;
}

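// Clears every meta cell in [p, p+sz): the block descriptor is returned to
// the block allocator and each chained sync object is reset and freed.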
void MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  u32 *meta = MemToMeta(p);
  u32 *end = MemToMeta(p + sz);
  if (end == meta)
    end++;
  for (; meta < end; meta++) {
    u32 idx = *meta;
    *meta = 0;
    for (;;) {
      if (idx == 0)
        break;
      if (idx & kFlagBlock) {
        block_alloc_.Free(&thr->block_cache, idx & ~kFlagMask);
        break;
      } else if (idx & kFlagSync) {
        SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
        u32 next = s->next;
        s->Reset();
        sync_alloc_.Free(&thr->sync_cache, idx & ~kFlagMask);
        idx = next;
      } else {
        CHECK(0);
      }
    }
  }
}

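// Walks the chain rooted at the meta cell for p, skipping sync objects,
// and returns the block descriptor (or 0 if none is mapped).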
MBlock* MetaMap::GetBlock(uptr p) {
  u32 *meta = MemToMeta(p);
  u32 idx = *meta;
  for (;;) {
    if (idx == 0)
      return 0;
    if (idx & kFlagBlock)
      return block_alloc_.Map(idx & ~kFlagMask);
    DCHECK(idx & kFlagSync);
    SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
    idx = s->next;
  }
}

SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                                     uptr addr, bool write_lock) {
  return GetAndLock(thr, pc, addr, write_lock, true);
}

SyncVar* MetaMap::GetIfExistsAndLock(uptr addr) {
  return GetAndLock(0, 0, addr, true, false);
}

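// Returns the sync object for addr with its mutex acquired (read or write,
// per write_lock). If no object exists and create is set, a new one is
// allocated and published at the head of the meta cell's chain with a
// compare-and-swap, so no heavyweight lock is needed on this path.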
SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock, bool create) {
  u32 *meta = MemToMeta(addr);
  u32 idx0 = *meta;
  u32 myidx = 0;
  SyncVar *mys = 0;
  for (;;) {
    u32 idx = idx0;
    for (;;) {
      if (idx == 0)
        break;
      if (idx & kFlagBlock)
        break;
      DCHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      if (s->addr == addr) {
        if (myidx != 0) {
          mys->Reset();
          sync_alloc_.Free(&thr->sync_cache, myidx);
        }
        if (write_lock)
          s->mtx.Lock();
        else
          s->mtx.ReadLock();
        return s;
      }
      idx = s->next;
    }
    if (!create)
      return 0;
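    // The chain head may have changed while we scanned (another thread
    // published a new sync object); re-read it and rescan before inserting.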
    if (*meta != idx0) {
      idx0 = *meta;
      continue;
    }

    if (myidx == 0) {
      const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
      myidx = sync_alloc_.Alloc(&thr->sync_cache);
      mys = sync_alloc_.Map(myidx);
      mys->Init(thr, pc, addr, uid);
    }
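    // Try to publish the new object at the head of the chain. If the CAS
    // fails, another thread raced us; idx0 now holds the updated head and
    // the outer loop rescans.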
    mys->next = idx0;
    if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
        myidx | kFlagSync, memory_order_release)) {
      if (write_lock)
        mys->mtx.Lock();
      else
        mys->mtx.ReadLock();
      return mys;
    }
  }
}

void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
  // src and dst can overlap; the caller must guarantee that there are no
  // concurrent accesses to the regions (e.g. stop-the-world).
  CHECK_NE(src, dst);
  CHECK_NE(sz, 0);
  uptr diff = dst - src;
  u32 *src_meta = MemToMeta(src);
  u32 *dst_meta = MemToMeta(dst);
  u32 *src_meta_end = MemToMeta(src + sz);
  uptr inc = 1;
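  // When moving to a higher address the regions may overlap, so copy
  // backwards to avoid clobbering destination cells before they are read.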
  if (dst > src) {
    src_meta = MemToMeta(src + sz) - 1;
    dst_meta = MemToMeta(dst + sz) - 1;
    src_meta_end = MemToMeta(src) - 1;
    inc = -1;
  }
  for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) {
    CHECK_EQ(*dst_meta, 0);
    u32 idx = *src_meta;
    *src_meta = 0;
    *dst_meta = idx;
    // Patch the addresses in sync objects.
    while (idx != 0) {
      if (idx & kFlagBlock)
        break;
      CHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      s->addr += diff;
      idx = s->next;
    }
  }
}

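// Flushes the thread's local allocator caches so that an idle thread
// does not pin freed block and sync descriptors.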
void MetaMap::OnThreadIdle(ThreadState *thr) {
  block_alloc_.FlushCache(&thr->block_cache);
  sync_alloc_.FlushCache(&thr->sync_cache);
}

}  // namespace __tsan