//===-- tsan_sync.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {

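// Defined in the mutex runtime; hooks a newly created SyncVar into the
// deadlock detector.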
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);

SyncVar::SyncVar()
    : mtx(MutexTypeSyncVar, StatMtxSyncVar) {
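  // No ThreadState is available in the constructor; passing 0 makes Reset()
  // merely verify that the clocks are empty instead of releasing them.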
  Reset(0);
}

void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
  this->addr = addr;
  this->uid = uid;
  this->next = 0;

  creation_stack_id = 0;
  if (kCppMode)  // Go does not use them
    creation_stack_id = CurrentStackId(thr, pc);
  if (common_flags()->detect_deadlocks)
    DDMutexInit(thr, pc, this);
}

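// Returns the SyncVar to its pristine state. thr == 0 means the call comes
// from the constructor, where the vector clocks must already be empty;
// otherwise the clocks are released into the calling thread's clock cache.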
void SyncVar::Reset(ThreadState *thr) {
  uid = 0;
  creation_stack_id = 0;
  owner_tid = kInvalidTid;
  last_lock = 0;
  recursion = 0;
  is_rw = 0;
  is_recursive = 0;
  is_broken = 0;
  is_linker_init = 0;

  if (thr == 0) {
    CHECK_EQ(clock.size(), 0);
    CHECK_EQ(read_clock.size(), 0);
  } else {
    clock.Reset(&thr->clock_cache);
    read_clock.Reset(&thr->clock_cache);
  }
}

MetaMap::MetaMap() {
  atomic_store(&uid_gen_, 0, memory_order_relaxed);
}

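// Each u32 cell of the meta shadow encodes either a heap block (an allocator
// index tagged with kFlagBlock) or the head of a singly-linked list of
// SyncVar indices tagged with kFlagSync (flag values are in tsan_sync.h).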
void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  u32 idx = block_alloc_.Alloc(&thr->block_cache);
  MBlock *b = block_alloc_.Map(idx);
  b->siz = sz;
  b->tid = thr->tid;
  b->stk = CurrentStackId(thr, pc);
  u32 *meta = MemToMeta(p);
  DCHECK_EQ(*meta, 0);
  *meta = idx | kFlagBlock;
}

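// Frees the block at p together with any sync objects in its address range.
// Returns the size of the freed block (rounded up to the meta-shadow cell),
// or 0 if no block is mapped at p.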
uptr MetaMap::FreeBlock(ThreadState *thr, uptr pc, uptr p) {
  MBlock* b = GetBlock(p);
  if (b == 0)
    return 0;
  uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
  FreeRange(thr, pc, p, sz);
  return sz;
}

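// Zeroes all meta cells in [p, p + sz) and releases the blocks and sync
// objects they reference. Returns true if anything was actually freed;
// ResetRange below uses this as its probing signal.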
bool MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  bool has_something = false;
  u32 *meta = MemToMeta(p);
  u32 *end = MemToMeta(p + sz);
  if (end == meta)
    end++;
  for (; meta < end; meta++) {
    u32 idx = *meta;
    if (idx == 0) {
      // Note: don't write to meta in this case -- the block can be huge.
      continue;
    }
    *meta = 0;
    has_something = true;
    while (idx != 0) {
      if (idx & kFlagBlock) {
        block_alloc_.Free(&thr->block_cache, idx & ~kFlagMask);
        break;
      } else if (idx & kFlagSync) {
        DCHECK(idx & kFlagSync);
        SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
        u32 next = s->next;
        s->Reset(thr);
        sync_alloc_.Free(&thr->sync_cache, idx & ~kFlagMask);
        idx = next;
      } else {
        CHECK(0);
      }
    }
  }
  return has_something;
}

// ResetRange removes all meta objects from the range.
// It is called for large mmap-ed regions. The function is best-effort with
// respect to freeing of meta objects, because we don't want to page in the
// whole range, which can be huge. The function probes pages one-by-one until
// it finds a page without meta objects, at which point it stops freeing.
// Because thread stacks grow top-down, we do the same starting from the end
// of the range as well.
void MetaMap::ResetRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
  const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
  if (sz <= 4 * kPageSize) {
    // If the range is small, just do the normal free procedure.
    FreeRange(thr, pc, p, sz);
    return;
  }
  // First, round both ends of the range to page size.
  uptr diff = RoundUp(p, kPageSize) - p;
  if (diff != 0) {
    FreeRange(thr, pc, p, diff);
    p += diff;
    sz -= diff;
  }
  diff = p + sz - RoundDown(p + sz, kPageSize);
  if (diff != 0) {
    FreeRange(thr, pc, p + sz - diff, diff);
    sz -= diff;
  }
  // Now we must have a non-empty page-aligned range.
  CHECK_GT(sz, 0);
  CHECK_EQ(p, RoundUp(p, kPageSize));
  CHECK_EQ(sz, RoundUp(sz, kPageSize));
  const uptr p0 = p;
  const uptr sz0 = sz;
  // Probe start of the range.
  while (sz > 0) {
    bool has_something = FreeRange(thr, pc, p, kPageSize);
    p += kPageSize;
    sz -= kPageSize;
    if (!has_something)
      break;
  }
  // Probe end of the range.
  while (sz > 0) {
    bool has_something = FreeRange(thr, pc, p + sz - kPageSize, kPageSize);
    sz -= kPageSize;
    if (!has_something)
      break;
  }
  // Finally, page out the whole range (including the parts that we've just
  // freed). Note: we can't simply madvise, because we need to leave a zeroed
  // range (otherwise __tsan_java_move can crash if it encounters left-over
  // meta objects in the java heap).
  uptr metap = (uptr)MemToMeta(p0);
  uptr metasz = sz0 / kMetaRatio;
  UnmapOrDie((void*)metap, metasz);
  MmapFixedNoReserve(metap, metasz);
}

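// Returns the MBlock for address p, or 0 if no block is mapped there,
// skipping over any sync objects linked ahead of it in the meta cell's list.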
MBlock* MetaMap::GetBlock(uptr p) {
  u32 *meta = MemToMeta(p);
  u32 idx = *meta;
  for (;;) {
    if (idx == 0)
      return 0;
    if (idx & kFlagBlock)
      return block_alloc_.Map(idx & ~kFlagMask);
    DCHECK(idx & kFlagSync);
    SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
    idx = s->next;
  }
}

SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                                     uptr addr, bool write_lock) {
  return GetAndLock(thr, pc, addr, write_lock, true);
}

SyncVar* MetaMap::GetIfExistsAndLock(uptr addr) {
  return GetAndLock(0, 0, addr, true, false);
}

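// Scans the meta cell's list for a SyncVar with the given address. If none is
// found and create is set, a fresh SyncVar is prepended to the list with a
// compare-and-swap; on CAS failure the scan restarts. A SyncVar preallocated
// on a previous iteration (mys) is reused, or returned to the cache if another
// thread has already installed one for this address.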
SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock, bool create) {
  u32 *meta = MemToMeta(addr);
  u32 idx0 = *meta;
  u32 myidx = 0;
  SyncVar *mys = 0;
  for (;;) {
    u32 idx = idx0;
    for (;;) {
      if (idx == 0)
        break;
      if (idx & kFlagBlock)
        break;
      DCHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      if (s->addr == addr) {
        if (myidx != 0) {
          mys->Reset(thr);
          sync_alloc_.Free(&thr->sync_cache, myidx);
        }
        if (write_lock)
          s->mtx.Lock();
        else
          s->mtx.ReadLock();
        return s;
      }
      idx = s->next;
    }
    if (!create)
      return 0;
    if (*meta != idx0) {
      idx0 = *meta;
      continue;
    }

    if (myidx == 0) {
      const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
      myidx = sync_alloc_.Alloc(&thr->sync_cache);
      mys = sync_alloc_.Map(myidx);
      mys->Init(thr, pc, addr, uid);
    }
    mys->next = idx0;
    if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
        myidx | kFlagSync, memory_order_release)) {
      if (write_lock)
        mys->mtx.Lock();
      else
        mys->mtx.ReadLock();
      return mys;
    }
  }
}

void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
  // src and dst can overlap; the caller guarantees that there are no
  // concurrent accesses to the regions (e.g. stop-the-world).
  CHECK_NE(src, dst);
  CHECK_NE(sz, 0);
  uptr diff = dst - src;
  u32 *src_meta = MemToMeta(src);
  u32 *dst_meta = MemToMeta(dst);
  u32 *src_meta_end = MemToMeta(src + sz);
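  // When moving to a higher address, copy backwards so that overlapping
  // source and destination meta ranges are handled correctly (the same rule
  // memmove follows).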
  uptr inc = 1;
  if (dst > src) {
    src_meta = MemToMeta(src + sz) - 1;
    dst_meta = MemToMeta(dst + sz) - 1;
    src_meta_end = MemToMeta(src) - 1;
    inc = -1;
  }
  for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) {
    CHECK_EQ(*dst_meta, 0);
    u32 idx = *src_meta;
    *src_meta = 0;
    *dst_meta = idx;
    // Patch the addresses in sync objects.
    while (idx != 0) {
      if (idx & kFlagBlock)
        break;
      CHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      s->addr += diff;
      idx = s->next;
    }
  }
}

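// Flushes this thread's local block/sync allocator caches back to the global
// allocators so the memory can be reused by other threads.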
void MetaMap::OnThreadIdle(ThreadState *thr) {
  block_alloc_.FlushCache(&thr->block_cache);
  sync_alloc_.FlushCache(&thr->sync_cache);
}

}  // namespace __tsan