//===-- tsan_sync.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {

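// Initializes deadlock-detector state for a mutex (defined in the runtime's
// mutex support code); called from SyncVar::Init below when detect_deadlocks
// is enabled.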
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);

SyncVar::SyncVar()
    : mtx(MutexTypeSyncVar, StatMtxSyncVar) {
  Reset(0);
}

void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
  this->addr = addr;
  this->uid = uid;
  this->next = 0;

  creation_stack_id = 0;
  if (kCppMode)  // Go does not use them
    creation_stack_id = CurrentStackId(thr, pc);
  if (common_flags()->detect_deadlocks)
    DDMutexInit(thr, pc, this);
}

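// Reset returns the object to its initial state. thr may be null (as in the
// constructor above): in that case the vector clocks are expected to be empty
// already; otherwise they are released into the thread's clock cache.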
void SyncVar::Reset(ThreadState *thr) {
  uid = 0;
  creation_stack_id = 0;
  owner_tid = kInvalidTid;
  last_lock = 0;
  recursion = 0;
  is_rw = 0;
  is_recursive = 0;
  is_broken = 0;
  is_linker_init = 0;

  if (thr == 0) {
    CHECK_EQ(clock.size(), 0);
    CHECK_EQ(read_clock.size(), 0);
  } else {
    clock.Reset(&thr->clock_cache);
    read_clock.Reset(&thr->clock_cache);
  }
}

MetaMap::MetaMap() {
  atomic_store(&uid_gen_, 0, memory_order_relaxed);
}

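// Each meta shadow cell is a u32 that holds an index into block_alloc_ or
// sync_alloc_, tagged with kFlagBlock or kFlagSync. Sync objects mapped to
// the same cell are chained through SyncVar::next; the chain is terminated
// either by 0 or by the (single) heap block index.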
void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  u32 idx = block_alloc_.Alloc(&thr->block_cache);
  MBlock *b = block_alloc_.Map(idx);
  b->siz = sz;
  b->tid = thr->tid;
  b->stk = CurrentStackId(thr, pc);
  u32 *meta = MemToMeta(p);
  DCHECK_EQ(*meta, 0);
  *meta = idx | kFlagBlock;
}

uptr MetaMap::FreeBlock(ThreadState *thr, uptr pc, uptr p) {
  MBlock* b = GetBlock(p);
  if (b == 0)
    return 0;
  uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
  FreeRange(thr, pc, p, sz);
  return sz;
}

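// FreeRange frees all heap block and sync object metainfo in [p, p+sz) and
// reports whether it found anything to free (ResetRange below uses the result
// to decide when to stop probing).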
bool MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  bool has_something = false;
  u32 *meta = MemToMeta(p);
  u32 *end = MemToMeta(p + sz);
  if (end == meta)
    end++;
  for (; meta < end; meta++) {
    u32 idx = *meta;
    *meta = 0;
    for (;;) {
      if (idx == 0)
        break;
      has_something = true;
      if (idx & kFlagBlock) {
        block_alloc_.Free(&thr->block_cache, idx & ~kFlagMask);
        break;
      } else if (idx & kFlagSync) {
        DCHECK(idx & kFlagSync);
        SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
        u32 next = s->next;
        s->Reset(thr);
        sync_alloc_.Free(&thr->sync_cache, idx & ~kFlagMask);
        idx = next;
      } else {
        CHECK(0);
      }
    }
  }
  return has_something;
}

// ResetRange removes all meta objects from the range.
// It is called for large mmap-ed regions. The function is best-effort w.r.t.
// freeing of meta objects, because we don't want to page in the whole range,
// which can be huge. The function probes pages one-by-one until it finds a
// page without meta objects; at that point it stops freeing. Because thread
// stacks grow top-down, the same probing is done from the end of the range.
void MetaMap::ResetRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
  const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
  if (sz <= 4 * kPageSize) {
    // If the range is small, just do the normal free procedure.
    FreeRange(thr, pc, p, sz);
    return;
  }
  // First, round both ends of the range to page size.
  uptr diff = RoundUp(p, kPageSize) - p;
  if (diff != 0) {
    FreeRange(thr, pc, p, diff);
    p += diff;
    sz -= diff;
  }
  diff = p + sz - RoundDown(p + sz, kPageSize);
  if (diff != 0) {
    FreeRange(thr, pc, p + sz - diff, diff);
    sz -= diff;
  }
  // Now we must have a non-empty page-aligned range.
  CHECK_GT(sz, 0);
  CHECK_EQ(p, RoundUp(p, kPageSize));
  CHECK_EQ(sz, RoundUp(sz, kPageSize));
  const uptr p0 = p;
  const uptr sz0 = sz;
  // Probe start of the range.
  while (sz > 0) {
    bool has_something = FreeRange(thr, pc, p, kPageSize);
    p += kPageSize;
    sz -= kPageSize;
    if (!has_something)
      break;
  }
  // Probe end of the range.
  while (sz > 0) {
    bool has_something = FreeRange(thr, pc, p - kPageSize, kPageSize);
    sz -= kPageSize;
    if (!has_something)
      break;
  }
  // Finally, page out the whole range (including the parts that we've just
  // freed). Note: we can't simply madvise, because we need to leave a zeroed
  // range (otherwise __tsan_java_move can crash if it encounters left-over
  // meta objects in the Java heap).
  UnmapOrDie((void*)p0, sz0);
  MmapFixedNoReserve(p0, sz0);
}

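// GetBlock returns the heap block descriptor recorded in the meta cell for
// address p, or 0 if there is none; sync objects chained ahead of the block
// in the same cell are skipped.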
MBlock* MetaMap::GetBlock(uptr p) {
  u32 *meta = MemToMeta(p);
  u32 idx = *meta;
  for (;;) {
    if (idx == 0)
      return 0;
    if (idx & kFlagBlock)
      return block_alloc_.Map(idx & ~kFlagMask);
    DCHECK(idx & kFlagSync);
    SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
    idx = s->next;
  }
}

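// Both lookup entry points below are thin wrappers around GetAndLock.
// GetIfExistsAndLock never creates a SyncVar and always takes the write lock.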
SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                                     uptr addr, bool write_lock) {
  return GetAndLock(thr, pc, addr, write_lock, true);
}

SyncVar* MetaMap::GetIfExistsAndLock(uptr addr) {
  return GetAndLock(0, 0, addr, true, false);
}

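// GetAndLock looks up the SyncVar for addr in the chain hanging off the meta
// cell. If it is found, it is locked (read or write) and returned. Otherwise,
// if create is set, a new SyncVar is prepared off to the side, linked in front
// of the current chain head and published with a release CAS on the meta cell;
// on CAS failure the whole lookup is retried. A speculatively allocated
// SyncVar is reset and returned to the cache if another thread wins the race
// and installs a SyncVar for the same address first.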
SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock, bool create) {
  u32 *meta = MemToMeta(addr);
  u32 idx0 = *meta;
  u32 myidx = 0;
  SyncVar *mys = 0;
  for (;;) {
    u32 idx = idx0;
    for (;;) {
      if (idx == 0)
        break;
      if (idx & kFlagBlock)
        break;
      DCHECK(idx & kFlagSync);
      SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
      if (s->addr == addr) {
        if (myidx != 0) {
          mys->Reset(thr);
          sync_alloc_.Free(&thr->sync_cache, myidx);
        }
        if (write_lock)
          s->mtx.Lock();
        else
          s->mtx.ReadLock();
        return s;
      }
      idx = s->next;
    }
    if (!create)
      return 0;
    if (*meta != idx0) {
      idx0 = *meta;
      continue;
    }

    if (myidx == 0) {
      const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
      myidx = sync_alloc_.Alloc(&thr->sync_cache);
      mys = sync_alloc_.Map(myidx);
      mys->Init(thr, pc, addr, uid);
    }
    mys->next = idx0;
    if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
        myidx | kFlagSync, memory_order_release)) {
      if (write_lock)
        mys->mtx.Lock();
      else
        mys->mtx.ReadLock();
      return mys;
    }
  }
}

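// MoveMemory transfers the meta info (heap blocks and sync objects) for the
// range [src, src+sz) to [dst, dst+sz) and patches the addresses recorded in
// the moved sync objects. The destination meta cells must be empty.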
void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
  // src and dst can overlap; the caller guarantees that there are no
  // concurrent accesses to the regions (e.g. stop-the-world).
  CHECK_NE(src, dst);
  CHECK_NE(sz, 0);
  uptr diff = dst - src;
  u32 *src_meta = MemToMeta(src);
  u32 *dst_meta = MemToMeta(dst);
  u32 *src_meta_end = MemToMeta(src + sz);
  uptr inc = 1;
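  // If dst is above src the regions can overlap from the destination side;
  // walk the cells backwards so that each source cell is read and zeroed
  // before the copy can reach it as a destination.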
  if (dst > src) {
    src_meta = MemToMeta(src + sz) - 1;
    dst_meta = MemToMeta(dst + sz) - 1;
    src_meta_end = MemToMeta(src) - 1;
    inc = -1;
  }
  for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) {
    CHECK_EQ(*dst_meta, 0);
    u32 idx = *src_meta;
    *src_meta = 0;
    *dst_meta = idx;
    // Patch the addresses in sync objects.
    while (idx != 0) {
      if (idx & kFlagBlock)
        break;
      CHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      s->addr += diff;
      idx = s->next;
    }
  }
}

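// OnThreadIdle returns the thread's local block and sync allocator caches to
// the shared pools so that an idle or exiting thread does not pin free memory.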
void MetaMap::OnThreadIdle(ThreadState *thr) {
  block_alloc_.FlushCache(&thr->block_cache);
  sync_alloc_.FlushCache(&thr->sync_cache);
}

}  // namespace __tsan