//===-- tsan_sync.cc ------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {

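// Forward declaration: the definition lives with the rest of the deadlock
// detector integration (tsan_rtl_mutex.cc at the time of writing).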
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);

SyncVar::SyncVar()
    : mtx(MutexTypeSyncVar, StatMtxSyncVar) {
  Reset(0);
}

void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
  this->addr = addr;
  this->uid = uid;
  this->next = 0;

  creation_stack_id = 0;
  if (kCppMode)  // Go does not use them
    creation_stack_id = CurrentStackId(thr, pc);
  if (common_flags()->detect_deadlocks)
    DDMutexInit(thr, pc, this);
}

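// thr == 0 means the call comes from the SyncVar constructor, before any
// vector clocks could have been allocated; in that case there is nothing to
// return to the clock cache and the CHECKs below assert exactly that.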
void SyncVar::Reset(ThreadState *thr) {
  uid = 0;
  creation_stack_id = 0;
  owner_tid = kInvalidTid;
  last_lock = 0;
  recursion = 0;
  is_rw = 0;
  is_recursive = 0;
  is_broken = 0;
  is_linker_init = 0;

  if (thr == 0) {
    CHECK_EQ(clock.size(), 0);
    CHECK_EQ(read_clock.size(), 0);
  } else {
    clock.Reset(&thr->clock_cache);
    read_clock.Reset(&thr->clock_cache);
  }
}

MetaMap::MetaMap() {
  atomic_store(&uid_gen_, 0, memory_order_relaxed);
}

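// Each meta shadow cell is a u32 that encodes a dense-allocator index tagged
// with one of the kFlag* bits (see the flag constants declared with MetaMap
// in tsan_sync.h). A sketch of the possible states of a cell:
//   *meta == 0                -> no meta objects for this address range
//   *meta == idx | kFlagBlock -> idx refers to the MBlock of a heap block
//   *meta == idx | kFlagSync  -> idx refers to the head SyncVar of a list
//                                chained through SyncVar::next; the chain may
//                                be terminated by a kFlagBlock entry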
void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  u32 idx = block_alloc_.Alloc(&thr->block_cache);
  MBlock *b = block_alloc_.Map(idx);
  b->siz = sz;
  b->tid = thr->tid;
  b->stk = CurrentStackId(thr, pc);
  u32 *meta = MemToMeta(p);
  DCHECK_EQ(*meta, 0);
  *meta = idx | kFlagBlock;
}

uptr MetaMap::FreeBlock(ThreadState *thr, uptr pc, uptr p) {
  MBlock* b = GetBlock(p);
  if (b == 0)
    return 0;
  uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
  FreeRange(thr, pc, p, sz);
  return sz;
}

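// FreeRange frees all meta objects (heap block descriptors and sync objects)
// in the range [p, p+sz). For each meta cell the inner loop walks the chain:
// sync objects are reset and returned to the allocator one by one, and a
// block entry, which always terminates a chain, is freed last. Returns true
// if the range contained at least one meta object.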
bool MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  bool has_something = false;
  u32 *meta = MemToMeta(p);
  u32 *end = MemToMeta(p + sz);
  if (end == meta)
    end++;
  for (; meta < end; meta++) {
    u32 idx = *meta;
    *meta = 0;
    for (;;) {
      if (idx == 0)
        break;
      has_something = true;
      if (idx & kFlagBlock) {
        block_alloc_.Free(&thr->block_cache, idx & ~kFlagMask);
        break;
      } else if (idx & kFlagSync) {
        DCHECK(idx & kFlagSync);
        SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
        u32 next = s->next;
        s->Reset(thr);
        sync_alloc_.Free(&thr->sync_cache, idx & ~kFlagMask);
        idx = next;
      } else {
        CHECK(0);
      }
    }
  }
  return has_something;
}

// ResetRange removes all meta objects from the range.
// It is called for large mmap-ed regions. The function is best-effort w.r.t.
// freeing of meta objects, because we don't want to page in the whole range,
// which can be huge. The function probes pages one by one until it finds a
// page without meta objects; at that point it stops freeing meta objects.
// Because thread stacks grow top-down, we do the same starting from the end
// as well.
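// For example, with the usual shadow parameters (kMetaShadowCell = 8 bytes of
// application memory described by kMetaShadowSize = 4 bytes of meta shadow,
// see tsan_defs.h), kMetaRatio is 2 and each probe below covers two OS pages
// of application memory, i.e. exactly one OS page of meta shadow.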
void MetaMap::ResetRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
  const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
  if (sz <= 4 * kPageSize) {
    // If the range is small, just do the normal free procedure.
    FreeRange(thr, pc, p, sz);
    return;
  }
  // First, round both ends of the range to page size.
  uptr diff = RoundUp(p, kPageSize) - p;
  if (diff != 0) {
    FreeRange(thr, pc, p, diff);
    p += diff;
    sz -= diff;
  }
  diff = p + sz - RoundDown(p + sz, kPageSize);
  if (diff != 0) {
    FreeRange(thr, pc, p + sz - diff, diff);
    sz -= diff;
  }
  // Now we must have a non-empty page-aligned range.
  CHECK_GT(sz, 0);
  CHECK_EQ(p, RoundUp(p, kPageSize));
  CHECK_EQ(sz, RoundUp(sz, kPageSize));
  const uptr p0 = p;
  const uptr sz0 = sz;
  // Probe start of the range.
  while (sz > 0) {
    bool has_something = FreeRange(thr, pc, p, kPageSize);
    p += kPageSize;
    sz -= kPageSize;
    if (!has_something)
      break;
  }
  // Probe end of the range.
  while (sz > 0) {
    bool has_something = FreeRange(thr, pc, p + sz - kPageSize, kPageSize);
    sz -= kPageSize;
    if (!has_something)
      break;
  }
  // Finally, page out the whole range (including the parts that we've just
  // freed). Note: we can't simply madvise, because we need to leave a zeroed
  // range (otherwise __tsan_java_move can crash if it encounters left-over
  // meta objects in the Java heap).
  UnmapOrDie((void*)p0, sz0);
  MmapFixedNoReserve(p0, sz0);
}

MBlock* MetaMap::GetBlock(uptr p) {
  u32 *meta = MemToMeta(p);
  u32 idx = *meta;
  for (;;) {
    if (idx == 0)
      return 0;
    if (idx & kFlagBlock)
      return block_alloc_.Map(idx & ~kFlagMask);
    DCHECK(idx & kFlagSync);
    SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
    idx = s->next;
  }
}

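// The two public lookup entry points below differ only in the create flag.
// Note that GetIfExistsAndLock does not need a ThreadState (it passes
// thr = 0) because it never allocates, and it always write-locks the result.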
SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                                     uptr addr, bool write_lock) {
  return GetAndLock(thr, pc, addr, write_lock, true);
}

SyncVar* MetaMap::GetIfExistsAndLock(uptr addr) {
  return GetAndLock(0, 0, addr, true, false);
}

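// GetAndLock implements the lookup. The meta cell is updated with a lock-free
// protocol: a new SyncVar is prepared off to the side (myidx/mys), linked in
// front of the current chain and published with a release compare-exchange;
// on CAS failure the chain is re-read and the scan restarts, and the
// speculatively allocated SyncVar is either reused on the next attempt or
// returned to the allocator if another thread created the SyncVar first.
// A minimal sketch of a call site (hypothetical, not a verbatim quote of the
// mutex runtime):
//   SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, mutex_addr,
//                                                /*write_lock=*/true);
//   s->owner_tid = thr->tid;  // ... update mutex state ...
//   s->mtx.Unlock();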
SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock, bool create) {
  u32 *meta = MemToMeta(addr);
  u32 idx0 = *meta;
  u32 myidx = 0;
  SyncVar *mys = 0;
  for (;;) {
    u32 idx = idx0;
    for (;;) {
      if (idx == 0)
        break;
      if (idx & kFlagBlock)
        break;
      DCHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      if (s->addr == addr) {
        if (myidx != 0) {
          mys->Reset(thr);
          sync_alloc_.Free(&thr->sync_cache, myidx);
        }
        if (write_lock)
          s->mtx.Lock();
        else
          s->mtx.ReadLock();
        return s;
      }
      idx = s->next;
    }
    if (!create)
      return 0;
    if (*meta != idx0) {
      idx0 = *meta;
      continue;
    }

    if (myidx == 0) {
      const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
      myidx = sync_alloc_.Alloc(&thr->sync_cache);
      mys = sync_alloc_.Map(myidx);
      mys->Init(thr, pc, addr, uid);
    }
    mys->next = idx0;
    if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
        myidx | kFlagSync, memory_order_release)) {
      if (write_lock)
        mys->mtx.Lock();
      else
        mys->mtx.ReadLock();
      return mys;
    }
  }
}

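// MoveMemory transfers meta info from the src range to the dst range when the
// application moves memory (e.g. __tsan_java_move). Like memmove, it copies
// backwards when dst > src so an overlapping destination is not clobbered,
// and it shifts SyncVar::addr of every moved sync object by dst - src.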
void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
  // src and dst can overlap; the caller guarantees that there are no
  // concurrent accesses to the regions (e.g. stop-the-world).
  CHECK_NE(src, dst);
  CHECK_NE(sz, 0);
  uptr diff = dst - src;
  u32 *src_meta = MemToMeta(src);
  u32 *dst_meta = MemToMeta(dst);
  u32 *src_meta_end = MemToMeta(src + sz);
  uptr inc = 1;
  if (dst > src) {
    // Copy backwards so that an overlapping dst range is not clobbered.
    src_meta = MemToMeta(src + sz) - 1;
    dst_meta = MemToMeta(dst + sz) - 1;
    src_meta_end = MemToMeta(src) - 1;
    inc = -1;
  }
  for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) {
    CHECK_EQ(*dst_meta, 0);
    u32 idx = *src_meta;
    *src_meta = 0;
    *dst_meta = idx;
    // Patch the addresses in sync objects.
    while (idx != 0) {
      if (idx & kFlagBlock)
        break;
      CHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      s->addr += diff;
      idx = s->next;
    }
  }
}

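// Called when a thread goes idle: returns the thread-local allocator caches
// to the global pools so the cached blocks can be reused by other threads.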
void MetaMap::OnThreadIdle(ThreadState *thr) {
  block_alloc_.FlushCache(&thr->block_cache);
  sync_alloc_.FlushCache(&thr->sync_cache);
}

}  // namespace __tsan