blob: 9e514045dec66a33d60a1b3e459afe16499a2020 [file] [log] [blame]
Alexey Samsonov3b2f9f42012-06-04 13:55:19 +00001//===-- tsan_sync.cc ------------------------------------------------------===//
Kostya Serebryany4ad375f2012-05-10 13:48:04 +00002//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file is a part of ThreadSanitizer (TSan), a race detector.
11//
12//===----------------------------------------------------------------------===//
Alexey Samsonov8bd90982012-06-07 09:50:16 +000013#include "sanitizer_common/sanitizer_placement_new.h"
Kostya Serebryany4ad375f2012-05-10 13:48:04 +000014#include "tsan_sync.h"
Kostya Serebryany4ad375f2012-05-10 13:48:04 +000015#include "tsan_rtl.h"
16#include "tsan_mman.h"
17
18namespace __tsan {
19
Dmitry Vyukov9b410fb2014-03-05 13:41:21 +000020void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);
21
// Constructs an empty sync object. Only the mutex needs real construction;
// all other state is zeroed via Reset() so a freshly constructed object is
// indistinguishable from a recycled one.
SyncVar::SyncVar()
    : mtx(MutexTypeSyncVar, StatMtxSyncVar) {
  Reset();
}
26
Dmitry Vyukovbde4c9c2014-05-29 13:50:54 +000027void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
28 this->addr = addr;
29 this->uid = uid;
30
31 creation_stack_id = 0;
32 if (kCppMode) // Go does not use them
33 creation_stack_id = CurrentStackId(thr, pc);
34 if (flags()->detect_deadlocks)
35 DDMutexInit(thr, pc, this);
Kostya Serebryany4ad375f2012-05-10 13:48:04 +000036}
37
// Returns the object to a pristine state so it can be reused for another
// address. Called from the constructor and before the object is returned
// to the allocator cache (see MetaMap::FreeRange / GetAndLock).
void SyncVar::Reset() {
  addr = 0;
  uid = 0;
  creation_stack_id = 0;
  owner_tid = kInvalidTid;  // no thread currently owns the mutex
  last_lock = 0;
  recursion = 0;
  is_rw = 0;
  is_recursive = 0;
  is_broken = 0;
  is_linker_init = 0;
  next = 0;  // unlink from any meta-slot chain

  clock.Zero();
  read_clock.Reset();
}
54
// Initializes the map; uid_gen_ hands out unique ids to sync objects
// (see GetAndLock), so it must start from a known value.
MetaMap::MetaMap() {
  atomic_store(&uid_gen_, 0, memory_order_relaxed);
}
58
59void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
60 u32 idx = block_alloc_.Alloc(&thr->block_cache);
61 MBlock *b = block_alloc_.Map(idx);
62 b->siz = sz;
63 b->tid = thr->tid;
64 b->stk = CurrentStackId(thr, pc);
65 u32 *meta = MemToMeta(p);
66 DCHECK_EQ(*meta, 0);
67 *meta = idx | kFlagBlock;
68}
69
70uptr MetaMap::FreeBlock(ThreadState *thr, uptr pc, uptr p) {
71 MBlock* b = GetBlock(p);
72 if (b == 0)
73 return 0;
74 uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
75 FreeRange(thr, pc, p, sz);
76 return sz;
77}
78
79void MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
80 u32 *meta = MemToMeta(p);
81 u32 *end = MemToMeta(p + sz);
82 if (end == meta)
83 end++;
84 for (; meta < end; meta++) {
85 u32 idx = *meta;
86 *meta = 0;
87 for (;;) {
88 if (idx == 0)
89 break;
90 if (idx & kFlagBlock) {
91 block_alloc_.Free(&thr->block_cache, idx & ~kFlagMask);
92 break;
93 } else if (idx & kFlagSync) {
94 DCHECK(idx & kFlagSync);
95 SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
96 u32 next = s->next;
97 s->Reset();
98 sync_alloc_.Free(&thr->sync_cache, idx & ~kFlagMask);
99 idx = next;
100 } else {
101 CHECK(0);
102 }
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000103 }
104 }
105}
106
Dmitry Vyukovbde4c9c2014-05-29 13:50:54 +0000107MBlock* MetaMap::GetBlock(uptr p) {
108 u32 *meta = MemToMeta(p);
109 u32 idx = *meta;
110 for (;;) {
111 if (idx == 0)
112 return 0;
113 if (idx & kFlagBlock)
114 return block_alloc_.Map(idx & ~kFlagMask);
115 DCHECK(idx & kFlagSync);
116 SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
117 idx = s->next;
118 }
119}
120
// Returns the sync object for addr with its mutex acquired, creating the
// object if it does not exist yet. Thin wrapper over GetAndLock with
// create == true.
SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                                     uptr addr, bool write_lock) {
  return GetAndLock(thr, pc, addr, write_lock, true);
}
125
// Returns the existing sync object for addr write-locked, or 0 if none.
// Passes null thr/pc — safe because with create == false GetAndLock never
// allocates and so never touches the thread state.
SyncVar* MetaMap::GetIfExistsAndLock(uptr addr) {
  return GetAndLock(0, 0, addr, true, false);
}
129
Dmitry Vyukovbde4c9c2014-05-29 13:50:54 +0000130SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
Dmitry Vyukovfd5ebcd2012-12-06 12:16:15 +0000131 uptr addr, bool write_lock, bool create) {
Dmitry Vyukovbde4c9c2014-05-29 13:50:54 +0000132 u32 *meta = MemToMeta(addr);
133 u32 idx0 = *meta;
134 u32 myidx = 0;
135 SyncVar *mys = 0;
136 for (;;) {
137 u32 idx = *meta;
138 for (;;) {
139 if (idx == 0)
Dmitry Vyukov1c0b3c62012-08-15 17:27:20 +0000140 break;
Dmitry Vyukovbde4c9c2014-05-29 13:50:54 +0000141 if (idx & kFlagBlock)
142 break;
143 DCHECK(idx & kFlagSync);
144 SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
145 if (s->addr == addr) {
146 if (myidx != 0) {
147 mys->Reset();
148 sync_alloc_.Free(&thr->sync_cache, myidx);
149 }
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000150 if (write_lock)
Dmitry Vyukovbde4c9c2014-05-29 13:50:54 +0000151 s->mtx.Lock();
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000152 else
Dmitry Vyukovbde4c9c2014-05-29 13:50:54 +0000153 s->mtx.ReadLock();
154 return s;
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000155 }
Dmitry Vyukovbde4c9c2014-05-29 13:50:54 +0000156 idx = s->next;
157 }
158 if (!create)
159 return 0;
160 if (*meta != idx0)
161 continue;
162
163 if (myidx == 0) {
164 const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
165 myidx = sync_alloc_.Alloc(&thr->sync_cache);
166 mys = sync_alloc_.Map(myidx);
167 mys->Init(thr, pc, addr, uid);
168 }
169 mys->next = idx0;
170 if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
171 myidx | kFlagSync, memory_order_release)) {
172 if (write_lock)
173 mys->mtx.Lock();
174 else
175 mys->mtx.ReadLock();
176 return mys;
Kostya Serebryany4ad375f2012-05-10 13:48:04 +0000177 }
178 }
Dmitry Vyukovbde4c9c2014-05-29 13:50:54 +0000179}
180
// Moves the meta information for the range [src, src+sz) to the range
// starting at dst, patching the addresses stored inside sync objects.
void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
  // Here we assume that src and dst do not overlap,
  // and there are no concurrent accesses to the regions (e.g. stop-the-world).
  uptr diff = dst - src;  // unsigned wrap-around is fine for dst < src
  u32 *src_meta = MemToMeta(src);
  u32 *dst_meta = MemToMeta(dst);
  u32 *src_meta_end = MemToMeta(src + sz);
  for (; src_meta != src_meta_end; src_meta++, dst_meta++) {
    CHECK_EQ(*dst_meta, 0);  // destination slots must be empty
    u32 idx = *src_meta;
    *src_meta = 0;
    *dst_meta = idx;  // the whole chain moves by relinking one slot
    // Patch the addresses in sync objects.
    while (idx != 0) {
      if (idx & kFlagBlock)
        break;  // blocks store no address; chain ends here
      CHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      s->addr += diff;
      idx = s->next;
    }
  }
}
204
// Called when a thread goes idle: drains the thread's local allocator
// caches back to the shared allocators so the memory can be reused.
void MetaMap::OnThreadIdle(ThreadState *thr) {
  block_alloc_.FlushCache(&thr->block_cache);
  sync_alloc_.FlushCache(&thr->sync_cache);
}
209
210} // namespace __tsan