//===-- tsan_rtl.cc ---------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_interface.h"
#include "tsan_atomic.h"
#include "tsan_mman.h"
#include "tsan_placement_new.h"
#include "tsan_suppressions.h"

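// If __tsan_stop is set to a non-zero value before startup, Initialize()
// below spins until it is cleared again (for example, by calling
// __tsan_resume() from a debugger).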
volatile int __tsan_stop = 0;

extern "C" void __tsan_resume() {
  __tsan_stop = 0;
}

namespace __tsan {

THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGN(64);
static char ctx_placeholder[sizeof(Context)] ALIGN(64);

static Context *ctx;
Context *CTX() {
  return ctx;
}

Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  , thread_mtx(MutexTypeThreads, StatMtxThreads)
  , racy_stacks(MBlockRacyStacks)
  , racy_addresses(MBlockRacyAddresses) {
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, u64 epoch,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , fast_ignore_reads()
  // , fast_ignore_writes()
  // , in_rtl()
  , shadow_stack_pos(&shadow_stack[0])
  , tid(tid)
  , func_call_count()
  , stk_addr(stk_addr)
  , stk_size(stk_size)
  , tls_addr(tls_addr)
  , tls_size(tls_size) {
}

ThreadContext::ThreadContext(int tid)
  : tid(tid)
  , unique_id()
  , user_id()
  , thr()
  , status(ThreadStatusInvalid)
  , detached()
  , reuse_count()
  , epoch0()
  , epoch1()
  , dead_next() {
}

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  ScopedInRtl in_rtl;
  InitializeInterceptors();
  const char *env = InitializePlatform();
  InitializeMutex();
  InitializeDynamicAnnotations();
  ctx = new(ctx_placeholder) Context;
  InitializeShadowMemory();
  ctx->dead_list_size = 0;
  ctx->dead_list_head = 0;
  ctx->dead_list_tail = 0;
  InitializeFlags(&ctx->flags, env);
  InitializeSuppressions();

  if (ctx->flags.verbosity)
    Printf("***** Running under ThreadSanitizer v2 (pid=%d) *****\n", GetPid());

  // Initialize thread 0.
  ctx->thread_seq = 0;
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid);
  CHECK_EQ(thr->in_rtl, 1);
  ctx->initialized = true;

  if (__tsan_stop) {
    Printf("ThreadSanitizer is suspended at startup.\n");
    while (__tsan_stop);
  }
}

int Finalize(ThreadState *thr) {
  ScopedInRtl in_rtl;
  Context *ctx = __tsan::ctx;
  bool failed = false;

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
        ctx->nmissed_expected);
  }

  StatOutput(ctx->stat);
  return failed ? flags()->exitcode : 0;
}

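// Switches the thread to the next trace part: the part index is derived from
// the current epoch, and the part header records the starting epoch and the
// current stack.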
static void TraceSwitch(ThreadState *thr) {
  ScopedInRtl in_rtl;
  Lock l(&thr->trace.mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % kTraceParts;
  TraceHeader *hdr = &thr->trace.headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  hdr->stack0.ObtainCurrent(thr, 0);
}

extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}

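// Shadow words are read and written with relaxed atomic operations: several
// threads may update the same shadow cell concurrently, and presumably only
// word-level atomicity, not ordering, is required here.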
ALWAYS_INLINE
static Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
static void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

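// Stores the current access into the given shadow slot and zeroes *s, so that
// the caller can tell the value has already been stored and skip the final
// replacement step in MemoryAccessImpl().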
ALWAYS_INLINE
static void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}

static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
                              Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
  HACKY_CALL(__tsan_report_race);
}

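// Predicates used by the shadow word update logic
// (tsan_update_shadow_word_inl.h, included below) to classify the pair of
// the old shadow value and the current access.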
static inline bool BothReads(Shadow s, int kAccessIsWrite) {
  return !kAccessIsWrite && !s.is_write();
}

static inline bool OldIsRWStronger(Shadow old, int kAccessIsWrite) {
  return old.is_write() || !kAccessIsWrite;
}

static inline bool OldIsRWWeaker(Shadow old, int kAccessIsWrite) {
  return !old.is_write() || kAccessIsWrite;
}

static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
  return old.epoch() >= thr->fast_synch_epoch;
}

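// The old access happens-before the current one if the current thread's
// vector clock entry for the old access's thread has already reached the
// old access's epoch.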
static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.tid()) >= old.epoch();
}

ALWAYS_INLINE
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, FastState fast_state,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();

  // Scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // We consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. Initially I also considered
  // larger and smaller accesses; that allowed replacing some 'candidates'
  // with 'same' or 'replace', but it's just not worth it
  // (performance- and complexity-wise).
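  // The scan over the kShadowCnt shadow slots is unrolled manually:
  // tsan_update_shadow_word_inl.h is included once per slot index below.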

  Shadow old(0);
  if (kShadowCnt == 1) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 2) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 4) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 8) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
    idx = 4;
#include "tsan_update_shadow_word_inl.h"
    idx = 5;
#include "tsan_update_shadow_word_inl.h"
    idx = 6;
#include "tsan_update_shadow_word_inl.h"
    idx = 7;
#include "tsan_update_shadow_word_inl.h"
  } else {
    CHECK(false);
  }

  // We did not find any races and have already stored
  // the current access info, so we are done.
  if (LIKELY(store_word == 0))
    return;
  // Choose a (pseudo-)random candidate slot and replace it.
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}

ALWAYS_INLINE
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: tsan::OnMemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%llx, %llx, %llx, %llx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      shadow_mem[0], shadow_mem[1], shadow_mem[2], shadow_mem[3]);
#if TSAN_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %lx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%lx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;
  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  Shadow cur(fast_state);
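  // Addr0 is the access offset within the 8-byte shadow cell (addr & 7);
  // together with the access size log it describes which bytes of the cell
  // this access covers.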
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);

  // We must not store to the trace if we do not store to the shadow.
  // That is, this call must be moved somewhere below.
  TraceAddEvent(thr, fast_state.epoch(), EventTypeMop, pc);

  MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, fast_state,
      shadow_mem, cur);
}

static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  if (size == 0)
    return;
  // FIXME: fix me.
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  CHECK_EQ(addr % 8, 0);
  CHECK(IsAppMem(addr));
  CHECK(IsAppMem(addr + size - 1));
  (void)thr;
  (void)pc;
  // Some programs mmap hundreds of GBs but actually use only a small part,
  // so it's better to report a false positive on such memory
  // than to hang here senselessly.
  const uptr kMaxResetSize = 1024*1024*1024;
  if (size > kMaxResetSize)
    size = kMaxResetSize;
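  // Round the size up to shadow cell granularity (8 bytes) and fill
  // kShadowCnt shadow words per cell with the requested value.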
  size = (size + 7) & ~7;
  u64 *p = (u64*)MemToShadow(addr);
  CHECK(IsShadowMem((uptr)p));
  CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
  // FIXME: may overwrite a part outside the region
  for (uptr i = 0; i < size * kShadowCnt / kShadowCell; i++)
    p[i] = val;
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

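// Handles deallocation of a memory range: the free itself is reported as a
// write to the whole range (so it races with any concurrent accesses), and
// the range's shadow is then filled with a shadow value marked as freed,
// so that subsequent accesses to this memory conflict with the free.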
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryAccessRange(thr, pc, addr, size, true);
  Shadow s(thr->fast_state);
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void FuncEntry(ThreadState *thr, uptr pc) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: tsan::FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeFuncEnter, pc);

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
  DCHECK(thr->shadow_stack_pos >= &thr->shadow_stack[0]);
  DCHECK(thr->shadow_stack_pos < &thr->shadow_stack[kShadowStackSize]);
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

void FuncExit(ThreadState *thr) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: tsan::FuncExit\n", (int)thr->fast_state.tid());
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeFuncExit, 0);

  DCHECK(thr->shadow_stack_pos > &thr->shadow_stack[0]);
  DCHECK(thr->shadow_stack_pos < &thr->shadow_stack[kShadowStackSize]);
  thr->shadow_stack_pos--;
}

void IgnoreCtl(ThreadState *thr, bool write, bool begin) {
  DPrintf("#%d: IgnoreCtl(%d, %d)\n", thr->tid, write, begin);
  thr->ignore_reads_and_writes += begin ? 1 : -1;
  CHECK_GE(thr->ignore_reads_and_writes, 0);
  if (thr->ignore_reads_and_writes)
    thr->fast_state.SetIgnoreBit();
  else
    thr->fast_state.ClearIgnoreBit();
}

#if TSAN_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

#if TSAN_SHADOW_COUNT == 1
void build_consistency_shadow1() {}
#elif TSAN_SHADOW_COUNT == 2
void build_consistency_shadow2() {}
#elif TSAN_SHADOW_COUNT == 4
void build_consistency_shadow4() {}
#else
void build_consistency_shadow8() {}
#endif

}  // namespace __tsan

// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"