//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main internal TSan header file.
//
// Ground rules:
//   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//     function-scope locals)
//   - All functions/classes/etc reside in namespace __tsan, except for those
//     declared in tsan_interface.h.
//   - Platform-specific files should be used instead of ifdefs (*).
//   - No system headers included in header files (*).
//   - Platform-specific headers included only into platform-specific files (*).
//
//  (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//

#ifndef TSAN_RTL_H
#define TSAN_RTL_H

#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_vector.h"
#include "tsan_report.h"

namespace __tsan {

void Printf(const char *format, ...) FORMAT(1, 2);
uptr Snprintf(char *buffer, uptr length, const char *format, ...) FORMAT(3, 4);

inline void NOINLINE breakhere() {
  volatile int x = 42;
  (void)x;
}

// FastState (from most significant bit):
//   unused          : 1
//   tid             : kTidBits
//   epoch           : kClkBits
//   unused          : -
//   ignore_bit      : 1
class FastState {
 public:
  FastState(u64 tid, u64 epoch) {
    x_ = tid << kTidShift;
    x_ |= epoch << kClkShift;
    DCHECK(tid == this->tid());
    DCHECK(epoch == this->epoch());
  }

  explicit FastState(u64 x)
      : x_(x) {
  }

  u64 tid() const {
    u64 res = x_ >> kTidShift;
    return res;
  }

  u64 epoch() const {
    u64 res = (x_ << (kTidBits + 1)) >> (64 - kClkBits);
    return res;
  }

  void IncrementEpoch() {
    u64 old_epoch = epoch();
    x_ += 1 << kClkShift;
    DCHECK_EQ(old_epoch + 1, epoch());
    (void)old_epoch;
  }

  void SetIgnoreBit() { x_ |= kIgnoreBit; }
  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
  bool GetIgnoreBit() const { return x_ & kIgnoreBit; }

 private:
  friend class Shadow;
  static const int kTidShift = 64 - kTidBits - 1;
  static const int kClkShift = kTidShift - kClkBits;
  static const u64 kIgnoreBit = 1ull;
  static const u64 kFreedBit = 1ull << 63;
  u64 x_;
};
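
// Illustrative sketch (not part of the runtime): how a FastState is built and
// updated, assuming the kTidBits/kClkBits values defined in tsan_defs.h.
//   FastState s(/*tid=*/5, /*epoch=*/100);
//   s.IncrementEpoch();   // epoch() == 101, tid() still 5
//   s.SetIgnoreBit();     // GetIgnoreBit() == true
//   s.ClearIgnoreBit();   // GetIgnoreBit() == false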

// Shadow (from most significant bit):
//   freed           : 1
//   tid             : kTidBits
//   epoch           : kClkBits
//   is_write        : 1
//   size_log        : 2
//   addr0           : 3
class Shadow: public FastState {
 public:
  explicit Shadow(u64 x) : FastState(x) { }

  explicit Shadow(const FastState &s) : FastState(s.x_) { }

  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ(x_ & 31, 0);
    DCHECK_LE(addr0, 7);
    DCHECK_LE(kAccessSizeLog, 3);
    x_ |= (kAccessSizeLog << 3) | addr0;
    DCHECK_EQ(kAccessSizeLog, size_log());
    DCHECK_EQ(addr0, this->addr0());
  }

  void SetWrite(unsigned kAccessIsWrite) {
    DCHECK_EQ(x_ & 32, 0);
    if (kAccessIsWrite)
      x_ |= 32;
    DCHECK_EQ(kAccessIsWrite, is_write());
  }

  bool IsZero() const { return x_ == 0; }
  u64 raw() const { return x_; }

  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.tid() == s2.tid());
    return shifted_xor == 0;
  }

  static inline bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
    u64 masked_xor = (s1.x_ ^ s2.x_) & 31;
    return masked_xor == 0;
  }

  static inline bool TwoRangesIntersect(Shadow s1, Shadow s2,
                                        unsigned kS2AccessSize) {
    bool res = false;
    u64 diff = s1.addr0() - s2.addr0();
    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0  // NOLINT
      // if (s1.addr0() + s1.size() > s2.addr0()) return true;
      if (s1.size() > -diff) res = true;
    } else {
      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
      if (kS2AccessSize > diff) res = true;
    }
    DCHECK_EQ(res, TwoRangesIntersectSLOW(s1, s2));
    DCHECK_EQ(res, TwoRangesIntersectSLOW(s2, s1));
    return res;
  }

  // The idea behind the offset is as follows.
  // Consider that we have 8 bools contained within a single 8-byte block
  // (mapped to a single shadow "cell"). Now consider that we write to the
  // bools from a single thread (which we consider the common case).
  // Without offsetting, each access would have to scan 4 shadow values on
  // average to find the corresponding shadow value for the bool.
  // With offsetting we start scanning the shadow at the offset, so that each
  // access hits the necessary shadow value straight away (at least in the
  // expected optimistic case).
  // This logic works seamlessly for any layout of user data. For example,
  // if the user data is {int, short, char, char}, then accesses to the int
  // are offset by 0, the short by 4, the 1st char by 6 and the 2nd char by 7.
  // Hopefully, accesses from a single thread won't need to scan all 8 shadow
  // values. See the illustrative sketch after this class.
  unsigned ComputeSearchOffset() {
    return x_ & 7;
  }
  u64 addr0() const { return x_ & 7; }
  u64 size() const { return 1ull << size_log(); }
  bool is_write() const { return x_ & 32; }

  // The idea behind the freed bit is as follows.
  // When memory is freed (or otherwise becomes inaccessible) we write shadow
  // values with the tid/epoch of the free and with the freed bit set.
  // During memory access processing the freed bit is treated as the msb of
  // the tid, so any access races with a shadow value that has the freed bit
  // set (it is as if it were a write from a thread with which we have never
  // synchronized before). This allows us to detect accesses to freed memory
  // without additional overhead in memory access processing and, at the same
  // time, to restore the tid/epoch of the free
  // (see the illustrative sketch after kShadowFreed below).
  void MarkAsFreed() {
    x_ |= kFreedBit;
  }

  bool GetFreedAndReset() {
    bool res = x_ & kFreedBit;
    x_ &= ~kFreedBit;
    return res;
  }

 private:
  u64 size_log() const { return (x_ >> 3) & 3; }

  static bool TwoRangesIntersectSLOW(const Shadow s1, const Shadow s2) {
    if (s1.addr0() == s2.addr0()) return true;
    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
      return true;
    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
      return true;
    return false;
  }
};
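
// Illustrative sketch (not part of the runtime): how a Shadow value encodes a
// memory access and how the search offset described above is derived from it.
// The concrete values are hypothetical.
//   Shadow s(FastState(/*tid=*/1, /*epoch=*/10));
//   s.SetAddr0AndSizeLog(/*addr0=*/6, /*kAccessSizeLog=*/0);  // 1-byte access
//   s.SetWrite(true);
//   // ComputeSearchOffset() == 6, so the scan of this 8-byte cell's shadow
//   // starts at the slot that most likely holds the previous access to the
//   // same byte.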

// Freed memory.
// As if 8-byte write by thread 0xff..f at epoch 0xff..f, races with everything.
const u64 kShadowFreed = 0xfffffffffffffff8ull;
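
// Illustrative sketch (not part of the runtime) of the freed-bit protocol
// described in Shadow above; the values are hypothetical.
//   Shadow s(FastState(/*tid=*/3, /*epoch=*/42));
//   s.SetAddr0AndSizeLog(/*addr0=*/0, /*kAccessSizeLog=*/3);  // whole cell
//   s.SetWrite(true);
//   s.MarkAsFreed();               // every later access to this cell races
//   bool freed = s.GetFreedAndReset();  // true, and the bit is cleared while
//                                       // the tid/epoch of the free remain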

const int kSigCount = 128;
const int kShadowStackSize = 1024;

struct my_siginfo_t {
  int opaque[128];
};

struct SignalDesc {
  bool armed;
  bool sigaction;
  my_siginfo_t siginfo;
};

// This struct is stored in TLS.
struct ThreadState {
  FastState fast_state;
  // Synch epoch represents the thread's epoch before the last synchronization
  // action. It allows us to reduce the number of shadow state updates.
  // For example, if fast_synch_epoch=100 and the last write to addr X was at
  // epoch=150, then when we process a write to X from the same thread at
  // epoch=200 we do nothing, because both writes happen in the same
  // 'synch epoch'. That is, if another memory access does not race with the
  // former write, it does not race with the latter as well.
  // (See the illustrative sketch after this struct.)
  // QUESTION: can we squeeze this into ThreadState::Fast?
  // E.g. ThreadState::Fast is 44 bits, 32 are taken by synch_epoch and 12 are
  // taken by the epoch between synchs.
  // This way we can save one load from TLS.
  u64 fast_synch_epoch;
  // This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
  // We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  uptr *shadow_stack_pos;
  u64 *racy_shadow_addr;
  u64 racy_state[2];
  Trace trace;
  uptr shadow_stack[kShadowStackSize];
  ThreadClock clock;
  u64 stat[StatCnt];
  const int tid;
  int in_rtl;
  const uptr stk_addr;
  const uptr stk_size;
  const uptr tls_addr;
  const uptr tls_size;

  DeadlockDetector deadlock_detector;

  bool in_signal_handler;
  int int_signal_send;
  int pending_signal_count;
  SignalDesc pending_signals[kSigCount];

  explicit ThreadState(Context *ctx, int tid, u64 epoch,
                       uptr stk_addr, uptr stk_size,
                       uptr tls_addr, uptr tls_size);
};
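
// Illustrative sketch (not part of the runtime) of the fast_synch_epoch fast
// path described above. LoadShadow() and `cur` (the Shadow of the current
// access) are hypothetical simplifications of what MemoryAccess()/
// MemoryAccessImpl() actually do.
//   Shadow old(LoadShadow(shadow_mem));  // previous access to this address
//   if (Shadow::TidsAreEqual(old, cur) &&
//       old.epoch() >= thr->fast_synch_epoch) {
//     // The previous access by this thread already happened after the last
//     // synchronization, so recording the new one adds no information:
//     // skip the shadow update.
//   }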

Context *CTX();
extern THREADLOCAL char cur_thread_placeholder[];

INLINE ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
}

enum ThreadStatus {
  ThreadStatusInvalid,   // Non-existent thread, data is invalid.
  ThreadStatusCreated,   // Created but not yet running.
  ThreadStatusRunning,   // The thread is currently running.
  ThreadStatusFinished,  // Joinable thread is finished but not yet joined.
  ThreadStatusDead,      // Joined, but some info (trace) is still alive.
};

// Info about a thread that is held for some time after its termination.
struct ThreadDeadInfo {
  Trace trace;
};

struct ThreadContext {
  const int tid;
  int unique_id;  // Non-rolling thread id.
  uptr user_id;  // Some opaque user thread id (e.g. pthread_t).
  ThreadState *thr;
  ThreadStatus status;
  bool detached;
  int reuse_count;
  SyncClock sync;
  // Epoch at which the thread started.
  // If we see an event from the thread stamped with an older epoch,
  // the event is from a dead thread that shared the tid with this thread
  // (see the sketch after this struct).
  u64 epoch0;
  u64 epoch1;
  StackTrace creation_stack;
  ThreadDeadInfo *dead_info;
  ThreadContext *dead_next;  // In dead thread list.

  explicit ThreadContext(int tid);
};
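
// Illustrative sketch (not part of the runtime): how epoch0 filters out stale
// events of a dead thread that shared this tid; event_epoch is hypothetical.
//   ThreadContext *tctx = ctx->threads[tid];
//   if (event_epoch < tctx->epoch0) {
//     // The event predates this incarnation of the tid, so it came from a
//     // dead thread that reused the id: ignore it.
//   }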

struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const {
    if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
      return true;
    if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
      return true;
    return false;
  }
};

struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};

struct Context {
  Context();

  bool initialized;

  SyncTab synctab;

  Mutex report_mtx;
  int nreported;
  int nmissed_expected;

  Mutex thread_mtx;
  unsigned thread_seq;
  unsigned unique_thread_seq;
  int alive_threads;
  int max_alive_threads;
  ThreadContext *threads[kMaxTid];
  int dead_list_size;
  ThreadContext* dead_list_head;
  ThreadContext* dead_list_tail;

  Vector<RacyStacks> racy_stacks;
  Vector<RacyAddress> racy_addresses;

  Flags flags;

  u64 stat[StatCnt];
  u64 int_alloc_cnt[MBlockTypeCount];
  u64 int_alloc_siz[MBlockTypeCount];
};

class ScopedInRtl {
 public:
  ScopedInRtl();
  ~ScopedInRtl();
 private:
  ThreadState *thr_;
  int in_rtl_;
  int errno_;
};

class ScopedReport {
 public:
  explicit ScopedReport(ReportType typ);
  ~ScopedReport();

  void AddStack(const StackTrace *stack);
  void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack);
  void AddThread(const ThreadContext *tctx);
  void AddMutex(const SyncVar *s);
  void AddLocation(uptr addr, uptr size);

  const ReportDesc *GetReport() const;

 private:
  Context *ctx_;
  ReportDesc *rep_;

  ScopedReport(const ScopedReport&);
  void operator = (const ScopedReport&);
};

void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);
void ALWAYS_INLINE INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
  if (kCollectStats)
    thr->stat[typ] += n;
}

void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeDynamicAnnotations();
void Die() NORETURN;

void ReportRace(ThreadState *thr);
bool OutputReport(const ScopedReport &srep,
                  const ReportStack *suppress_stack = 0);
bool IsExpectedReport(uptr addr, uptr size);

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 Printf
#else
# define DPrintf2(...)
#endif

void Initialize(ThreadState *thr);
int Finalize(ThreadState *thr);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
                  int kAccessSizeLog, bool kAccessIsWrite);
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, FastState fast_state,
    u64 *shadow_mem, Shadow cur);
void MemoryRead1Byte(ThreadState *thr, uptr pc, uptr addr);
void MemoryWrite1Byte(ThreadState *thr, uptr pc, uptr addr);
void MemoryRead8Byte(ThreadState *thr, uptr pc, uptr addr);
void MemoryWrite8Byte(ThreadState *thr, uptr pc, uptr addr);
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
                       uptr size, bool is_write);
void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void IgnoreCtl(ThreadState *thr, bool write, bool begin);

void FuncEntry(ThreadState *thr, uptr pc);
void FuncExit(ThreadState *thr);

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, int tid);
void ThreadFinish(ThreadState *thr);
int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
void ThreadDetach(ThreadState *thr, uptr pc, int tid);
void ThreadFinalize(ThreadState *thr);

void MutexCreate(ThreadState *thr, uptr pc, uptr addr, bool rw, bool recursive);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
void MutexLock(ThreadState *thr, uptr pc, uptr addr);
void MutexUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);

void Acquire(ThreadState *thr, uptr pc, uptr addr);
void Release(ThreadState *thr, uptr pc, uptr addr);

// The hacky call uses a custom calling convention and an assembly thunk.
// It is considerably faster than a normal call for the caller
// if it is not executed (it is intended for slow paths from hot functions).
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use a normal call.
#if TSAN_DEBUG == 0
// The caller may not create the stack frame for itself at all,
// so we create a reserve stack frame for it (1024b must be enough).
#define HACKY_CALL(f) \
  __asm__ __volatile__("sub $0x400, %%rsp;" \
                       "call " #f "_thunk;" \
                       "add $0x400, %%rsp;" ::: "memory");
#else
#define HACKY_CALL(f) f()
#endif
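
// Usage sketch (illustrative): a slow path inside a hot function is invoked as
//   HACKY_CALL(__tsan_trace_switch);
// which, in release builds, expands to the register-preserving thunk call
// defined above (see TraceAddEvent() below).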

extern "C" void __tsan_trace_switch();
void ALWAYS_INLINE INLINE TraceAddEvent(ThreadState *thr, u64 epoch,
                                        EventType typ, uptr addr) {
  StatInc(thr, StatEvents);
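  // Switch to the next trace part when the epoch crosses a part boundary.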
  if (UNLIKELY((epoch % kTracePartSize) == 0))
    HACKY_CALL(__tsan_trace_switch);
  Event *evp = &thr->trace.events[epoch % kTraceSize];
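  // Pack the event: the low bits hold the address/pc, the top 3 bits hold
  // the event type.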
  Event ev = (u64)addr | ((u64)typ << 61);
  *evp = ev;
}

}  // namespace __tsan

#endif  // TSAN_RTL_H