//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main internal TSan header file.
//
// Ground rules:
//   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//     function-scope locals)
//   - All functions/classes/etc reside in namespace __tsan, except for those
//     declared in tsan_interface.h.
//   - Platform-specific files should be used instead of ifdefs (*).
//   - No system headers included in header files (*).
//   - Platform-specific headers are included only in platform-specific
//     files (*).
//
// (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//

#ifndef TSAN_RTL_H
#define TSAN_RTL_H

#include "sanitizer_common/sanitizer_common.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_vector.h"
#include "tsan_report.h"

namespace __tsan {

void TsanPrintf(const char *format, ...);

// FastState (from most significant bit):
//   unused     : 1
//   tid        : kTidBits
//   epoch      : kClkBits
//   unused     : -
//   ignore_bit : 1
class FastState {
 public:
  FastState(u64 tid, u64 epoch) {
    x_ = tid << kTidShift;
    x_ |= epoch << kClkShift;
    DCHECK(tid == this->tid());
    DCHECK(epoch == this->epoch());
  }

  explicit FastState(u64 x)
      : x_(x) {
  }

  u64 tid() const {
    u64 res = x_ >> kTidShift;
    return res;
  }

  u64 epoch() const {
    u64 res = (x_ << (kTidBits + 1)) >> (64 - kClkBits);
    return res;
  }

  void IncrementEpoch() {
    u64 old_epoch = epoch();
    x_ += 1ull << kClkShift;
    DCHECK_EQ(old_epoch + 1, epoch());
    (void)old_epoch;
  }

  void SetIgnoreBit() { x_ |= kIgnoreBit; }
  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
  bool GetIgnoreBit() const { return x_ & kIgnoreBit; }

 private:
  friend class Shadow;
  static const int kTidShift = 64 - kTidBits - 1;
  static const int kClkShift = kTidShift - kClkBits;
  static const u64 kIgnoreBit = 1ull;
  static const u64 kFreedBit = 1ull << 63;
  u64 x_;
};
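
// Worked example of the packing arithmetic above (an illustrative sketch;
// the concrete kTidBits/kClkBits values live in tsan_defs.h and may differ).
// Assuming kTidBits = 13 and kClkBits = 42:
//   kTidShift = 64 - 13 - 1 = 50   // tid sits just below the unused msb
//   kClkShift = 50 - 42     = 8    // epoch sits just below the tid
// Then for FastState(tid=5, epoch=7):
//   x_      = (5 << 50) | (7 << 8)
//   tid()   = x_ >> 50                       == 5
//   epoch() = (x_ << (13 + 1)) >> (64 - 42)  == 7
// The left shift in epoch() discards the unused msb and the tid; the right
// shift then brings the epoch down to bit 0. IncrementEpoch() simply adds 1
// at the epoch position, i.e. 1ull << kClkShift.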

// Shadow (from most significant bit):
//   freed    : 1
//   tid      : kTidBits
//   epoch    : kClkBits
//   is_write : 1
//   size_log : 2
//   addr0    : 3
class Shadow : public FastState {
 public:
  explicit Shadow(u64 x) : FastState(x) { }

  explicit Shadow(const FastState &s) : FastState(s.x_) { }

  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ(x_ & 31, 0);
    DCHECK_LE(addr0, 7);
    DCHECK_LE(kAccessSizeLog, 3);
    x_ |= (kAccessSizeLog << 3) | addr0;
    DCHECK_EQ(kAccessSizeLog, size_log());
    DCHECK_EQ(addr0, this->addr0());
  }

  void SetWrite(unsigned kAccessIsWrite) {
    DCHECK_EQ(x_ & 32, 0);
    if (kAccessIsWrite)
      x_ |= 32;
    DCHECK_EQ(kAccessIsWrite, is_write());
  }

  bool IsZero() const { return x_ == 0; }
  u64 raw() const { return x_; }

  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.tid() == s2.tid());
    return shifted_xor == 0;
  }

  static inline bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
    u64 masked_xor = (s1.x_ ^ s2.x_) & 31;
    return masked_xor == 0;
  }

  static inline bool TwoRangesIntersect(Shadow s1, Shadow s2,
                                        unsigned kS2AccessSize) {
    bool res = false;
    u64 diff = s1.addr0() - s2.addr0();
    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0  // NOLINT
      // if (s1.addr0() + s1.size() > s2.addr0()) return true;
      if (s1.size() > -diff) res = true;
    } else {
      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
      if (kS2AccessSize > diff) res = true;
    }
    DCHECK_EQ(res, TwoRangesIntersectSLOW(s1, s2));
    DCHECK_EQ(res, TwoRangesIntersectSLOW(s2, s1));
    return res;
  }
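
  // Sketch of the arithmetic above with concrete values (illustrative only).
  // addr0 is a 3-bit offset within an 8-byte cell, so the difference always
  // fits in s64 and its sign tells which range starts first:
  //   s1.addr0 = 2, s1.size = 4 (covers [2,6)), s2.addr0 = 5:
  //     diff = 2 - 5 is negative as s64, so test s1.size() > -diff,
  //     i.e. 4 > 3 -> true, the ranges overlap at byte 5.
  //   s1.addr0 = 6, s2.addr0 = 1, kS2AccessSize = 4 (covers [1,5)):
  //     diff = 5, so test kS2AccessSize > diff, i.e. 4 > 5 -> false.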

  // The idea behind the offset is as follows.
  // Consider that we have 8 bools contained within a single 8-byte block
  // (mapped to a single shadow "cell"). Now consider that we write to the
  // bools from a single thread (which we consider the common case).
  // Without offsetting, each access would have to scan 4 shadow values on
  // average to find the shadow value corresponding to the bool.
  // With offsetting, we start scanning the shadow at the offset, so that
  // each access hits the necessary shadow value straight away (at least in
  // the expected, optimistic case).
  // This logic works seamlessly for any layout of user data. For example,
  // if the user data is {int, short, char, char}, then accesses to the int
  // are offset to 0, to the short to 4, to the 1st char to 6 and to the 2nd
  // char to 7. Hopefully, accesses from a single thread won't need to scan
  // all 8 shadow values.
  unsigned ComputeSearchOffset() {
    return x_ & 7;
  }
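
  // Concrete numbers for the {int, short, char, char} example above (a
  // sketch; the scan position is taken modulo the number of shadow values
  // per cell, which is defined elsewhere):
  //   access to the int      -> addr0 = 0 -> the scan starts at offset 0
  //   access to the short    -> addr0 = 4 -> the scan starts at offset 4
  //   access to the 1st char -> addr0 = 6 -> the scan starts at offset 6
  //   access to the 2nd char -> addr0 = 7 -> the scan starts at offset 7
  // If each field is mostly touched by one thread, every access tends to
  // find its shadow value on the first probe instead of scanning from 0.
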
  u64 addr0() const { return x_ & 7; }
  u64 size() const { return 1ull << size_log(); }
  bool is_write() const { return x_ & 32; }

  // The idea behind the freed bit is as follows.
  // When the memory is freed (or otherwise becomes inaccessible) we write to
  // the shadow values with the tid/epoch related to the free and with the
  // freed bit set. During memory access processing the freed bit is
  // considered as the msb of the tid, so any access races with a shadow
  // value that has the freed bit set (it is as if the write came from a
  // thread with which we have never synchronized). This allows us to detect
  // accesses to freed memory without additional overhead in memory access
  // processing, and at the same time to restore the tid/epoch of the free.
  void MarkAsFreed() {
    x_ |= kFreedBit;
  }

  bool GetFreedAndReset() {
    bool res = x_ & kFreedBit;
    x_ &= ~kFreedBit;
    return res;
  }

 private:
  u64 size_log() const { return (x_ >> 3) & 3; }

  static bool TwoRangesIntersectSLOW(const Shadow s1, const Shadow s2) {
    if (s1.addr0() == s2.addr0()) return true;
    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
      return true;
    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
      return true;
    return false;
  }
};
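
// A minimal usage sketch for Shadow (hypothetical values; the real call
// sites live in the runtime's memory access handlers): building the shadow
// value for a 4-byte write at offset 4 of its 8-byte cell from the thread's
// current FastState.
//
//   Shadow cur(thr->fast_state);
//   cur.SetAddr0AndSizeLog(/*addr0=*/4, /*kAccessSizeLog=*/2);  // size = 4
//   cur.SetWrite(true);  // sets the is_write bit (bit 5)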

// Freed memory.
// As if an 8-byte write by thread 0xff..f at epoch 0xff..f; it races with
// everything.
const u64 kShadowFreed = 0xfffffffffffffff8ull;
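
// Decoding kShadowFreed against the Shadow layout above: all bits are set
// except the low three, so addr0 = 0, size_log = 3 (an 8-byte access),
// is_write = 1, and the tid, epoch and freed bits are all ones, i.e. exactly
// the maximal-tid, maximal-epoch write described in the comment above.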

struct SignalContext;

// This struct is stored in TLS.
struct ThreadState {
  FastState fast_state;
  // The synch epoch represents the thread's epoch before the last
  // synchronization action. It allows us to reduce the number of shadow
  // state updates. For example, if fast_synch_epoch=100 and the last write
  // to addr X was at epoch=150, and we are processing a write to X from the
  // same thread at epoch=200, we do nothing, because both writes happen in
  // the same 'synch epoch'. That is, if another memory access does not race
  // with the former write, it does not race with the latter as well.
  // QUESTION: can we squeeze this into ThreadState::Fast?
  // E.g. ThreadState::Fast is 44 bits, 32 of which are taken by synch_epoch
  // and 12 by the epoch between synchs.
  // This way we can save one load from TLS.
  u64 fast_synch_epoch;
  // This is a slow path flag. On the fast path, fast_state.GetIgnoreBit() is
  // read. We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  uptr *shadow_stack_pos;
  u64 *racy_shadow_addr;
  u64 racy_state[2];
  Trace trace;
  uptr shadow_stack[kShadowStackSize];
  ThreadClock clock;
  u64 stat[StatCnt];
  const int tid;
  int in_rtl;
  bool is_alive;
  const uptr stk_addr;
  const uptr stk_size;
  const uptr tls_addr;
  const uptr tls_size;

  DeadlockDetector deadlock_detector;

  bool in_signal_handler;
  SignalContext *signal_ctx;

  // Set in regions of the runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  explicit ThreadState(Context *ctx, int tid, u64 epoch,
                       uptr stk_addr, uptr stk_size,
                       uptr tls_addr, uptr tls_size);
};

Context *CTX();
extern THREADLOCAL char cur_thread_placeholder[];

INLINE ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
}
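
// cur_thread() deliberately returns a reinterpret_cast of a raw TLS byte
// array: a THREADLOCAL ThreadState object would need a static constructor,
// which the ground rules above forbid. A sketch of the presumed
// initialization (the actual construction happens elsewhere in the runtime,
// plausibly during ThreadStart; the arguments here are placeholders):
//
//   new(cur_thread()) ThreadState(ctx, tid, epoch,
//                                 stk_addr, stk_size, tls_addr, tls_size);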

enum ThreadStatus {
  ThreadStatusInvalid,   // Non-existent thread, data is invalid.
  ThreadStatusCreated,   // Created but not yet running.
  ThreadStatusRunning,   // The thread is currently running.
  ThreadStatusFinished,  // Joinable thread is finished but not yet joined.
  ThreadStatusDead,      // Joined, but some info (trace) is still alive.
};

// Information about a thread that is held for some time after its
// termination.
struct ThreadDeadInfo {
  Trace trace;
};

struct ThreadContext {
  const int tid;
  int unique_id;  // Non-rolling thread id.
  uptr user_id;   // Some opaque user thread id (e.g. pthread_t).
  ThreadState *thr;
  ThreadStatus status;
  bool detached;
  int reuse_count;
  SyncClock sync;
  // Epoch at which the thread started.
  // If we see an event from the thread stamped with an older epoch,
  // the event is from a dead thread that shared the tid with this thread.
  u64 epoch0;
  u64 epoch1;
  StackTrace creation_stack;
  ThreadDeadInfo *dead_info;
  ThreadContext *dead_next;  // In the dead thread list.

  explicit ThreadContext(int tid);
};

struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const {
    if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
      return true;
    if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
      return true;
    return false;
  }
};

struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};

struct Context {
  Context();

  bool initialized;

  SyncTab synctab;

  Mutex report_mtx;
  int nreported;
  int nmissed_expected;

  Mutex thread_mtx;
  unsigned thread_seq;
  unsigned unique_thread_seq;
  int alive_threads;
  int max_alive_threads;
  ThreadContext *threads[kMaxTid];
  int dead_list_size;
  ThreadContext *dead_list_head;
  ThreadContext *dead_list_tail;

  Vector<RacyStacks> racy_stacks;
  Vector<RacyAddress> racy_addresses;

  Flags flags;

  u64 stat[StatCnt];
  u64 int_alloc_cnt[MBlockTypeCount];
  u64 int_alloc_siz[MBlockTypeCount];
};

class ScopedInRtl {
 public:
  ScopedInRtl();
  ~ScopedInRtl();
 private:
  ThreadState *thr_;
  int in_rtl_;
  int errno_;
};

class ScopedReport {
 public:
  explicit ScopedReport(ReportType typ);
  ~ScopedReport();

  void AddStack(const StackTrace *stack);
  void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack);
  void AddThread(const ThreadContext *tctx);
  void AddMutex(const SyncVar *s);
  void AddLocation(uptr addr, uptr size);

  const ReportDesc *GetReport() const;

 private:
  Context *ctx_;
  ReportDesc *rep_;

  ScopedReport(const ScopedReport&);
  void operator = (const ScopedReport&);
};

void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);
void ALWAYS_INLINE INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
  if (kCollectStats)
    thr->stat[typ] += n;
}
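
// Typical usage (a sketch; StatEvents is one of the StatType enumerators
// used below, the rest are defined in the stats header):
//
//   StatInc(thr, StatEvents);      // bump a counter by 1
//   StatInc(thr, StatEvents, 16);  // or by an arbitrary amount
//
// When kCollectStats is 0 the branch folds to a constant and the whole call
// compiles away, which is why it is safe on hot paths.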

void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeDynamicAnnotations();

void ReportRace(ThreadState *thr);
bool OutputReport(const ScopedReport &srep,
                  const ReportStack *suppress_stack = 0);
bool IsExpectedReport(uptr addr, uptr size);

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf TsanPrintf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 TsanPrintf
#else
# define DPrintf2(...)
#endif

void Initialize(ThreadState *thr);
int Finalize(ThreadState *thr);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
                  int kAccessSizeLog, bool kAccessIsWrite);
void MemoryAccessImpl(ThreadState *thr, uptr addr,
                      int kAccessSizeLog, bool kAccessIsWrite,
                      FastState fast_state, u64 *shadow_mem, Shadow cur);
void MemoryRead1Byte(ThreadState *thr, uptr pc, uptr addr);
void MemoryWrite1Byte(ThreadState *thr, uptr pc, uptr addr);
void MemoryRead8Byte(ThreadState *thr, uptr pc, uptr addr);
void MemoryWrite8Byte(ThreadState *thr, uptr pc, uptr addr);
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
                       uptr size, bool is_write);
void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void IgnoreCtl(ThreadState *thr, bool write, bool begin);

void FuncEntry(ThreadState *thr, uptr pc);
void FuncExit(ThreadState *thr);

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, int tid);
void ThreadFinish(ThreadState *thr);
int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
void ThreadDetach(ThreadState *thr, uptr pc, int tid);
void ThreadFinalize(ThreadState *thr);

void MutexCreate(ThreadState *thr, uptr pc, uptr addr, bool rw, bool recursive);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
void MutexLock(ThreadState *thr, uptr pc, uptr addr);
void MutexUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);

void Acquire(ThreadState *thr, uptr pc, uptr addr);
void Release(ThreadState *thr, uptr pc, uptr addr);

// The hacky call uses a custom calling convention and an assembly thunk.
// It is considerably faster than a normal call for the caller
// if it is not executed (it is intended for slow paths from hot functions).
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use a normal call.
#if TSAN_DEBUG == 0
// The caller may not create a stack frame for itself at all,
// so we create a reserve stack frame for it (1024 bytes must be enough).
#define HACKY_CALL(f) \
  __asm__ __volatile__("sub $0x400, %%rsp;" \
                       "call " #f "_thunk;" \
                       "add $0x400, %%rsp;" ::: "memory");
#else
#define HACKY_CALL(f) f()
#endif
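
// Step by step, the asm above reserves 1024 bytes (0x400) of stack as a
// scratch frame for the callee, since the caller may have no frame of its
// own, calls the f_thunk symbol, and then releases the scratch space. The
// "memory" clobber stops the compiler from caching memory values across the
// call. The thunk itself (defined in platform-specific assembly, not in this
// header) is presumed to save and restore all registers around the real
// call to f, which is what lets the compiler avoid treating the sequence as
// a call.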

extern "C" void __tsan_trace_switch();
void ALWAYS_INLINE INLINE TraceAddEvent(ThreadState *thr, u64 epoch,
                                        EventType typ, uptr addr) {
  StatInc(thr, StatEvents);
  if (UNLIKELY((epoch % kTracePartSize) == 0))
    HACKY_CALL(__tsan_trace_switch);
  Event *evp = &thr->trace.events[epoch % kTraceSize];
  Event ev = (u64)addr | ((u64)typ << 61);
  *evp = ev;
}
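
// Sketch of the event encoding above (illustrative values; EventType comes
// from tsan_trace.h): the top 3 bits of the u64 hold the event type and the
// low 61 bits hold the address or pc. E.g. for typ = 1 and addr = 0x4005d0:
//
//   Event ev = 0x4005d0ull | (1ull << 61);  // type in bits 63..61
//
// The trace works as a ring buffer: epoch % kTraceSize selects the slot, and
// every kTracePartSize events the runtime switches to the next trace part
// via __tsan_trace_switch so that old parts can be recycled.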

}  // namespace __tsan

#endif  // TSAN_RTL_H