//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main internal TSan header file.
//
// Ground rules:
//   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//     function-scope locals)
//   - All functions/classes/etc reside in namespace __tsan, except for those
//     declared in tsan_interface.h.
//   - Platform-specific files should be used instead of ifdefs (*).
//   - No system headers included in header files (*).
//   - Platform-specific headers are included only into platform-specific
//     files (*).
//
//  (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//

#ifndef TSAN_RTL_H
#define TSAN_RTL_H

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_allocator64.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_vector.h"
#include "tsan_report.h"

namespace __tsan {

// Descriptor of user's memory block.
struct MBlock {
  Mutex mtx;
  uptr size;
  u32 alloc_tid;
  u32 alloc_stack_id;
  SyncVar *head;
};

#ifndef TSAN_GO
#if defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
const uptr kAllocatorSpace = 0x7d0000000000ULL;
#else
const uptr kAllocatorSpace = 0x7d0000000000ULL;
#endif
const uptr kAllocatorSize = 0x10000000000ULL;  // 1T.

typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, sizeof(MBlock),
    DefaultSizeClassMap> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator::kNumClasses,
    PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
Allocator *allocator();
#endif
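
// A minimal allocation sketch (an assumption based on the typedefs above and
// the sanitizer_common allocator interface; the real call sites live in the
// runtime .cc files):
//   void *p = allocator()->Allocate(&thr->alloc_cache, size, align);
//   MBlock *b = (MBlock*)allocator()->GetMetaData(p);
// Note that sizeof(MBlock) is passed to SizeClassAllocator64 as the per-block
// metadata size, so the MBlock descriptor lives in allocator metadata rather
// than inside the user block itself.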

void TsanPrintf(const char *format, ...);

// FastState (from most significant bit):
//   unused     : 1
//   tid        : kTidBits
//   epoch      : kClkBits
//   unused     : -
//   ignore_bit : 1
class FastState {
 public:
  FastState(u64 tid, u64 epoch) {
    x_ = tid << kTidShift;
    x_ |= epoch << kClkShift;
    DCHECK(tid == this->tid());
    DCHECK(epoch == this->epoch());
  }

  explicit FastState(u64 x)
      : x_(x) {
  }

  u64 raw() const {
    return x_;
  }

  u64 tid() const {
    u64 res = x_ >> kTidShift;
    return res;
  }

  u64 epoch() const {
    u64 res = (x_ << (kTidBits + 1)) >> (64 - kClkBits);
    return res;
  }

  void IncrementEpoch() {
    u64 old_epoch = epoch();
    x_ += 1 << kClkShift;
    DCHECK_EQ(old_epoch + 1, epoch());
    (void)old_epoch;
  }

  void SetIgnoreBit() { x_ |= kIgnoreBit; }
  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
  bool GetIgnoreBit() const { return x_ & kIgnoreBit; }

 private:
  friend class Shadow;
  static const int kTidShift = 64 - kTidBits - 1;
  static const int kClkShift = kTidShift - kClkBits;
  static const u64 kIgnoreBit = 1ull;
  static const u64 kFreedBit = 1ull << 63;
  u64 x_;
};
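
// Illustrative FastState usage (a sketch, not code from this file): the
// constructor packs the two fields and the accessors unpack them.
//   FastState s(/*tid=*/5, /*epoch=*/7);
//   // s.tid() == 5 && s.epoch() == 7
//   s.IncrementEpoch();
//   // s.epoch() == 8; tid and the ignore bit are unaffected.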

// Shadow (from most significant bit):
//   freed    : 1
//   tid      : kTidBits
//   epoch    : kClkBits
//   is_write : 1
//   size_log : 2
//   addr0    : 3
class Shadow : public FastState {
 public:
  explicit Shadow(u64 x) : FastState(x) { }

  explicit Shadow(const FastState &s) : FastState(s.x_) { }

  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ(x_ & 31, 0);
    DCHECK_LE(addr0, 7);
    DCHECK_LE(kAccessSizeLog, 3);
    x_ |= (kAccessSizeLog << 3) | addr0;
    DCHECK_EQ(kAccessSizeLog, size_log());
    DCHECK_EQ(addr0, this->addr0());
  }

  void SetWrite(unsigned kAccessIsWrite) {
    DCHECK_EQ(x_ & 32, 0);
    if (kAccessIsWrite)
      x_ |= 32;
    DCHECK_EQ(kAccessIsWrite, is_write());
  }

  bool IsZero() const { return x_ == 0; }

  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.tid() == s2.tid());
    return shifted_xor == 0;
  }

  static inline bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
    u64 masked_xor = (s1.x_ ^ s2.x_) & 31;
    return masked_xor == 0;
  }

  static inline bool TwoRangesIntersect(Shadow s1, Shadow s2,
      unsigned kS2AccessSize) {
    bool res = false;
    u64 diff = s1.addr0() - s2.addr0();
    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0  // NOLINT
      // if (s1.addr0() + s1.size() > s2.addr0()) return true;
      if (s1.size() > -diff) res = true;
    } else {
      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
      if (kS2AccessSize > diff) res = true;
    }
    DCHECK_EQ(res, TwoRangesIntersectSLOW(s1, s2));
    DCHECK_EQ(res, TwoRangesIntersectSLOW(s2, s1));
    return res;
  }
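
  // Worked example (illustrative, not from the original source): if s1
  // covers [0,4) (addr0 = 0, size_log = 2) and s2 is a 1-byte access at
  // addr0 = 2, then diff = 0 - 2 is negative and s1.size() == 4 > 2 == -diff,
  // so the ranges intersect.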

  // The idea behind the offset is as follows.
  // Consider that we have 8 bools contained within a single 8-byte block
  // (mapped to a single shadow "cell"). Now consider that we write to the
  // bools from a single thread (which we consider the common case).
  // Without offsetting, each access would have to scan 4 shadow values on
  // average to find the shadow value corresponding to the bool.
  // With offsetting we start scanning the shadow at the offset, so that
  // each access hits the necessary shadow straight away (at least in the
  // expected optimistic case).
  // This logic works seamlessly for any layout of user data. For example,
  // if the user data is {int, short, char, char}, then accesses to the int
  // are offset by 0, to the short by 4, to the 1st char by 6 and to the 2nd
  // char by 7. Hopefully, accesses from a single thread won't need to scan
  // all 8 shadow values.
  unsigned ComputeSearchOffset() {
    return x_ & 7;
  }
  u64 addr0() const { return x_ & 7; }
  u64 size() const { return 1ull << size_log(); }
  bool is_write() const { return x_ & 32; }

  // The idea behind the freed bit is as follows.
  // When the memory is freed (or otherwise becomes inaccessible) we write to
  // the shadow values with the tid/epoch related to the free and with the
  // freed bit set. During memory access processing the freed bit is
  // considered as the msb of the tid, so any access races with a shadow
  // value that has the freed bit set (it is as if it were a write from a
  // thread with which we have never synchronized before). This allows us to
  // detect accesses to freed memory without additional overhead in memory
  // access processing, and at the same time to restore the tid/epoch of the
  // free.
  void MarkAsFreed() {
    x_ |= kFreedBit;
  }

  bool GetFreedAndReset() {
    bool res = x_ & kFreedBit;
    x_ &= ~kFreedBit;
    return res;
  }
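
  // Sketch of the intended free-marking flow (an assumption drawn from the
  // comment above; the actual call sites are in the runtime .cc files):
  //   Shadow s(thr->fast_state);  // tid/epoch of the freeing thread
  //   s.MarkAsFreed();
  //   // Store s into the shadow of the freed range: any later access now
  //   // appears to race with it, and GetFreedAndReset() recovers the flag
  //   // while restoring the plain tid/epoch of the free for the report.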

 private:
  u64 size_log() const { return (x_ >> 3) & 3; }

  static bool TwoRangesIntersectSLOW(const Shadow s1, const Shadow s2) {
    if (s1.addr0() == s2.addr0()) return true;
    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
      return true;
    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
      return true;
    return false;
  }
};

// Freed memory.
// As if 8-byte write by thread 0xff..f at epoch 0xff..f, races with everything.
const u64 kShadowFreed = 0xfffffffffffffff8ull;
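// Decoding the constant against the Shadow layout above: addr0 = 0
// (bits 0-2), size_log = 3 (bits 3-4, i.e. an 8-byte access), is_write = 1
// (bit 5), and the epoch/tid/freed bits are all ones.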

struct SignalContext;

// This struct is stored in TLS.
struct ThreadState {
  FastState fast_state;
  // The synch epoch represents the thread's epoch before the last
  // synchronization action. It allows us to reduce the number of shadow
  // state updates. For example, if fast_synch_epoch=100 and the last write
  // to addr X was at epoch=150, and we are processing a write to X from the
  // same thread at epoch=200, we do nothing, because both writes happen in
  // the same 'synch epoch'. That is, if another memory access does not race
  // with the former write, it does not race with the latter as well.
  // QUESTION: can we squeeze this into ThreadState::Fast?
  // E.g. ThreadState::Fast is 44 bits, 32 are taken by synch_epoch and 12
  // are taken by the epoch between synchs.
  // This way we can save one load from TLS.
  u64 fast_synch_epoch;
  // This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
  // We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  uptr *shadow_stack_pos;
  u64 *racy_shadow_addr;
  u64 racy_state[2];
  Trace trace;
#ifndef TSAN_GO
  // C/C++ uses an embedded shadow stack of fixed size.
  uptr shadow_stack[kShadowStackSize];
#else
  // Go uses a satellite shadow stack of dynamic size.
  uptr *shadow_stack;
  uptr *shadow_stack_end;
#endif
  ThreadClock clock;
#ifndef TSAN_GO
  AllocatorCache alloc_cache;
#endif
  u64 stat[StatCnt];
  const int tid;
  const int unique_id;
  int in_rtl;
  bool is_alive;
  const uptr stk_addr;
  const uptr stk_size;
  const uptr tls_addr;
  const uptr tls_size;

  DeadlockDetector deadlock_detector;

  bool in_signal_handler;
  SignalContext *signal_ctx;

#ifndef TSAN_GO
  u32 last_sleep_stack_id;
  ThreadClock last_sleep_clock;
#endif

  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                       uptr stk_addr, uptr stk_size,
                       uptr tls_addr, uptr tls_size);
};

Context *CTX();

#ifndef TSAN_GO
extern THREADLOCAL char cur_thread_placeholder[];
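// ThreadState has a non-trivial constructor, and the ground rules above
// forbid static CTORs, so a THREADLOCAL ThreadState cannot be declared
// directly; instead raw TLS bytes are reinterpreted (the runtime presumably
// constructs the object in-place when the thread starts).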
INLINE ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
}
#endif

enum ThreadStatus {
  ThreadStatusInvalid,   // Non-existent thread, data is invalid.
  ThreadStatusCreated,   // Created but not yet running.
  ThreadStatusRunning,   // The thread is currently running.
  ThreadStatusFinished,  // Joinable thread is finished but not yet joined.
  ThreadStatusDead,      // Joined, but some info (trace) is still alive.
};

// Information about a thread that is held for some time after its termination.
struct ThreadDeadInfo {
  Trace trace;
};

struct ThreadContext {
  const int tid;
  int unique_id;  // Non-rolling thread id.
  uptr user_id;  // Some opaque user thread id (e.g. pthread_t).
  ThreadState *thr;
  ThreadStatus status;
  bool detached;
  int reuse_count;
  SyncClock sync;
  // Epoch at which the thread had started.
  // If we see an event from the thread stamped by an older epoch,
  // the event is from a dead thread that shared tid with this thread.
  u64 epoch0;
  u64 epoch1;
  StackTrace creation_stack;
  ThreadDeadInfo *dead_info;
  ThreadContext *dead_next;  // In dead thread list.

  explicit ThreadContext(int tid);
};

struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const {
    if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
      return true;
    if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
      return true;
    return false;
  }
};

struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};

struct Context {
  Context();

  bool initialized;

  SyncTab synctab;

  Mutex report_mtx;
  int nreported;
  int nmissed_expected;

  Mutex thread_mtx;
  unsigned thread_seq;
  unsigned unique_thread_seq;
  int alive_threads;
  int max_alive_threads;
  ThreadContext *threads[kMaxTid];
  int dead_list_size;
  ThreadContext* dead_list_head;
  ThreadContext* dead_list_tail;

  Vector<RacyStacks> racy_stacks;
  Vector<RacyAddress> racy_addresses;

  Flags flags;

  u64 stat[StatCnt];
  u64 int_alloc_cnt[MBlockTypeCount];
  u64 int_alloc_siz[MBlockTypeCount];
};

class ScopedInRtl {
 public:
  ScopedInRtl();
  ~ScopedInRtl();
 private:
  ThreadState *thr_;
  int in_rtl_;
  int errno_;
};

class ScopedReport {
 public:
  explicit ScopedReport(ReportType typ);
  ~ScopedReport();

  void AddStack(const StackTrace *stack);
  void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack);
  void AddThread(const ThreadContext *tctx);
  void AddMutex(const SyncVar *s);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(u32 stack_id);

  const ReportDesc *GetReport() const;

 private:
  Context *ctx_;
  ReportDesc *rep_;

  ScopedReport(const ScopedReport&);
  void operator = (const ScopedReport&);
};

void RestoreStack(int tid, const u64 epoch, StackTrace *stk);

void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);
void ALWAYS_INLINE INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
  if (kCollectStats)
    thr->stat[typ] += n;
}

void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeDynamicAnnotations();

void ReportRace(ThreadState *thr);
bool OutputReport(const ScopedReport &srep,
                  const ReportStack *suppress_stack = 0);
bool IsExpectedReport(uptr addr, uptr size);

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf TsanPrintf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 TsanPrintf
#else
# define DPrintf2(...)
#endif

u32 CurrentStackId(ThreadState *thr, uptr pc);

void Initialize(ThreadState *thr);
int Finalize(ThreadState *thr);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite);
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, FastState fast_state,
    u64 *shadow_mem, Shadow cur);
void MemoryRead1Byte(ThreadState *thr, uptr pc, uptr addr);
void MemoryWrite1Byte(ThreadState *thr, uptr pc, uptr addr);
void MemoryRead8Byte(ThreadState *thr, uptr pc, uptr addr);
void MemoryWrite8Byte(ThreadState *thr, uptr pc, uptr addr);
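// kAccessSizeLog is log2 of the access size (see the Shadow layout above),
// so the four helpers above presumably reduce to MemoryAccess() with
// kAccessSizeLog = 0 or 3 and kAccessIsWrite = false or true.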
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
    uptr size, bool is_write);
void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
void IgnoreCtl(ThreadState *thr, bool write, bool begin);

void FuncEntry(ThreadState *thr, uptr pc);
void FuncExit(ThreadState *thr);

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, int tid);
void ThreadFinish(ThreadState *thr);
int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
void ThreadDetach(ThreadState *thr, uptr pc, int tid);
void ThreadFinalize(ThreadState *thr);
void ThreadFinalizerGoroutine(ThreadState *thr);

void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
void MutexLock(ThreadState *thr, uptr pc, uptr addr);
void MutexUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);

void Acquire(ThreadState *thr, uptr pc, uptr addr);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);

// The hacky call uses a custom calling convention and an assembly thunk.
// It is considerably faster than a normal call for the caller
// if it is not executed (it is intended for slow paths from hot functions).
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use a normal call.
#if TSAN_DEBUG == 0
// The caller may not create the stack frame for itself at all,
// so we create a reserve stack frame for it (1024 bytes must be enough).
#define HACKY_CALL(f) \
  __asm__ __volatile__("sub $0x400, %%rsp;" \
                       "call " #f "_thunk;" \
                       "add $0x400, %%rsp;" ::: "memory");
#else
#define HACKY_CALL(f) f()
#endif
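
// Usage sketch: HACKY_CALL(__tsan_trace_switch), as in TraceAddEvent() below,
// emits a call to an assembly thunk named __tsan_trace_switch_thunk (the
// "_thunk" suffix comes from the #f "_thunk" concatenation above), which is
// presumably what preserves the registers before entering the real function.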

void TraceSwitch(ThreadState *thr);

extern "C" void __tsan_trace_switch();
void ALWAYS_INLINE INLINE TraceAddEvent(ThreadState *thr, u64 epoch,
                                        EventType typ, uptr addr) {
  StatInc(thr, StatEvents);
  if (UNLIKELY((epoch % kTracePartSize) == 0)) {
#ifndef TSAN_GO
    HACKY_CALL(__tsan_trace_switch);
#else
    TraceSwitch(thr);
#endif
  }
  Event *evp = &thr->trace.events[epoch % kTraceSize];
  Event ev = (u64)addr | ((u64)typ << 61);
  *evp = ev;
}
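
// Note: an Event packs the event type into the top 3 bits ((u64)typ << 61)
// and the address/PC into the low 61 bits; epoch % kTraceSize selects the
// slot in the per-thread trace ring buffer.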

}  // namespace __tsan

#endif  // TSAN_RTL_H