//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main internal TSan header file.
//
// Ground rules:
//   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//     function-scope locals)
//   - All functions/classes/etc reside in namespace __tsan, except for those
//     declared in tsan_interface.h.
//   - Platform-specific files should be used instead of ifdefs (*).
//   - No system headers included in header files (*).
//   - Platform-specific headers are included only into platform-specific
//     files (*).
//
//  (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//

#ifndef TSAN_RTL_H
#define TSAN_RTL_H

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_asm.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common/sanitizer_libignore.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_vector.h"
#include "tsan_report.h"
#include "tsan_platform.h"
#include "tsan_mutexset.h"
#include "tsan_ignoreset.h"

#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
#endif

namespace __tsan {

// Descriptor of user's memory block.
struct MBlock {
  /*
  u64 mtx : 1;  // must be first
  u64 lst : 44;
  u64 stk : 31;  // on word boundary
  u64 tid : kTidBits;
  u64 siz : 128 - 1 - 31 - 44 - kTidBits;  // 39
  */
  u64 raw[2];

  void Init(uptr siz, u32 tid, u32 stk) {
    raw[0] = raw[1] = 0;
    raw[1] |= (u64)siz << ((1 + 44 + 31 + kTidBits) % 64);
    raw[1] |= (u64)tid << ((1 + 44 + 31) % 64);
    raw[0] |= (u64)stk << (1 + 44);
    raw[1] |= (u64)stk >> (64 - 44 - 1);
    DCHECK_EQ(Size(), siz);
    DCHECK_EQ(Tid(), tid);
    DCHECK_EQ(StackId(), stk);
  }

  u32 Tid() const {
    return GetLsb(raw[1] >> ((1 + 44 + 31) % 64), kTidBits);
  }

  uptr Size() const {
    return raw[1] >> ((1 + 31 + 44 + kTidBits) % 64);
  }

  u32 StackId() const {
    return (raw[0] >> (1 + 44)) | GetLsb(raw[1] << (64 - 44 - 1), 31);
  }

  SyncVar *ListHead() const {
    return (SyncVar*)(GetLsb(raw[0] >> 1, 44) << 3);
  }

  void ListPush(SyncVar *v) {
    SyncVar *lst = ListHead();
    v->next = lst;
    u64 x = (u64)v ^ (u64)lst;
    x = (x >> 3) << 1;
    raw[0] ^= x;
    DCHECK_EQ(ListHead(), v);
  }

  SyncVar *ListPop() {
    SyncVar *lst = ListHead();
    SyncVar *nxt = lst->next;
    lst->next = 0;
    u64 x = (u64)lst ^ (u64)nxt;
    x = (x >> 3) << 1;
    raw[0] ^= x;
    DCHECK_EQ(ListHead(), nxt);
    return lst;
  }

  void ListReset() {
    SyncVar *lst = ListHead();
    u64 x = (u64)lst;
    x = (x >> 3) << 1;
    raw[0] ^= x;
    DCHECK_EQ(ListHead(), 0);
  }

  void Lock();
  void Unlock();
  typedef GenericScopedLock<MBlock> ScopedLock;
};
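
// A minimal sketch (not part of the runtime) of how the packed layout above
// round-trips a block description; the concrete values are hypothetical:
//   MBlock b;
//   b.Init(/*siz=*/64, /*tid=*/1, /*stk=*/42);
//   // Now b.Size() == 64, b.Tid() == 1, b.StackId() == 42. The list
//   // operations store the SyncVar* head shifted right by 3, which assumes
//   // 8-byte-aligned SyncVar pointers.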

#ifndef TSAN_GO
// Note: the TSAN_COMPAT_SHADOW and default configurations currently use the
// same allocator region, so a single definition suffices.
const uptr kAllocatorSpace = 0x7d0000000000ULL;
const uptr kAllocatorSize  =  0x10000000000ULL;  // 1T.

struct MapUnmapCallback;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, sizeof(MBlock),
    DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
Allocator *allocator();
#endif

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2);

const u64 kShadowRodata = (u64)-1;  // .rodata shadow marker

// FastState (from most significant bit):
//   ignore          : 1
//   tid             : kTidBits
//   epoch           : kClkBits
//   unused          : -
//   history_size    : 3
class FastState {
 public:
  FastState(u64 tid, u64 epoch) {
    x_ = tid << kTidShift;
    x_ |= epoch << kClkShift;
    DCHECK_EQ(tid, this->tid());
    DCHECK_EQ(epoch, this->epoch());
    DCHECK_EQ(GetIgnoreBit(), false);
  }

  explicit FastState(u64 x)
      : x_(x) {
  }

  u64 raw() const {
    return x_;
  }

  u64 tid() const {
    u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
    return res;
  }

  u64 TidWithIgnore() const {
    u64 res = x_ >> kTidShift;
    return res;
  }

  u64 epoch() const {
    u64 res = (x_ << (kTidBits + 1)) >> (64 - kClkBits);
    return res;
  }

  void IncrementEpoch() {
    u64 old_epoch = epoch();
    x_ += u64(1) << kClkShift;
    DCHECK_EQ(old_epoch + 1, epoch());
    (void)old_epoch;
  }

  void SetIgnoreBit() { x_ |= kIgnoreBit; }
  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
  bool GetIgnoreBit() const { return (s64)x_ < 0; }

  void SetHistorySize(int hs) {
    CHECK_GE(hs, 0);
    CHECK_LE(hs, 7);
    x_ = (x_ & ~7) | hs;
  }

  int GetHistorySize() const {
    return (int)(x_ & 7);
  }

  void ClearHistorySize() {
    x_ &= ~7;
  }

  u64 GetTracePos() const {
    const int hs = GetHistorySize();
    // The trace consists of 2^(hs+1) parts; when hs == 0 that is 2 parts.
    const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
    return epoch() & mask;
  }

 private:
  friend class Shadow;
  static const int kTidShift = 64 - kTidBits - 1;
  static const int kClkShift = kTidShift - kClkBits;
  static const u64 kIgnoreBit = 1ull << 63;
  static const u64 kFreedBit = 1ull << 63;
  u64 x_;
};
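
// A minimal usage sketch (hypothetical values): FastState packs the ids the
// hot path needs into one word, so they travel in a single register:
//   FastState fs(/*tid=*/5, /*epoch=*/100);
//   fs.IncrementEpoch();   // fs.epoch() == 101, fs.tid() is still 5
//   fs.SetHistorySize(2);  // the trace now spans 2^(2+1) == 8 parts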

// Shadow (from most significant bit):
//   freed           : 1
//   tid             : kTidBits
//   epoch           : kClkBits
//   is_atomic       : 1
//   is_read         : 1
//   size_log        : 2
//   addr0           : 3
class Shadow : public FastState {
 public:
  explicit Shadow(u64 x)
      : FastState(x) {
  }

  explicit Shadow(const FastState &s)
      : FastState(s.x_) {
    ClearHistorySize();
  }

  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ(x_ & 31, 0);
    DCHECK_LE(addr0, 7);
    DCHECK_LE(kAccessSizeLog, 3);
    x_ |= (kAccessSizeLog << 3) | addr0;
    DCHECK_EQ(kAccessSizeLog, size_log());
    DCHECK_EQ(addr0, this->addr0());
  }

  void SetWrite(unsigned kAccessIsWrite) {
    DCHECK_EQ(x_ & kReadBit, 0);
    if (!kAccessIsWrite)
      x_ |= kReadBit;
    DCHECK_EQ(kAccessIsWrite, IsWrite());
  }

  void SetAtomic(bool kIsAtomic) {
    DCHECK(!IsAtomic());
    if (kIsAtomic)
      x_ |= kAtomicBit;
    DCHECK_EQ(IsAtomic(), kIsAtomic);
  }

  bool IsAtomic() const {
    return x_ & kAtomicBit;
  }

  bool IsZero() const {
    return x_ == 0;
  }

  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
    return shifted_xor == 0;
  }

  static inline bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
    u64 masked_xor = (s1.x_ ^ s2.x_) & 31;
    return masked_xor == 0;
  }

  static inline bool TwoRangesIntersect(Shadow s1, Shadow s2,
      unsigned kS2AccessSize) {
    bool res = false;
    u64 diff = s1.addr0() - s2.addr0();
    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0  // NOLINT
      // if (s1.addr0() + size1 > s2.addr0()) return true;
      if (s1.size() > -diff)  res = true;
    } else {
      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
      if (kS2AccessSize > diff) res = true;
    }
    DCHECK_EQ(res, TwoRangesIntersectSLOW(s1, s2));
    DCHECK_EQ(res, TwoRangesIntersectSLOW(s2, s1));
    return res;
  }

  // The idea behind the offset is as follows.
  // Consider that we have 8 bools contained within a single 8-byte block
  // (mapped to a single shadow "cell"). Now consider that we write to the
  // bools from a single thread (which we consider the common case).
  // Without offsetting, each access would have to scan 4 shadow values on
  // average to find the corresponding shadow value for the bool.
  // With offsetting we start scanning the shadow at the offset, so that
  // each access hits the necessary shadow straight away (at least in the
  // expected optimistic case).
  // This logic works seamlessly for any layout of user data. For example,
  // if the user data is {int, short, char, char}, then accesses to the int
  // are offset to 0, the short to 4, the 1st char to 6 and the 2nd char
  // to 7. Hopefully, accesses from a single thread won't need to scan all
  // 8 shadow values.
  unsigned ComputeSearchOffset() {
    return x_ & 7;
  }
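
  // For example, with the {int, short, char, char} layout above, an access
  // to the short starts scanning at shadow slot 4 (its offset within the
  // cell), so in the single-threaded case the matching shadow value is
  // found on the first probe.
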
  u64 addr0() const { return x_ & 7; }
  u64 size() const { return 1ull << size_log(); }
  bool IsWrite() const { return !IsRead(); }
  bool IsRead() const { return x_ & kReadBit; }

  // The idea behind the freed bit is as follows.
  // When the memory is freed (or otherwise becomes inaccessible) we write
  // shadow values with the tid/epoch of the free and the freed bit set.
  // During memory access processing the freed bit is considered as the msb
  // of the tid, so any access races with a shadow value that has the freed
  // bit set (it is as if it were a write from a thread with which we have
  // never synchronized). This allows us to detect accesses to freed memory
  // without additional overhead in memory access processing, and at the
  // same time to restore the tid/epoch of the free.
  void MarkAsFreed() {
    x_ |= kFreedBit;
  }
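
  // A sketch of the mechanism (values hypothetical): on free of a range at
  // epoch E by thread T, the runtime writes a shadow value built from
  // FastState(T, E) with MarkAsFreed() over the range; any later access then
  // appears to race with that "write", and the report side recovers the
  // tid/epoch of the free from the same shadow value.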

  bool IsFreed() const {
    return x_ & kFreedBit;
  }

  bool GetFreedAndReset() {
    bool res = x_ & kFreedBit;
    x_ &= ~kFreedBit;
    return res;
  }

  bool IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
    // analyzes the 5th bit (is_read) and the 6th bit (is_atomic)
    bool v = x_ & u64(((kIsWrite ^ 1) << kReadShift)
        | (kIsAtomic << kAtomicShift));
    DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
    return v;
  }

  bool IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
    return v;
  }

  bool IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
    return v;
  }
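
  // A reading of the three predicates above: (x_ >> kReadShift) & 3 is the
  // 2-bit access type, ordered from strongest to weakest:
  //   0 = plain write, 1 = plain read, 2 = atomic write, 3 = atomic read.
  // E.g. IsRWNotWeaker(/*kIsWrite=*/false, /*kIsAtomic=*/false) asks whether
  // this access is at least as strong as a plain read.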

 private:
  static const u64 kReadShift = 5;
  static const u64 kReadBit = 1ull << kReadShift;
  static const u64 kAtomicShift = 6;
  static const u64 kAtomicBit = 1ull << kAtomicShift;

  u64 size_log() const { return (x_ >> 3) & 3; }

  static bool TwoRangesIntersectSLOW(const Shadow s1, const Shadow s2) {
    if (s1.addr0() == s2.addr0()) return true;
    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
      return true;
    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
      return true;
    return false;
  }
};

struct SignalContext;

struct JmpBuf {
  uptr sp;
  uptr mangled_sp;
  uptr *shadow_stack_pos;
};

// This struct is stored in TLS.
struct ThreadState {
  FastState fast_state;
  // Synch epoch represents the thread's epoch before the last synchronization
  // action. It allows us to reduce the number of shadow state updates.
  // For example, if fast_synch_epoch=100 and the last write to addr X was at
  // epoch=150, and we are processing a write to X from the same thread at
  // epoch=200, we do nothing, because both writes happen in the same 'synch
  // epoch'. That is, if another memory access does not race with the former
  // write, it does not race with the latter as well.
  // QUESTION: can we squeeze this into ThreadState::Fast?
  // E.g. ThreadState::Fast is 44 bits, 32 are taken by synch_epoch and 12 are
  // taken by the epoch between synchs.
  // This way we can save one load from tls.
  u64 fast_synch_epoch;
  // This is a slow path flag. On the fast path, fast_state.GetIgnoreBit() is
  // read. We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  int ignore_sync;
  // Go does not support ignores.
#ifndef TSAN_GO
  IgnoreSet mop_ignore_set;
  IgnoreSet sync_ignore_set;
#endif
  // C/C++ uses a fixed-size shadow stack embedded into the Trace.
  // Go uses a malloc-allocated shadow stack of dynamic size.
  uptr *shadow_stack;
  uptr *shadow_stack_end;
  uptr *shadow_stack_pos;
  u64 *racy_shadow_addr;
  u64 racy_state[2];
  MutexSet mset;
  ThreadClock clock;
#ifndef TSAN_GO
  AllocatorCache alloc_cache;
  InternalAllocatorCache internal_alloc_cache;
  Vector<JmpBuf> jmp_bufs;
  int ignore_interceptors;
#endif
  u64 stat[StatCnt];
  const int tid;
  const int unique_id;
  bool in_symbolizer;
  bool in_ignored_lib;
  bool is_alive;
  bool is_freeing;
  bool is_vptr_access;
  const uptr stk_addr;
  const uptr stk_size;
  const uptr tls_addr;
  const uptr tls_size;
  ThreadContext *tctx;

  InternalDeadlockDetector internal_deadlock_detector;
  DDPhysicalThread *dd_pt;
  DDLogicalThread *dd_lt;

  bool in_signal_handler;
  SignalContext *signal_ctx;

#ifndef TSAN_GO
  u32 last_sleep_stack_id;
  ThreadClock last_sleep_clock;
#endif

  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                       unsigned reuse_count,
                       uptr stk_addr, uptr stk_size,
                       uptr tls_addr, uptr tls_size);
};

#ifndef TSAN_GO
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
INLINE ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
}
#endif

class ThreadContext : public ThreadContextBase {
 public:
  explicit ThreadContext(int tid);
  ~ThreadContext();
  ThreadState *thr;
  u32 creation_stack_id;
  SyncClock sync;
  // Epoch at which the thread started.
  // If we see an event from the thread stamped by an older epoch,
  // the event is from a dead thread that shared its tid with this thread.
  u64 epoch0;
  u64 epoch1;

  // Override superclass callbacks.
  void OnDead();
  void OnJoined(void *arg);
  void OnFinished();
  void OnStarted(void *arg);
  void OnCreated(void *arg);
  void OnReset();
};

struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const {
    if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
      return true;
    if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
      return true;
    return false;
  }
};

struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};

struct FiredSuppression {
  ReportType type;
  uptr pc;
  Suppression *supp;
};

struct Context {
  Context();

  bool initialized;
  bool after_multithreaded_fork;

  SyncTab synctab;

  Mutex report_mtx;
  int nreported;
  int nmissed_expected;
  atomic_uint64_t last_symbolize_time_ns;

  void *background_thread;
  atomic_uint32_t stop_background_thread;

  ThreadRegistry *thread_registry;

  Vector<RacyStacks> racy_stacks;
  Vector<RacyAddress> racy_addresses;
  // The number of fired suppressions may be large,
  // so they are stored in a mmap-backed vector.
  InternalMmapVector<FiredSuppression> fired_suppressions;
  DDetector *dd;

  Flags flags;

  u64 stat[StatCnt];
  u64 int_alloc_cnt[MBlockTypeCount];
  u64 int_alloc_siz[MBlockTypeCount];
};

extern Context *ctx;  // The one and only global runtime context.

struct ScopedIgnoreInterceptors {
  ScopedIgnoreInterceptors() {
#ifndef TSAN_GO
    cur_thread()->ignore_interceptors++;
#endif
  }

  ~ScopedIgnoreInterceptors() {
#ifndef TSAN_GO
    cur_thread()->ignore_interceptors--;
#endif
  }
};
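
// A usage sketch: constructing one of these on the stack makes intercepted
// calls in the current scope run the real functions without TSan processing
// (e.g. while the runtime itself calls into the symbolizer):
//   { ScopedIgnoreInterceptors ignore; /* interceptor-free region */ }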

class ScopedReport {
 public:
  explicit ScopedReport(ReportType typ);
  ~ScopedReport();

  void AddStack(const StackTrace *stack);
  void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack,
                       const MutexSet *mset);
  void AddThread(const ThreadContext *tctx);
  void AddThread(int unique_tid);
  void AddUniqueTid(int unique_tid);
  void AddMutex(const SyncVar *s);
  u64 AddMutex(u64 id);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(u32 stack_id);
  void SetCount(int count);

  const ReportDesc *GetReport() const;

 private:
  ReportDesc *rep_;
  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore_interceptors_;

  void AddDeadMutex(u64 id);

  ScopedReport(const ScopedReport&);
  void operator = (const ScopedReport&);
};
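
// A minimal sketch of the intended call pattern (see the report construction
// in tsan_rtl_report.cc for the real thing; addr/s/stack/mset/tctx and
// suppress_stack here are hypothetical locals):
//   ScopedReport rep(ReportTypeRace);
//   rep.AddMemoryAccess(addr, s, &stack, &mset);
//   rep.AddThread(tctx);
//   OutputReport(ctx, rep, suppress_stack);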

void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset);

void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);
void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
  if (kCollectStats)
    thr->stat[typ] += n;
}
void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
  if (kCollectStats)
    thr->stat[typ] = n;
}

void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size);
void DontNeedShadowFor(uptr addr, uptr size);
void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();

void ForkBefore(ThreadState *thr, uptr pc);
void ForkParentAfter(ThreadState *thr, uptr pc);
void ForkChildAfter(ThreadState *thr, uptr pc);

void ReportRace(ThreadState *thr);
bool OutputReport(Context *ctx,
                  const ScopedReport &srep,
                  const ReportStack *suppress_stack1,
                  const ReportStack *suppress_stack2 = 0,
                  const ReportLocation *suppress_loc = 0);
bool IsFiredSuppression(Context *ctx,
                        const ScopedReport &srep,
                        const StackTrace &trace);
bool IsExpectedReport(uptr addr, uptr size);
void PrintMatchedBenignRaces();
bool FrameIsInternal(const ReportStack *frame);
ReportStack *SkipTsanInternalFrames(ReportStack *ent);

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 Printf
#else
# define DPrintf2(...)
#endif

u32 CurrentStackId(ThreadState *thr, uptr pc);
ReportStack *SymbolizeStackId(u32 stack_id);
void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow();  // uses libunwind

void Initialize(ThreadState *thr);
int Finalize(ThreadState *thr);

SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
                     bool write_lock, bool create);
SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur);
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
    uptr size, bool is_write);
void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
    uptr size, uptr step, bool is_write);
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic);

const int kSizeLog1 = 0;
const int kSizeLog2 = 1;
const int kSizeLog4 = 2;
const int kSizeLog8 = 3;

void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
                              uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
}

void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
                               uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
}

void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
                                    uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
}

void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
                                     uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);

void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreEnd(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc);

void FuncEntry(ThreadState *thr, uptr pc);
void FuncExit(ThreadState *thr);

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, int tid, uptr os_id);
void ThreadFinish(ThreadState *thr);
int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
void ThreadDetach(ThreadState *thr, uptr pc, int tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignals(ThreadState *thr);

void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1,
               bool try_lock = false);
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false);
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr,
                   bool try_lock = false);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr);  // call on EOWNERDEAD

void Acquire(ThreadState *thr, uptr pc, uptr addr);
void AcquireGlobal(ThreadState *thr, uptr pc);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);

// The hacky call uses a custom calling convention and an assembly thunk.
// It is considerably faster than a normal call for the caller
// if it is not executed (it is intended for slow paths from hot functions).
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use a normal call.
#if TSAN_DEBUG == 0
// The caller may not create the stack frame for itself at all,
// so we create a reserve stack frame for it (1024 bytes must be enough).
#define HACKY_CALL(f) \
  __asm__ __volatile__("sub $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(1024) \
                       ".hidden " #f "_thunk;" \
                       "call " #f "_thunk;" \
                       "add $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(-1024) \
                       ::: "memory", "cc");
#else
#define HACKY_CALL(f) f()
#endif
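
// For instance, TraceAddEvent below invokes HACKY_CALL(__tsan_trace_switch)
// only on the rare trace-part boundary, keeping the common path call-free.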

void TraceSwitch(ThreadState *thr);
uptr TraceTopPC(ThreadState *thr);
uptr TraceSize();
uptr TraceParts();
Trace *ThreadTrace(int tid);

extern "C" void __tsan_trace_switch();
void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
                                 EventType typ, u64 addr) {
  if (!kCollectHistory)
    return;
  DCHECK_GE((int)typ, 0);
  DCHECK_LE((int)typ, 7);
  DCHECK_EQ(GetLsb(addr, 61), addr);
  StatInc(thr, StatEvents);
  u64 pos = fs.GetTracePos();
  if (UNLIKELY((pos % kTracePartSize) == 0)) {
#ifndef TSAN_GO
    HACKY_CALL(__tsan_trace_switch);
#else
    TraceSwitch(thr);
#endif
  }
  Event *trace = (Event*)GetThreadTrace(fs.tid());
  Event *evp = &trace[pos];
  Event ev = (u64)addr | ((u64)typ << 61);
  *evp = ev;
}

}  // namespace __tsan

#endif  // TSAN_RTL_H