//===-- tsan_rtl.cc ---------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_interface.h"
#include "tsan_atomic.h"
#include "tsan_mman.h"
#include "tsan_placement_new.h"
#include "tsan_suppressions.h"

volatile int __tsan_stop = 0;

extern "C" void __tsan_resume() {
  __tsan_stop = 0;
}
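
// A debugging aid: if __tsan_stop is made non-zero before Initialize() runs,
// the runtime spins at startup (see Initialize() below) until it is released,
// e.g. from a debugger:
//   (gdb) call __tsan_resume()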

namespace __tsan {

THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGN(64);
static char ctx_placeholder[sizeof(Context)] ALIGN(64);

static Context *ctx;
Context *CTX() {
  return ctx;
}
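
// The two placeholders above reserve suitably aligned raw storage; the real
// objects are constructed into them with placement new (see Initialize()
// below for ctx; cur_thread() in tsan_rtl.h presumably hands out
// cur_thread_placeholder as the ThreadState). The scheme avoids dynamic
// allocation, and TLS provides the zero-initialization that ThreadState's
// ctor relies on (see the comment before it below).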

Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  , thread_mtx(MutexTypeThreads, StatMtxThreads)
  , racy_stacks(MBlockRacyStacks)
  , racy_addresses(MBlockRacyAddresses) {
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, u64 epoch,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , fast_ignore_reads()
  // , fast_ignore_writes()
  // , in_rtl()
  , shadow_stack_pos(&shadow_stack[0])
  , tid(tid)
  , func_call_count()
  , stk_addr(stk_addr)
  , stk_size(stk_size)
  , tls_addr(tls_addr)
  , tls_size(tls_size) {
}

ThreadContext::ThreadContext(int tid)
  : tid(tid)
  , unique_id()
  , user_id()
  , thr()
  , status(ThreadStatusInvalid)
  , detached()
  , reuse_count()
  , epoch0()
  , epoch1()
  , dead_info()
  , dead_next() {
}

static void WriteMemoryProfile(char *buf, uptr buf_size, int num) {
  uptr shadow = GetShadowMemoryConsumption();

  int nthread = 0;
  int nlivethread = 0;
  uptr threadmem = 0;
  {
    Lock l(&ctx->thread_mtx);
    for (unsigned i = 0; i < kMaxTid; i++) {
      ThreadContext *tctx = ctx->threads[i];
      if (tctx == 0)
        continue;
      nthread += 1;
      threadmem += sizeof(ThreadContext);
      if (tctx->status != ThreadStatusRunning)
        continue;
      nlivethread += 1;
      threadmem += sizeof(ThreadState);
    }
  }

  uptr nsync = 0;
  uptr syncmem = CTX()->synctab.GetMemoryConsumption(&nsync);

  Snprintf(buf, buf_size, "%d: shadow=%luMB"
                          " thread=%luMB(total=%d/live=%d)"
                          " sync=%luMB(cnt=%lu)\n",
           num,
           shadow >> 20,
           threadmem >> 20, nthread, nlivethread,
           syncmem >> 20, nsync);
}
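
// An illustrative profile line as formatted above (numbers are made up):
//   7: shadow=2048MB thread=1MB(total=42/live=5) sync=12MB(cnt=98304)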

static void MemoryProfileThread(void *arg) {
  ScopedInRtl in_rtl;
  fd_t fd = (fd_t)(uptr)arg;
  for (int i = 0; ; i++) {
    InternalScopedBuf<char> buf(4096);
    WriteMemoryProfile(buf.Ptr(), buf.Size(), i);
    internal_write(fd, buf.Ptr(), internal_strlen(buf.Ptr()));
    internal_sleep_ms(1000);
  }
}

static void InitializeMemoryProfile() {
  if (flags()->profile_memory == 0 || flags()->profile_memory[0] == 0)
    return;
  InternalScopedBuf<char> filename(4096);
  Snprintf(filename.Ptr(), filename.Size(), "%s.%d",
           flags()->profile_memory, GetPid());
  fd_t fd = internal_open(filename.Ptr(), true);
  if (fd == kInvalidFd) {
    Printf("Failed to open memory profile file '%s'\n", filename.Ptr());
    Die();
  }
  internal_start_thread(&MemoryProfileThread, (void*)(uptr)fd);
}
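
// Usage sketch (assuming flags come from the TSAN_OPTIONS environment
// variable, which is what InitializePlatform() hands to InitializeFlags()):
//   TSAN_OPTIONS=profile_memory=/tmp/tsan ./app
// opens /tmp/tsan.<pid> and appends one profile line to it every second.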

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  ScopedInRtl in_rtl;
  InitializeInterceptors();
  const char *env = InitializePlatform();
  InitializeMutex();
  InitializeDynamicAnnotations();
  ctx = new(ctx_placeholder) Context;
  InitializeShadowMemory();
  ctx->dead_list_size = 0;
  ctx->dead_list_head = 0;
  ctx->dead_list_tail = 0;
  InitializeFlags(&ctx->flags, env);
  InitializeSuppressions();
  InitializeMemoryProfile();

  if (ctx->flags.verbosity)
    Printf("***** Running under ThreadSanitizer v2 (pid=%d) *****\n", GetPid());

  // Initialize thread 0.
  ctx->thread_seq = 0;
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid);
  CHECK_EQ(thr->in_rtl, 1);
  ctx->initialized = true;

  if (__tsan_stop) {
    Printf("ThreadSanitizer is suspended at startup.\n");
    while (__tsan_stop);
  }
}

int Finalize(ThreadState *thr) {
  ScopedInRtl in_rtl;
  Context *ctx = __tsan::ctx;
  bool failed = false;

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
           ctx->nmissed_expected);
  }

  StatOutput(ctx->stat);
  return failed ? flags()->exitcode : 0;
}

static void TraceSwitch(ThreadState *thr) {
  ScopedInRtl in_rtl;
  Lock l(&thr->trace.mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % kTraceParts;
  TraceHeader *hdr = &thr->trace.headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  hdr->stack0.ObtainCurrent(thr, 0);
}
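
// The trace is a ring buffer of kTraceParts parts, kTracePartSize events
// each; the arithmetic above selects the part the current epoch falls into.
// E.g. with hypothetical values kTracePartSize == 16384 and kTraceParts == 8,
// epoch 100000 lands in part (100000 / 16384) % 8 == 6. The header records
// the first epoch and the call stack at the start of the part, so that
// reports can replay the events relative to a known state.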

extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}

ALWAYS_INLINE
static Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
static void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

ALWAYS_INLINE
static void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}
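
// StoreIfNotYetStored is half of a small protocol with MemoryAccessImpl()
// below: the current shadow value travels in store_word, the first slot that
// accepts it zeroes store_word, and a zero store_word afterwards means
// "already persisted", both for later scan iterations and for the final
// fast-path check in MemoryAccessImpl().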

static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
                              Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
  HACKY_CALL(__tsan_report_race);
}

static inline bool BothReads(Shadow s, int kAccessIsWrite) {
  return !kAccessIsWrite && !s.is_write();
}

static inline bool OldIsRWStronger(Shadow old, int kAccessIsWrite) {
  return old.is_write() || !kAccessIsWrite;
}

static inline bool OldIsRWWeaker(Shadow old, int kAccessIsWrite) {
  return !old.is_write() || kAccessIsWrite;
}

static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
  return old.epoch() >= thr->fast_synch_epoch;
}

static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.tid()) >= old.epoch();
}
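
// HappensBefore() is the usual vector-clock test: the old access, made by
// thread old.tid() at epoch old.epoch(), is ordered before the current access
// iff the current thread has synchronized with that thread up to at least
// that epoch. A made-up example: old.tid() == 3, old.epoch() == 40, and
// thr->clock.get(3) == 50 because some mutex release/acquire propagated
// thread 3's time; the two accesses are then ordered and do not race.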

ALWAYS_INLINE
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, FastState fast_state,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i a);
  // void _mm_storel_epi64(__m128i *p, __m128i a);
  u64 store_word = cur.raw();

  // Scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // We consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. Initially I considered
  // larger and smaller as well; it allowed replacing some
  // 'candidates' with 'same' or 'replace', but I think
  // it's just not worth it (performance- and complexity-wise).

  Shadow old(0);
  if (kShadowCnt == 1) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 2) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 4) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 8) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
    idx = 4;
#include "tsan_update_shadow_word_inl.h"
    idx = 5;
#include "tsan_update_shadow_word_inl.h"
    idx = 6;
#include "tsan_update_shadow_word_inl.h"
    idx = 7;
#include "tsan_update_shadow_word_inl.h"
  } else {
    CHECK(false);
  }
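
  // Each inclusion above expands to one step of the scan over a fixed idx.
  // A sketch of the logic in tsan_update_shadow_word_inl.h (illustrative,
  // not its literal contents):
  //   Shadow old = LoadShadow(&shadow_mem[idx]);
  //   if (old.IsZero()) {                  // free slot: store and move on
  //     if (store_word)
  //       StoreIfNotYetStored(&shadow_mem[idx], &store_word);
  //   } else if (<same cell/offset/size and same thread>) {
  //     StoreIfNotYetStored(...);          // "same": just refresh the epoch
  //   } else if (<ranges intersect> && !BothReads(old, kAccessIsWrite) &&
  //              !HappensBefore(old, thr)) {
  //     goto RACE;                         // unordered conflict: report it
  //   }                                    // otherwise: a harmless candidate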

  // We did not find any races and had already stored
  // the current access info, so we are done.
  if (LIKELY(store_word == 0))
    return;
  // Choose a candidate slot and replace it (the low bits of the epoch serve
  // as a cheap pseudo-random index).
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}

ALWAYS_INLINE
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: tsan::OnMemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%llx, %llx, %llx, %llx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      shadow_mem[0], shadow_mem[1], shadow_mem[2], shadow_mem[3]);
#if TSAN_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %lx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%lx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;
  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);

  // We must not store to the trace if we do not store to the shadow.
  // That is, this call must be moved somewhere below.
  TraceAddEvent(thr, fast_state.epoch(), EventTypeMop, pc);

  MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, fast_state,
      shadow_mem, cur);
}
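
// Shadow layout recap: each kShadowCell (8) aligned bytes of application
// memory map to a cell of kShadowCnt shadow words of 8 bytes each, so e.g.
// kShadowCnt == 4 costs 4x the application memory. SetAddr0AndSizeLog()
// records which bytes within the 8-byte cell an access covers, which is what
// allows accesses of different sizes and offsets to the same cell to be
// tested for intersection.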

static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  if (size == 0)
    return;
  // FIXME: the unaligned head of the range is skipped rather than reset.
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  CHECK_EQ(addr % 8, 0);
  CHECK(IsAppMem(addr));
  CHECK(IsAppMem(addr + size - 1));
  (void)thr;
  (void)pc;
  // Some programs mmap hundreds of GBs but actually use only a small part,
  // so it's better to report a false positive on such memory
  // than to hang here senselessly.
  const uptr kMaxResetSize = 1024*1024*1024;
  if (size > kMaxResetSize)
    size = kMaxResetSize;
  size = (size + 7) & ~7;
  u64 *p = (u64*)MemToShadow(addr);
  CHECK(IsShadowMem((uptr)p));
  CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
  // FIXME: may overwrite a part outside the region.
  for (uptr i = 0; i < size * kShadowCnt / kShadowCell; i++)
    p[i] = val;
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryAccessRange(thr, pc, addr, size, true);
  Shadow s(thr->fast_state);
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}
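
// MemoryRangeFreed first touches the whole range as an ordinary write (an
// access racing with the free is already a bug worth reporting), then stamps
// every shadow cell with a write shadow value carrying the freed mark. Any
// later access conflicts with that "write", so use-after-free is reported
// through the regular race-detection machinery.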

void FuncEntry(ThreadState *thr, uptr pc) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: tsan::FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeFuncEnter, pc);

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which is presumably faster).
  DCHECK(thr->shadow_stack_pos >= &thr->shadow_stack[0]);
  DCHECK(thr->shadow_stack_pos < &thr->shadow_stack[kShadowStackSize]);
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

void FuncExit(ThreadState *thr) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: tsan::FuncExit\n", (int)thr->fast_state.tid());
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeFuncExit, 0);

  DCHECK(thr->shadow_stack_pos > &thr->shadow_stack[0]);
  DCHECK(thr->shadow_stack_pos < &thr->shadow_stack[kShadowStackSize]);
  thr->shadow_stack_pos--;
}

void IgnoreCtl(ThreadState *thr, bool write, bool begin) {
  DPrintf("#%d: IgnoreCtl(%d, %d)\n", thr->tid, write, begin);
  thr->ignore_reads_and_writes += begin ? 1 : -1;
  CHECK_GE(thr->ignore_reads_and_writes, 0);
  if (thr->ignore_reads_and_writes)
    thr->fast_state.SetIgnoreBit();
  else
    thr->fast_state.ClearIgnoreBit();
}
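
// Note that begin/end pairs nest (hence the counter), and in this version the
// 'write' argument only affects the debug message: reads and writes are
// ignored together through the single ignore bit that MemoryAccess() checks
// before doing any shadow work.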
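// Exactly one function from each #if group below is compiled in. They are
// presumably referenced from separately built code (e.g. the tests), so that
// mixing objects built with different TSAN_DEBUG / TSAN_COLLECT_STATS /
// TSAN_SHADOW_COUNT settings fails at link time rather than misbehaving at
// run time.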
#if TSAN_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

#if TSAN_SHADOW_COUNT == 1
void build_consistency_shadow1() {}
#elif TSAN_SHADOW_COUNT == 2
void build_consistency_shadow2() {}
#elif TSAN_SHADOW_COUNT == 4
void build_consistency_shadow4() {}
#else
void build_consistency_shadow8() {}
#endif

}  // namespace __tsan

// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"