Alexey Samsonov | 603c4be | 2012-06-04 13:55:19 +0000 | [diff] [blame] | 1 | //===-- tsan_rtl.cc -------------------------------------------------------===// |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 2 | // |
| 3 | // The LLVM Compiler Infrastructure |
| 4 | // |
| 5 | // This file is distributed under the University of Illinois Open Source |
| 6 | // License. See LICENSE.TXT for details. |
| 7 | // |
| 8 | //===----------------------------------------------------------------------===// |
| 9 | // |
| 10 | // This file is a part of ThreadSanitizer (TSan), a race detector. |
| 11 | // |
| 12 | // Main file (entry points) for the TSan run-time. |
| 13 | //===----------------------------------------------------------------------===// |
| 14 | |
Dmitry Vyukov | fce5bd4 | 2012-06-29 16:58:33 +0000 | [diff] [blame] | 15 | #include "sanitizer_common/sanitizer_atomic.h" |
Alexey Samsonov | 0969bcf | 2012-06-18 08:44:30 +0000 | [diff] [blame] | 16 | #include "sanitizer_common/sanitizer_common.h" |
Kostya Serebryany | 16e0075 | 2012-05-31 13:42:53 +0000 | [diff] [blame] | 17 | #include "sanitizer_common/sanitizer_libc.h" |
Dmitry Vyukov | 8485311 | 2012-08-31 17:27:49 +0000 | [diff] [blame] | 18 | #include "sanitizer_common/sanitizer_stackdepot.h" |
Alexey Samsonov | 47b1634 | 2012-06-07 09:50:16 +0000 | [diff] [blame] | 19 | #include "sanitizer_common/sanitizer_placement_new.h" |
Alexey Samsonov | 8cc1f81 | 2012-09-06 08:48:43 +0000 | [diff] [blame] | 20 | #include "sanitizer_common/sanitizer_symbolizer.h" |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 21 | #include "tsan_defs.h" |
| 22 | #include "tsan_platform.h" |
| 23 | #include "tsan_rtl.h" |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 24 | #include "tsan_mman.h" |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 25 | #include "tsan_suppressions.h" |
Dmitry Vyukov | a38e40f | 2013-03-21 07:02:36 +0000 | [diff] [blame] | 26 | #include "tsan_symbolize.h" |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 27 | |
// Startup-suspension flag: when flags()->stop_on_start is set, Initialize()
// spins until this becomes non-zero (see the busy-wait at the end of
// Initialize()).
volatile int __tsan_resumed = 0;

// Releases a process that was suspended at startup by stop_on_start.
// Intended to be called from a debugger or by the program itself.
extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}
| 33 | |
| 34 | namespace __tsan { |
| 35 | |
#ifndef TSAN_GO
// Raw TLS bytes for the current thread's ThreadState (constructed via
// placement new elsewhere); cache-line aligned to avoid false sharing.
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
// Storage for the global Context singleton; Initialize() placement-news
// the Context into it.
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 40 | |
// Finalization hook: receives the failure status computed by Finalize()
// and returns the (possibly overridden) status. Can be overridden by a
// front-end, either at link time (TSAN_EXTERNAL_HOOKS) or by providing a
// strong definition for the weak default below.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
#else
// Default implementation: leave the failure status unchanged.
bool WEAK OnFinalize(bool failed) {
  return failed;
}
#endif
Dmitry Vyukov | 22881ec | 2013-01-30 09:24:00 +0000 | [diff] [blame] | 49 | |
// The global Context singleton; set once in Initialize().
static Context *ctx;
// Accessor for the global Context. Returns null before Initialize() runs.
Context *CTX() {
  return ctx;
}
| 54 | |
// Storage for the ThreadRegistry singleton created in Context's ctor.
static char thread_registry_placeholder[sizeof(ThreadRegistry)];

// Factory callback handed to ThreadRegistry: maps the trace memory for
// thread 'tid' and allocates its ThreadContext.
static ThreadContextBase *CreateThreadContext(u32 tid) {
  // Map thread trace when context is created.
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event));
  MapThreadTrace(GetThreadTraceHeader(tid), sizeof(Trace));
  // Construct the Trace object in the freshly mapped header region.
  new(ThreadTrace(tid)) Trace();
  void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
  return new(mem) ThreadContext(tid);
}
| 65 | |
// Quarantine size passed to ThreadRegistry: how many finished thread
// contexts are kept before being reused (larger for Go, where goroutines
// are created/destroyed at a higher rate — presumably; confirm upstream).
#ifndef TSAN_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif

// Global runtime context. Constructed exactly once, by placement new into
// ctx_placeholder from Initialize().
Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  // The registry lives in static storage (thread_registry_placeholder),
  // constructed here via placement new.
  , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
      CreateThreadContext, kMaxTid, kThreadQuarantineSize))
  , racy_stacks(MBlockRacyStacks)
  , racy_addresses(MBlockRacyAddresses)
  , fired_suppressions(8) {
}
| 83 | |
// Per-thread state constructor.
// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , ignore_reads_and_writes()
  // , in_rtl()
  , shadow_stack_pos(&shadow_stack[0])
#ifndef TSAN_GO
  , jmp_bufs(MBlockJmpBuf)
#endif
  , tid(tid)
  , unique_id(unique_id)
  , stk_addr(stk_addr)
  , stk_size(stk_size)
  , tls_addr(tls_addr)
  , tls_size(tls_size) {
}
| 104 | |
Dmitry Vyukov | a38e40f | 2013-03-21 07:02:36 +0000 | [diff] [blame] | 105 | static void MemoryProfiler(Context *ctx, fd_t fd, int i) { |
Dmitry Vyukov | 4bebe7b | 2013-03-21 06:24:31 +0000 | [diff] [blame] | 106 | uptr n_threads; |
| 107 | uptr n_running_threads; |
| 108 | ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads); |
Dmitry Vyukov | a38e40f | 2013-03-21 07:02:36 +0000 | [diff] [blame] | 109 | InternalScopedBuffer<char> buf(4096); |
Dmitry Vyukov | 4bebe7b | 2013-03-21 06:24:31 +0000 | [diff] [blame] | 110 | internal_snprintf(buf.data(), buf.size(), "%d: nthr=%d nlive=%d\n", |
| 111 | i, n_threads, n_running_threads); |
| 112 | internal_write(fd, buf.data(), internal_strlen(buf.data())); |
| 113 | WriteMemoryProfile(buf.data(), buf.size()); |
| 114 | internal_write(fd, buf.data(), internal_strlen(buf.data())); |
Dmitry Vyukov | 2612773 | 2012-05-22 11:33:03 +0000 | [diff] [blame] | 115 | } |
| 116 | |
// Runtime housekeeping thread, started from Initialize(). Loops forever,
// waking once per second to:
//   * flush shadow memory periodically (flush_memory_ms flag),
//   * flush shadow memory when RSS approaches memory_limit_mb,
//   * append memory-profile snapshots (profile_memory flag),
//   * flush the symbolizer cache (flush_symbolizer_ms flag, C++ only).
static void BackgroundThread(void *arg) {
  ScopedInRtl in_rtl;
  Context *ctx = CTX();
  const u64 kMs2Ns = 1000 * 1000;

  // Open the memory-profile file once, if requested; file name is
  // "<profile_memory>.<pid>".
  fd_t mprof_fd = kInvalidFd;
  if (flags()->profile_memory && flags()->profile_memory[0]) {
    InternalScopedBuffer<char> filename(4096);
    internal_snprintf(filename.data(), filename.size(), "%s.%d",
        flags()->profile_memory, (int)internal_getpid());
    uptr openrv = OpenFile(filename.data(), true);
    if (internal_iserror(openrv)) {
      // Non-fatal: profiling is simply disabled (mprof_fd stays invalid).
      Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
          &filename[0]);
    } else {
      mprof_fd = openrv;
    }
  }

  u64 last_flush = NanoTime();
  uptr last_rss = 0;
  for (int i = 0; ; i++) {
    SleepForSeconds(1);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        if (flags()->verbosity > 0)
          Printf("ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        last_flush = NanoTime();
      }
    }
    // RSS-driven flush: trigger once RSS passes the midpoint between the
    // previous RSS and the configured limit (2*rss > limit + last_rss),
    // so we react before the hard limit is actually hit.
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      if (flags()->verbosity > 0) {
        Printf("ThreadSanitizer: memory flush check"
               " RSS=%llu LAST=%llu LIMIT=%llu\n",
               (u64)rss>>20, (u64)last_rss>>20, (u64)limit>>20);
      }
      if (2 * rss > limit + last_rss) {
        if (flags()->verbosity > 0)
          Printf("ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        if (flags()->verbosity > 0)
          Printf("ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
      }
      last_rss = rss;
    }

    // Write memory profile if requested.
    if (mprof_fd != kInvalidFd)
      MemoryProfiler(ctx, mprof_fd, i);

#ifndef TSAN_GO
    // Flush symbolizer cache if requested. last_symbolize_time_ns == 0
    // means "nothing symbolized since the last flush", so skip.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        // Take both report locks so we never flush while a report is being
        // symbolized.
        Lock l(&ctx->report_mtx);
        SpinMutexLock l2(&CommonSanitizerReportMutex);
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
#endif
  }
}
| 189 | |
Dmitry Vyukov | 7ac33ac | 2013-03-18 15:49:07 +0000 | [diff] [blame] | 190 | void DontNeedShadowFor(uptr addr, uptr size) { |
| 191 | uptr shadow_beg = MemToShadow(addr); |
| 192 | uptr shadow_end = MemToShadow(addr + size); |
| 193 | FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg); |
| 194 | } |
| 195 | |
// Reserves shadow memory for the application range [addr, addr + size).
// Each application byte needs kShadowMultiplier shadow bytes.
void MapShadow(uptr addr, uptr size) {
  MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);
}
| 199 | |
Dmitry Vyukov | 6535c31 | 2012-12-13 08:14:02 +0000 | [diff] [blame] | 200 | void MapThreadTrace(uptr addr, uptr size) { |
Dmitry Vyukov | dae1251 | 2012-12-21 12:30:52 +0000 | [diff] [blame] | 201 | DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size); |
Dmitry Vyukov | 6535c31 | 2012-12-13 08:14:02 +0000 | [diff] [blame] | 202 | CHECK_GE(addr, kTraceMemBegin); |
| 203 | CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize); |
| 204 | if (addr != (uptr)MmapFixedNoReserve(addr, size)) { |
| 205 | Printf("FATAL: ThreadSanitizer can not mmap thread trace\n"); |
| 206 | Die(); |
| 207 | } |
| 208 | } |
| 209 | |
// One-time runtime initialization: sets up sanitizer_common callbacks,
// the allocator, interceptors, shadow memory, flags, suppressions and the
// symbolizer, starts the background thread, and registers/starts the main
// thread (tid 0). The ordering of these steps is significant.
void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  ScopedInRtl in_rtl;
#ifndef TSAN_GO
  InitializeAllocator();
#endif
  InitializeInterceptors();
  // InitializePlatform() returns the environment string used below to
  // parse the flags.
  const char *env = InitializePlatform();
  InitializeMutex();
  InitializeDynamicAnnotations();
  // Construct the global Context in its static placeholder.
  ctx = new(ctx_placeholder) Context;
#ifndef TSAN_GO
  InitializeShadowMemory();
#endif
  InitializeFlags(&ctx->flags, env);
  // Setup correct file descriptor for error reports.
  __sanitizer_set_report_path(flags()->log_path);
  InitializeSuppressions();
#ifndef TSAN_GO
  InitializeLibIgnore();
  // Initialize external symbolizer before internal threads are started.
  const char *external_symbolizer = flags()->external_symbolizer_path;
  bool symbolizer_started =
      getSymbolizer()->InitializeExternal(external_symbolizer);
  // Dying here is deliberate: a requested-but-broken symbolizer would
  // silently produce unsymbolized reports otherwise.
  if (external_symbolizer != 0 && external_symbolizer[0] != '\0' &&
      !symbolizer_started) {
    Printf("Failed to start external symbolizer: '%s'\n",
           external_symbolizer);
    Die();
  }
#endif
  internal_start_thread(&BackgroundThread, 0);

  if (ctx->flags.verbosity)
    Printf("***** Running under ThreadSanitizer v2 (pid %d) *****\n",
           (int)internal_getpid());

  // Initialize thread 0.
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, internal_getpid());
  CHECK_EQ(thr->in_rtl, 1);
  ctx->initialized = true;

  // Optionally park the process until __tsan_resume() flips the flag
  // (lets a debugger attach before any user code runs).
  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }
}
| 269 | |
// Runtime shutdown (called at process exit). Waits for in-flight reports,
// prints summary statistics, gives the front-end a chance to override the
// status via OnFinalize(), and returns the process exit code (flags()->
// exitcode if any races/missed-expected-races were found, 0 otherwise).
int Finalize(ThreadState *thr) {
  ScopedInRtl in_rtl;
  Context *ctx = __tsan::ctx;
  bool failed = false;

  // Optional grace period so that racing background threads get a chance
  // to produce reports before we tear down.
  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports: lock/unlock both report mutexes so anyone
  // holding them finishes first.
  ctx->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
  CommonSanitizerReportMutex.Unlock();
  ctx->report_mtx.Unlock();

#ifndef TSAN_GO
  if (ctx->flags.verbosity)
    AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#ifndef TSAN_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  // Races annotated as expected (ANNOTATE_EXPECT_RACE) that never fired.
  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
        ctx->nmissed_expected);
  }

  if (flags()->print_suppressions)
    PrintMatchedSuppressions();
#ifndef TSAN_GO
  if (flags()->print_benign)
    PrintMatchedBenignRaces();
#endif

  // Front-end hook may override the final status.
  failed = OnFinalize(failed);

  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
  return failed ? flags()->exitcode : 0;
}
| 319 | |
Dmitry Vyukov | 0ab628c | 2012-09-06 15:18:14 +0000 | [diff] [blame] | 320 | #ifndef TSAN_GO |
Dmitry Vyukov | 8485311 | 2012-08-31 17:27:49 +0000 | [diff] [blame] | 321 | u32 CurrentStackId(ThreadState *thr, uptr pc) { |
| 322 | if (thr->shadow_stack_pos == 0) // May happen during bootstrap. |
| 323 | return 0; |
| 324 | if (pc) { |
| 325 | thr->shadow_stack_pos[0] = pc; |
| 326 | thr->shadow_stack_pos++; |
| 327 | } |
| 328 | u32 id = StackDepotPut(thr->shadow_stack, |
| 329 | thr->shadow_stack_pos - thr->shadow_stack); |
| 330 | if (pc) |
| 331 | thr->shadow_stack_pos--; |
| 332 | return id; |
| 333 | } |
Dmitry Vyukov | 0ab628c | 2012-09-06 15:18:14 +0000 | [diff] [blame] | 334 | #endif |
Dmitry Vyukov | 8485311 | 2012-08-31 17:27:49 +0000 | [diff] [blame] | 335 | |
Dmitry Vyukov | b78caa6 | 2012-07-05 16:18:28 +0000 | [diff] [blame] | 336 | void TraceSwitch(ThreadState *thr) { |
Dmitry Vyukov | 9ad7c32 | 2012-06-22 11:08:55 +0000 | [diff] [blame] | 337 | thr->nomalloc++; |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 338 | ScopedInRtl in_rtl; |
Dmitry Vyukov | 9743d74 | 2013-03-20 10:31:53 +0000 | [diff] [blame] | 339 | Trace *thr_trace = ThreadTrace(thr->tid); |
| 340 | Lock l(&thr_trace->mtx); |
Dmitry Vyukov | 0415ac0 | 2012-12-04 12:19:53 +0000 | [diff] [blame] | 341 | unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts(); |
Dmitry Vyukov | 9743d74 | 2013-03-20 10:31:53 +0000 | [diff] [blame] | 342 | TraceHeader *hdr = &thr_trace->headers[trace]; |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 343 | hdr->epoch0 = thr->fast_state.epoch(); |
| 344 | hdr->stack0.ObtainCurrent(thr, 0); |
Dmitry Vyukov | ad9da37 | 2012-12-06 12:16:15 +0000 | [diff] [blame] | 345 | hdr->mset0 = thr->mset; |
Dmitry Vyukov | 9ad7c32 | 2012-06-22 11:08:55 +0000 | [diff] [blame] | 346 | thr->nomalloc--; |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 347 | } |
| 348 | |
Dmitry Vyukov | 9743d74 | 2013-03-20 10:31:53 +0000 | [diff] [blame] | 349 | Trace *ThreadTrace(int tid) { |
| 350 | return (Trace*)GetThreadTraceHeader(tid); |
| 351 | } |
| 352 | |
Dmitry Vyukov | 385542a | 2012-11-28 10:35:31 +0000 | [diff] [blame] | 353 | uptr TraceTopPC(ThreadState *thr) { |
| 354 | Event *events = (Event*)GetThreadTrace(thr->tid); |
Dmitry Vyukov | d698edc | 2012-11-28 12:19:50 +0000 | [diff] [blame] | 355 | uptr pc = events[thr->fast_state.GetTracePos()]; |
Dmitry Vyukov | 385542a | 2012-11-28 10:35:31 +0000 | [diff] [blame] | 356 | return pc; |
| 357 | } |
| 358 | |
Dmitry Vyukov | d698edc | 2012-11-28 12:19:50 +0000 | [diff] [blame] | 359 | uptr TraceSize() { |
| 360 | return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1)); |
| 361 | } |
| 362 | |
// Number of fixed-size parts a thread trace is divided into.
uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}
| 366 | |
#ifndef TSAN_GO
// Entry point invoked via HACKY_CALL (see HandleRace/MemoryAccess paths):
// switches the calling thread to a new trace part.
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

// Entry point invoked via HACKY_CALL: reports a race for the calling
// thread (the racy state was stashed in ThreadState by HandleRace).
extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 376 | |
ALWAYS_INLINE
// Reads one shadow word with a relaxed atomic load (shadow cells are
// written concurrently by other threads).
Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}
| 382 | |
ALWAYS_INLINE
// Writes one shadow word with a relaxed atomic store.
void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}
| 387 | |
ALWAYS_INLINE
// Stores *s into the shadow cell *sp and zeroes *s, marking the value as
// already stored (callers test *s == 0 to know whether a store happened).
void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}
| 393 | |
// Records the two conflicting shadow values in the thread state and
// triggers race reporting.
static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
                              Shadow cur, Shadow old) {
  // Stash the racy pair where the reporting code can find it.
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#ifndef TSAN_GO
  // In C++ mode the report is produced via the HACKY_CALL trampoline;
  // the Go runtime calls ReportRace directly.
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}
| 405 | |
// True if the old shadow value was written in the current synchronization
// epoch of this thread (i.e. no synchronization happened since).
static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
  return old.epoch() >= thr->fast_synch_epoch;
}
| 409 | |
// True if the old access happens-before the current thread's state: the
// thread's vector clock for the old access's thread has advanced at least
// to the old access's epoch.
static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}
| 413 | |
// Core shadow-state update for a single memory access. Scans the
// kShadowCnt shadow words of the target cell, classifying each against
// the current access, and either stores the current access into the cell
// or jumps to RACE: when a conflict is found.
// NOTE: tsan_update_shadow_word_inl.h (included repeatedly below) reads
// 'idx', 'cur', 'old', 'store_word', 'shadow_mem', 'thr' and does
// 'goto RACE' — the label at the bottom of this function.
ALWAYS_INLINE USED
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  // store_word is zeroed by the included code once the access is stored.
  u64 store_word = cur.raw();

  // scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // we consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. initially I considered
  // larger and smaller as well, it allowed to replace some
  // 'candidates' with 'same' or 'replace', but I think
  // it's just not worth it (performance- and complexity-wise).

  Shadow old(0);
  // The include is repeated (rather than looped) to force full unrolling
  // for each supported kShadowCnt.
  if (kShadowCnt == 1) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 2) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 4) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 8) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
    idx = 4;
#include "tsan_update_shadow_word_inl.h"
    idx = 5;
#include "tsan_update_shadow_word_inl.h"
    idx = 6;
#include "tsan_update_shadow_word_inl.h"
    idx = 7;
#include "tsan_update_shadow_word_inl.h"
  } else {
    CHECK(false);
  }

  // we did not find any races and had already stored
  // the current access info, so we are done
  if (LIKELY(store_word == 0))
    return;
  // choose a random candidate slot and replace it
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}
| 487 | |
Dmitry Vyukov | 8ecd0e5 | 2013-04-30 11:56:56 +0000 | [diff] [blame] | 488 | void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, |
| 489 | int size, bool kAccessIsWrite, bool kIsAtomic) { |
| 490 | while (size) { |
| 491 | int size1 = 1; |
| 492 | int kAccessSizeLog = kSizeLog1; |
| 493 | if (size >= 8 && (addr & ~7) == ((addr + 8) & ~7)) { |
| 494 | size1 = 8; |
| 495 | kAccessSizeLog = kSizeLog8; |
| 496 | } else if (size >= 4 && (addr & ~7) == ((addr + 4) & ~7)) { |
| 497 | size1 = 4; |
| 498 | kAccessSizeLog = kSizeLog4; |
| 499 | } else if (size >= 2 && (addr & ~7) == ((addr + 2) & ~7)) { |
| 500 | size1 = 2; |
| 501 | kAccessSizeLog = kSizeLog2; |
| 502 | } |
| 503 | MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic); |
| 504 | addr += size1; |
| 505 | size -= size1; |
| 506 | } |
| 507 | } |
| 508 | |
Kostya Serebryany | d475aa8 | 2013-03-29 09:44:16 +0000 | [diff] [blame] | 509 | ALWAYS_INLINE USED |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 510 | void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, |
Dmitry Vyukov | 334553e | 2013-02-01 09:42:06 +0000 | [diff] [blame] | 511 | int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) { |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 512 | u64 *shadow_mem = (u64*)MemToShadow(addr); |
Dmitry Vyukov | 68230a1 | 2012-12-07 19:23:59 +0000 | [diff] [blame] | 513 | DPrintf2("#%d: MemoryAccess: @%p %p size=%d" |
Alexey Samsonov | e954101 | 2012-06-06 13:11:29 +0000 | [diff] [blame] | 514 | " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n", |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 515 | (int)thr->fast_state.tid(), (void*)pc, (void*)addr, |
| 516 | (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem, |
Alexey Samsonov | e954101 | 2012-06-06 13:11:29 +0000 | [diff] [blame] | 517 | (uptr)shadow_mem[0], (uptr)shadow_mem[1], |
| 518 | (uptr)shadow_mem[2], (uptr)shadow_mem[3]); |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 519 | #if TSAN_DEBUG |
| 520 | if (!IsAppMem(addr)) { |
Alexey Samsonov | b1fe302 | 2012-11-02 12:17:51 +0000 | [diff] [blame] | 521 | Printf("Access to non app mem %zx\n", addr); |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 522 | DCHECK(IsAppMem(addr)); |
| 523 | } |
| 524 | if (!IsShadowMem((uptr)shadow_mem)) { |
Alexey Samsonov | b1fe302 | 2012-11-02 12:17:51 +0000 | [diff] [blame] | 525 | Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr); |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 526 | DCHECK(IsShadowMem((uptr)shadow_mem)); |
| 527 | } |
| 528 | #endif |
| 529 | |
Dmitry Vyukov | 82dbc51 | 2013-03-20 13:21:50 +0000 | [diff] [blame] | 530 | if (*shadow_mem == kShadowRodata) { |
| 531 | // Access to .rodata section, no races here. |
| 532 | // Measurements show that it can be 10-20% of all memory accesses. |
| 533 | StatInc(thr, StatMop); |
| 534 | StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead); |
| 535 | StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog)); |
| 536 | StatInc(thr, StatMopRodata); |
| 537 | return; |
| 538 | } |
| 539 | |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 540 | FastState fast_state = thr->fast_state; |
| 541 | if (fast_state.GetIgnoreBit()) |
| 542 | return; |
| 543 | fast_state.IncrementEpoch(); |
| 544 | thr->fast_state = fast_state; |
| 545 | Shadow cur(fast_state); |
| 546 | cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog); |
| 547 | cur.SetWrite(kAccessIsWrite); |
Dmitry Vyukov | 334553e | 2013-02-01 09:42:06 +0000 | [diff] [blame] | 548 | cur.SetAtomic(kIsAtomic); |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 549 | |
| 550 | // We must not store to the trace if we do not store to the shadow. |
| 551 | // That is, this call must be moved somewhere below. |
Dmitry Vyukov | 385542a | 2012-11-28 10:35:31 +0000 | [diff] [blame] | 552 | TraceAddEvent(thr, fast_state, EventTypeMop, pc); |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 553 | |
Dmitry Vyukov | 334553e | 2013-02-01 09:42:06 +0000 | [diff] [blame] | 554 | MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic, |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 555 | shadow_mem, cur); |
| 556 | } |
| 557 | |
// Fills the shadow of [addr, addr+size) so that the first slot of every
// shadow cell holds val and the remaining kShadowCnt-1 slots are zero.
// NOTE(review): the large-range branch remaps the middle pages to fresh
// zero pages regardless of val, so callers with val != 0 (freed marker)
// must keep the range small (MemoryRangeFreed caps size at 1024) —
// confirm for any new caller.
static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  (void)thr;
  (void)pc;
  if (size == 0)
    return;
  // FIXME: fix me.
  // Trim the unaligned head: shadow is managed in kShadowCell-sized cells,
  // so advance addr to the next cell boundary (the partial head is dropped).
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  // Don't want to touch lots of shadow memory.
  // If a program maps 10MB stack, there is no need reset the whole range.
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  // UnmapOrDie/MmapFixedNoReserve does not work on Windows,
  // so we do it only for C/C++.
  if (kGoMode || size < 64*1024) {
    // Small range (or Go build): store every shadow word directly.
    u64 *p = (u64*)MemToShadow(addr);
    CHECK(IsShadowMem((uptr)p));
    CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
    // FIXME: may overwrite a part outside the region
    for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
      p[i++] = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        p[i++] = 0;
    }
  } else {
    // The region is big, reset only beginning and end.
    const uptr kPageSize = 4096;
    u64 *begin = (u64*)MemToShadow(addr);
    u64 *end = begin + size / kShadowCell * kShadowCnt;
    u64 *p = begin;
    // Set at least first kPageSize/2 to page boundary.
    while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
    // Reset middle part.
    // Remapping makes the kernel lazily hand back zero pages, which is far
    // cheaper than storing to every shadow word of a huge region.
    u64 *p1 = p;
    p = RoundDown(end, kPageSize);
    UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
    MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1);
    // Set the ending.
    while (p < end) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
  }
}
| 618 | |
// Resets the shadow for [addr, addr+size) to zero, i.e. "no recorded
// accesses" (see MemoryRangeSet for the alignment/trimming rules).
void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}
| 622 | |
// Marks [addr, addr+size) as freed: first performs a write-range access
// over it (reporting races with any concurrent accesses), then stamps the
// shadow with a MarkAsFreed write so later accesses race with the free.
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  // Processing more than 1k (4k of shadow) is expensive,
  // can cause excessive memory consumption (user does not necessary touch
  // the whole range) and most likely unnecessary.
  if (size > 1024)
    size = 1024;
  CHECK_EQ(thr->is_freeing, false);
  // Flag the range access below as part of a free operation
  // (is_freeing is presumably consulted by report code — verify).
  thr->is_freeing = true;
  MemoryAccessRange(thr, pc, addr, size, true);
  thr->is_freeing = false;
  // New epoch + trace event so the freed-shadow value written below can be
  // mapped back to this program point in reports.
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);  // the synthetic write covers a whole 8-byte cell
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}
| 642 | |
Dmitry Vyukov | 26af893 | 2012-08-15 16:52:19 +0000 | [diff] [blame] | 643 | void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) { |
Dmitry Vyukov | 46fea91 | 2013-04-24 11:16:47 +0000 | [diff] [blame] | 644 | thr->fast_state.IncrementEpoch(); |
| 645 | TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc); |
Dmitry Vyukov | 26af893 | 2012-08-15 16:52:19 +0000 | [diff] [blame] | 646 | Shadow s(thr->fast_state); |
Dmitry Vyukov | 064c847 | 2012-11-30 06:39:01 +0000 | [diff] [blame] | 647 | s.ClearIgnoreBit(); |
Dmitry Vyukov | 26af893 | 2012-08-15 16:52:19 +0000 | [diff] [blame] | 648 | s.SetWrite(true); |
| 649 | s.SetAddr0AndSizeLog(0, 3); |
| 650 | MemoryRangeSet(thr, pc, addr, size, s.raw()); |
| 651 | } |
| 652 | |
Kostya Serebryany | d475aa8 | 2013-03-29 09:44:16 +0000 | [diff] [blame] | 653 | ALWAYS_INLINE USED |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 654 | void FuncEntry(ThreadState *thr, uptr pc) { |
| 655 | DCHECK_EQ(thr->in_rtl, 0); |
| 656 | StatInc(thr, StatFuncEnter); |
Dmitry Vyukov | 25d1c79 | 2012-07-16 16:44:47 +0000 | [diff] [blame] | 657 | DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc); |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 658 | thr->fast_state.IncrementEpoch(); |
Dmitry Vyukov | 385542a | 2012-11-28 10:35:31 +0000 | [diff] [blame] | 659 | TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc); |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 660 | |
| 661 | // Shadow stack maintenance can be replaced with |
| 662 | // stack unwinding during trace switch (which presumably must be faster). |
Dmitry Vyukov | 769544e | 2012-05-28 07:45:35 +0000 | [diff] [blame] | 663 | DCHECK_GE(thr->shadow_stack_pos, &thr->shadow_stack[0]); |
Dmitry Vyukov | 25d1c79 | 2012-07-16 16:44:47 +0000 | [diff] [blame] | 664 | #ifndef TSAN_GO |
Dmitry Vyukov | 769544e | 2012-05-28 07:45:35 +0000 | [diff] [blame] | 665 | DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]); |
Dmitry Vyukov | 25d1c79 | 2012-07-16 16:44:47 +0000 | [diff] [blame] | 666 | #else |
| 667 | if (thr->shadow_stack_pos == thr->shadow_stack_end) { |
| 668 | const int sz = thr->shadow_stack_end - thr->shadow_stack; |
| 669 | const int newsz = 2 * sz; |
| 670 | uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack, |
| 671 | newsz * sizeof(uptr)); |
| 672 | internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr)); |
| 673 | internal_free(thr->shadow_stack); |
| 674 | thr->shadow_stack = newstack; |
| 675 | thr->shadow_stack_pos = newstack + sz; |
| 676 | thr->shadow_stack_end = newstack + newsz; |
| 677 | } |
| 678 | #endif |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 679 | thr->shadow_stack_pos[0] = pc; |
| 680 | thr->shadow_stack_pos++; |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 681 | } |
| 682 | |
Kostya Serebryany | d475aa8 | 2013-03-29 09:44:16 +0000 | [diff] [blame] | 683 | ALWAYS_INLINE USED |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 684 | void FuncExit(ThreadState *thr) { |
| 685 | DCHECK_EQ(thr->in_rtl, 0); |
| 686 | StatInc(thr, StatFuncExit); |
Dmitry Vyukov | 25d1c79 | 2012-07-16 16:44:47 +0000 | [diff] [blame] | 687 | DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid()); |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 688 | thr->fast_state.IncrementEpoch(); |
Dmitry Vyukov | 385542a | 2012-11-28 10:35:31 +0000 | [diff] [blame] | 689 | TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0); |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 690 | |
Dmitry Vyukov | 769544e | 2012-05-28 07:45:35 +0000 | [diff] [blame] | 691 | DCHECK_GT(thr->shadow_stack_pos, &thr->shadow_stack[0]); |
Dmitry Vyukov | 25d1c79 | 2012-07-16 16:44:47 +0000 | [diff] [blame] | 692 | #ifndef TSAN_GO |
Dmitry Vyukov | 769544e | 2012-05-28 07:45:35 +0000 | [diff] [blame] | 693 | DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]); |
Dmitry Vyukov | 25d1c79 | 2012-07-16 16:44:47 +0000 | [diff] [blame] | 694 | #endif |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 695 | thr->shadow_stack_pos--; |
| 696 | } |
| 697 | |
Dmitry Vyukov | 652f78a | 2013-09-19 04:39:04 +0000 | [diff] [blame] | 698 | void ThreadIgnoreBegin(ThreadState *thr) { |
| 699 | DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid); |
| 700 | thr->ignore_reads_and_writes++; |
Dmitry Vyukov | e1ddbf9 | 2013-10-10 15:58:12 +0000 | [diff] [blame] | 701 | CHECK_GT(thr->ignore_reads_and_writes, 0); |
Dmitry Vyukov | 652f78a | 2013-09-19 04:39:04 +0000 | [diff] [blame] | 702 | thr->fast_state.SetIgnoreBit(); |
| 703 | } |
| 704 | |
| 705 | void ThreadIgnoreEnd(ThreadState *thr) { |
| 706 | DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid); |
| 707 | thr->ignore_reads_and_writes--; |
| 708 | CHECK_GE(thr->ignore_reads_and_writes, 0); |
| 709 | if (thr->ignore_reads_and_writes == 0) |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 710 | thr->fast_state.ClearIgnoreBit(); |
| 711 | } |
| 712 | |
Dmitry Vyukov | e1ddbf9 | 2013-10-10 15:58:12 +0000 | [diff] [blame] | 713 | void ThreadIgnoreSyncBegin(ThreadState *thr) { |
| 714 | DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid); |
| 715 | thr->ignore_sync++; |
| 716 | CHECK_GT(thr->ignore_sync, 0); |
| 717 | } |
| 718 | |
| 719 | void ThreadIgnoreSyncEnd(ThreadState *thr) { |
| 720 | DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid); |
| 721 | thr->ignore_sync--; |
| 722 | CHECK_GE(thr->ignore_sync, 0); |
| 723 | } |
| 724 | |
Dmitry Vyukov | b78caa6 | 2012-07-05 16:18:28 +0000 | [diff] [blame] | 725 | bool MD5Hash::operator==(const MD5Hash &other) const { |
| 726 | return hash[0] == other.hash[0] && hash[1] == other.hash[1]; |
| 727 | } |
| 728 | |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 729 | #if TSAN_DEBUG |
| 730 | void build_consistency_debug() {} |
| 731 | #else |
| 732 | void build_consistency_release() {} |
| 733 | #endif |
| 734 | |
| 735 | #if TSAN_COLLECT_STATS |
| 736 | void build_consistency_stats() {} |
| 737 | #else |
| 738 | void build_consistency_nostats() {} |
| 739 | #endif |
| 740 | |
| 741 | #if TSAN_SHADOW_COUNT == 1 |
| 742 | void build_consistency_shadow1() {} |
| 743 | #elif TSAN_SHADOW_COUNT == 2 |
| 744 | void build_consistency_shadow2() {} |
| 745 | #elif TSAN_SHADOW_COUNT == 4 |
| 746 | void build_consistency_shadow4() {} |
| 747 | #else |
| 748 | void build_consistency_shadow8() {} |
| 749 | #endif |
| 750 | |
| 751 | } // namespace __tsan |
| 752 | |
Dmitry Vyukov | b78caa6 | 2012-07-05 16:18:28 +0000 | [diff] [blame] | 753 | #ifndef TSAN_GO |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 754 | // Must be included in this file to make sure everything is inlined. |
| 755 | #include "tsan_interface_inl.h" |
Dmitry Vyukov | b78caa6 | 2012-07-05 16:18:28 +0000 | [diff] [blame] | 756 | #endif |