//===-- tsan_rtl.cc -------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//
| 14 | |
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"
| 26 | |
// Non-zero once the user has called __tsan_resume(). Checked by Initialize()
// when the stop_on_start flag is set.
volatile int __tsan_resumed = 0;

// External entry point: releases a process that was suspended at startup
// by the stop_on_start flag (Initialize() spins until this becomes non-zero).
extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}
| 32 | |
namespace __tsan {
| 34 | |
#ifndef TSAN_GO
// Per-thread runtime state lives in TLS (zero-initialized before any ctor
// runs); cache-line aligned to avoid false sharing between threads.
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
// Storage for the single global Context; constructed with placement-new in
// Initialize() so the runtime needs no static constructors.
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);

static Context *ctx;
// Accessor for the global runtime context (valid only after Initialize()
// has placement-new'ed it into ctx_placeholder).
Context *CTX() {
  return ctx;
}
| 44 | |
// Members not listed below rely on the zero-initialized placement-new
// storage (ctx_placeholder) instead of explicit initialization.
Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  , thread_mtx(MutexTypeThreads, StatMtxThreads)
  , racy_stacks(MBlockRacyStacks)
  , racy_addresses(MBlockRacyAddresses)
  // NOTE(review): fired_suppressions reuses the MBlockRacyAddresses
  // accounting tag rather than a dedicated one — presumably intentional,
  // but confirm against the MBlock enum.
  , fired_suppressions(MBlockRacyAddresses) {
}
| 55 | |
// The objects are allocated in TLS, so one may rely on zero-initialization.
// Only members whose initial value differs from zero (or that must be set
// before first use) are initialized explicitly.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , fast_ignore_reads()
  // , fast_ignore_writes()
  // , in_rtl()
  , shadow_stack_pos(&shadow_stack[0])  // empty shadow call stack
  , tid(tid)
  , unique_id(unique_id)
  , stk_addr(stk_addr)
  , stk_size(stk_size)
  , tls_addr(tls_addr)
  , tls_size(tls_size) {
}
| 74 | |
// Per-tid bookkeeping record; starts in the Invalid state until the thread
// slot is actually created. Empty initializers value-initialize (zero).
ThreadContext::ThreadContext(int tid)
  : tid(tid)
  , unique_id()
  , os_id()
  , user_id()
  , thr()
  , status(ThreadStatusInvalid)
  , detached()
  , reuse_count()
  , epoch0()
  , epoch1()
  , dead_info()
  , dead_next() {
}
| 89 | |
Dmitry Vyukov | 2612773 | 2012-05-22 11:33:03 +0000 | [diff] [blame] | 90 | static void WriteMemoryProfile(char *buf, uptr buf_size, int num) { |
| 91 | uptr shadow = GetShadowMemoryConsumption(); |
| 92 | |
| 93 | int nthread = 0; |
| 94 | int nlivethread = 0; |
| 95 | uptr threadmem = 0; |
| 96 | { |
| 97 | Lock l(&ctx->thread_mtx); |
| 98 | for (unsigned i = 0; i < kMaxTid; i++) { |
| 99 | ThreadContext *tctx = ctx->threads[i]; |
| 100 | if (tctx == 0) |
| 101 | continue; |
| 102 | nthread += 1; |
| 103 | threadmem += sizeof(ThreadContext); |
| 104 | if (tctx->status != ThreadStatusRunning) |
| 105 | continue; |
| 106 | nlivethread += 1; |
| 107 | threadmem += sizeof(ThreadState); |
| 108 | } |
| 109 | } |
| 110 | |
| 111 | uptr nsync = 0; |
| 112 | uptr syncmem = CTX()->synctab.GetMemoryConsumption(&nsync); |
| 113 | |
Alexey Samsonov | de08c02 | 2012-06-19 09:21:57 +0000 | [diff] [blame] | 114 | internal_snprintf(buf, buf_size, "%d: shadow=%zuMB" |
| 115 | " thread=%zuMB(total=%d/live=%d)" |
| 116 | " sync=%zuMB(cnt=%zu)\n", |
Dmitry Vyukov | 2612773 | 2012-05-22 11:33:03 +0000 | [diff] [blame] | 117 | num, |
| 118 | shadow >> 20, |
| 119 | threadmem >> 20, nthread, nlivethread, |
| 120 | syncmem >> 20, nsync); |
| 121 | } |
| 122 | |
// Background thread body: once per second formats a memory profile line
// and appends it to the file descriptor passed (encoded in arg).
// Never returns.
static void MemoryProfileThread(void *arg) {
  ScopedInRtl in_rtl;
  fd_t fd = (fd_t)(uptr)arg;  // fd smuggled through the void* thread arg
  for (int i = 0; ; i++) {
    InternalScopedBuffer<char> buf(4096);
    WriteMemoryProfile(buf.data(), buf.size(), i);
    internal_write(fd, buf.data(), internal_strlen(buf.data()));
    SleepForSeconds(1);
  }
}
| 133 | |
// If the profile_memory flag is set, opens "<profile_memory>.<pid>" and
// starts the background profiling thread writing to it. Dies on open
// failure. No-op when the flag is empty/unset.
static void InitializeMemoryProfile() {
  if (flags()->profile_memory == 0 || flags()->profile_memory[0] == 0)
    return;
  InternalScopedBuffer<char> filename(4096);
  internal_snprintf(filename.data(), filename.size(), "%s.%d",
      flags()->profile_memory, GetPid());
  fd_t fd = internal_open(filename.data(), true);
  if (fd == kInvalidFd) {
    Printf("Failed to open memory profile file '%s'\n", &filename[0]);
    Die();
  }
  // The fd is passed to the thread encoded in the void* argument.
  internal_start_thread(&MemoryProfileThread, (void*)(uptr)fd);
}
| 147 | |
// Background thread body: periodically releases shadow memory back to the
// OS at the interval configured by flush_memory_ms. Never returns.
static void MemoryFlushThread(void *arg) {
  ScopedInRtl in_rtl;
  for (int i = 0; ; i++) {
    SleepForMillis(flags()->flush_memory_ms);
    FlushShadowMemory();
  }
}
| 155 | |
| 156 | static void InitializeMemoryFlush() { |
| 157 | if (flags()->flush_memory_ms == 0) |
| 158 | return; |
| 159 | if (flags()->flush_memory_ms < 100) |
| 160 | flags()->flush_memory_ms = 100; |
| 161 | internal_start_thread(&MemoryFlushThread, 0); |
| 162 | } |
| 163 | |
// Maps the shadow region covering the application range [addr, addr+size).
// Each application byte needs kShadowMultiplier bytes of shadow.
void MapShadow(uptr addr, uptr size) {
  MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);
}
| 167 | |
// One-time runtime initialization for the initial thread. Order of the
// steps below is significant: allocator/interceptors/platform come before
// anything that may allocate or call intercepted functions, and the
// external symbolizer is started before internal threads.
void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  ScopedInRtl in_rtl;
#ifndef TSAN_GO
  InitializeAllocator();
#endif
  InitializeInterceptors();
  const char *env = InitializePlatform();
  InitializeMutex();
  InitializeDynamicAnnotations();
  // Construct the global context in pre-reserved static storage.
  ctx = new(ctx_placeholder) Context;
#ifndef TSAN_GO
  InitializeShadowMemory();
#endif
  ctx->dead_list_size = 0;
  ctx->dead_list_head = 0;
  ctx->dead_list_tail = 0;
  InitializeFlags(&ctx->flags, env);
  // Setup correct file descriptor for error reports.
  __sanitizer_set_report_fd(flags()->log_fileno);
  InitializeSuppressions();
#ifndef TSAN_GO
  // Initialize external symbolizer before internal threads are started.
  const char *external_symbolizer = flags()->external_symbolizer_path;
  if (external_symbolizer != 0 && external_symbolizer[0] != '\0') {
    if (!InitializeExternalSymbolizer(external_symbolizer)) {
      Printf("Failed to start external symbolizer: '%s'\n",
             external_symbolizer);
      Die();
    }
  }
#endif
  InitializeMemoryProfile();
  InitializeMemoryFlush();

  if (ctx->flags.verbosity)
    Printf("***** Running under ThreadSanitizer v2 (pid %d) *****\n",
           GetPid());

  // Initialize thread 0.
  ctx->thread_seq = 0;
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, GetPid());
  CHECK_EQ(thr->in_rtl, 1);
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           GetPid());
    // Busy-wait on the volatile flag until __tsan_resume() is called
    // (e.g. from a debugger).
    while (__tsan_resumed == 0);
  }
}
| 229 | |
// Runtime shutdown for the main thread. Prints the summary of reported /
// missed-expected races and returns the process exit code (flags()->exitcode
// if anything was reported, 0 otherwise).
int Finalize(ThreadState *thr) {
  ScopedInRtl in_rtl;
  Context *ctx = __tsan::ctx;
  bool failed = false;

  // Optionally give other threads a chance to finish and report races
  // before the process exits.
  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  // Lock/unlock acts as a barrier: any report in flight holds the mutex.
  ctx->report_mtx.Lock();
  ctx->report_mtx.Unlock();

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#ifndef TSAN_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
        ctx->nmissed_expected);
  }

  // Merge this thread's stats into the global stats before printing.
  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
  return failed ? flags()->exitcode : 0;
}
| 263 | |
#ifndef TSAN_GO
// Stores the current shadow call stack (optionally topped with pc) into the
// stack depot and returns its id. Returns 0 during early bootstrap, before
// the shadow stack exists.
u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (thr->shadow_stack_pos == 0)  // May happen during bootstrap.
    return 0;
  if (pc) {
    // Temporarily push pc so it is included in the depot entry.
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(thr->shadow_stack,
                         thr->shadow_stack_pos - thr->shadow_stack);
  if (pc)
    thr->shadow_stack_pos--;  // pop the temporary frame
  return id;
}
#endif
Dmitry Vyukov | 8485311 | 2012-08-31 17:27:49 +0000 | [diff] [blame] | 279 | |
// Switches the per-thread event trace to the next part: records the epoch
// and current stack in that part's header. nomalloc brackets the critical
// section so that trace switching itself never allocates.
void TraceSwitch(ThreadState *thr) {
  thr->nomalloc++;
  ScopedInRtl in_rtl;
  Lock l(&thr->trace.mtx);
  // Trace parts are used round-robin, indexed by epoch.
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % kTraceParts;
  TraceHeader *hdr = &thr->trace.headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  hdr->stack0.ObtainCurrent(thr, 0);
  thr->nomalloc--;
}
| 290 | |
#ifndef TSAN_GO
// Targets of HACKY_CALL from instrumented fast paths; they only forward
// to the real implementations with the current thread's state.
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 300 | |
// Shadow cells are read/written with relaxed atomics: racing accesses to
// the same shadow word must not be torn, but need no ordering.
ALWAYS_INLINE
static Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
static void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

// Stores *s into the shadow slot and zeroes *s, marking the value as
// already stored (the shadow-update code checks *s == 0 to avoid storing
// the same access twice).
ALWAYS_INLINE
static void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}
| 317 | |
// Records the two conflicting shadow values and triggers race reporting.
// In C++ mode reporting goes through HACKY_CALL to keep this fast path
// lean; in Go mode ReportRace is called directly.
static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
                              Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#ifndef TSAN_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}
| 329 | |
| 330 | static inline bool BothReads(Shadow s, int kAccessIsWrite) { |
| 331 | return !kAccessIsWrite && !s.is_write(); |
| 332 | } |
| 333 | |
Dmitry Vyukov | ed77156 | 2012-11-13 14:05:58 +0000 | [diff] [blame] | 334 | static inline bool OldIsRWNotWeaker(Shadow old, int kAccessIsWrite) { |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 335 | return old.is_write() || !kAccessIsWrite; |
| 336 | } |
| 337 | |
Dmitry Vyukov | ed77156 | 2012-11-13 14:05:58 +0000 | [diff] [blame] | 338 | static inline bool OldIsRWWeakerOrEqual(Shadow old, int kAccessIsWrite) { |
Kostya Serebryany | 7ac4148 | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 339 | return !old.is_write() || kAccessIsWrite; |
| 340 | } |
| 341 | |
| 342 | static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) { |
| 343 | return old.epoch() >= thr->fast_synch_epoch; |
| 344 | } |
| 345 | |
| 346 | static inline bool HappensBefore(Shadow old, ThreadState *thr) { |
| 347 | return thr->clock.get(old.tid()) >= old.epoch(); |
| 348 | } |
| 349 | |
// Core shadow-state update for one memory access. The per-slot update logic
// lives in tsan_update_shadow_word_inl.h, which is textually included once
// per shadow slot (manual unrolling over the compile-time kShadowCnt); the
// included code reads 'idx', 'cur', 'old', 'store_word', 'shadow_mem' and
// may 'goto RACE' when it detects a conflicting pair.
ALWAYS_INLINE
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();

  // scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // we consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. initially I considered
  // larger and smaller as well, it allowed to replace some
  // 'candidates' with 'same' or 'replace', but I think
  // it's just not worth it (performance- and complexity-wise).

  Shadow old(0);
  if (kShadowCnt == 1) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 2) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 4) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
  } else if (kShadowCnt == 8) {
    int idx = 0;
#include "tsan_update_shadow_word_inl.h"
    idx = 1;
#include "tsan_update_shadow_word_inl.h"
    idx = 2;
#include "tsan_update_shadow_word_inl.h"
    idx = 3;
#include "tsan_update_shadow_word_inl.h"
    idx = 4;
#include "tsan_update_shadow_word_inl.h"
    idx = 5;
#include "tsan_update_shadow_word_inl.h"
    idx = 6;
#include "tsan_update_shadow_word_inl.h"
    idx = 7;
#include "tsan_update_shadow_word_inl.h"
  } else {
    CHECK(false);
  }

  // we did not find any races and had already stored
  // the current access info, so we are done
  if (LIKELY(store_word == 0))
    return;
  // choose a random candidate slot and replace it
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}
| 423 | |
// Fast-path entry for an instrumented memory access of size
// 1 << kAccessSizeLog at addr: bumps the epoch, records the event in the
// trace and updates the shadow state. Returns early when the thread's
// ignore bit is set.
ALWAYS_INLINE
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: tsan::OnMemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
#if TSAN_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;
  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  Shadow cur(fast_state);
  // addr & 7: offset of the access within its 8-byte shadow cell.
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);

  // We must not store to the trace if we do not store to the shadow.
  // That is, this call must be moved somewhere below.
  TraceAddEvent(thr, fast_state.epoch(), EventTypeMop, pc);

  MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite,
      shadow_mem, cur);
}
| 461 | |
// Fills the shadow of [addr, addr+size) so that the first shadow slot of
// every cell holds val and the remaining slots are zero. Unaligned range
// heads are skipped and the size is rounded up to whole shadow cells.
static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  if (size == 0)
    return;
  // FIXME: fix me.
  // Skip the unaligned head of the range (it keeps its old shadow).
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  (void)thr;
  (void)pc;
  // Some programs mmap like hundreds of GBs but actually used a small part.
  // So, it's better to report a false positive on the memory
  // then to hang here senselessly.
  const uptr kMaxResetSize = 4ull*1024*1024*1024;
  if (size > kMaxResetSize)
    size = kMaxResetSize;
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  u64 *p = (u64*)MemToShadow(addr);
  CHECK(IsShadowMem((uptr)p));
  CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
  // FIXME: may overwrite a part outside the region
  for (uptr i = 0; i < size * kShadowCnt / kShadowCell;) {
    // First slot of the cell gets val, the rest are cleared.
    p[i++] = val;
    for (uptr j = 1; j < kShadowCnt; j++)
      p[i++] = 0;
  }
}
| 499 | |
// Clears the shadow of the range (forgets all previous accesses).
void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

// Marks the range as freed: first reports races with any concurrent
// accesses (via a write-range access), then stamps the shadow with a
// freed write so later accesses are reported as use-after-free.
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryAccessRange(thr, pc, addr, size, true);
  Shadow s(thr->fast_state);
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);  // whole 8-byte cell
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

// Stamps the range's shadow as if this thread just wrote it (used e.g. for
// newly allocated memory), without reporting races.
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  Shadow s(thr->fast_state);
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);  // whole 8-byte cell
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}
| 519 | |
// Instrumented function prologue: traces the entry event and pushes pc onto
// the shadow call stack. In Go mode the shadow stack is heap-allocated and
// grown on demand; in C++ mode it is fixed-size and only DCHECKed.
ALWAYS_INLINE
void FuncEntry(ThreadState *thr, uptr pc) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeFuncEnter, pc);

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
  DCHECK_GE(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#else
  // Grow the heap-allocated shadow stack by 2x when full.
  if (thr->shadow_stack_pos == thr->shadow_stack_end) {
    const int sz = thr->shadow_stack_end - thr->shadow_stack;
    const int newsz = 2 * sz;
    uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
                                           newsz * sizeof(uptr));
    internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
    internal_free(thr->shadow_stack);
    thr->shadow_stack = newstack;
    thr->shadow_stack_pos = newstack + sz;
    thr->shadow_stack_end = newstack + newsz;
  }
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}
| 549 | |
// Instrumented function epilogue: traces the exit event and pops the
// shadow call stack (must balance a preceding FuncEntry).
ALWAYS_INLINE
void FuncExit(ThreadState *thr) {
  DCHECK_EQ(thr->in_rtl, 0);
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeFuncExit, 0);

  DCHECK_GT(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
  DCHECK_LT(thr->shadow_stack_pos, &thr->shadow_stack[kShadowStackSize]);
#endif
  thr->shadow_stack_pos--;
}
| 564 | |
| 565 | void IgnoreCtl(ThreadState *thr, bool write, bool begin) { |
| 566 | DPrintf("#%d: IgnoreCtl(%d, %d)\n", thr->tid, write, begin); |
| 567 | thr->ignore_reads_and_writes += begin ? 1 : -1; |
| 568 | CHECK_GE(thr->ignore_reads_and_writes, 0); |
| 569 | if (thr->ignore_reads_and_writes) |
| 570 | thr->fast_state.SetIgnoreBit(); |
| 571 | else |
| 572 | thr->fast_state.ClearIgnoreBit(); |
| 573 | } |
| 574 | |
Dmitry Vyukov | b78caa6 | 2012-07-05 16:18:28 +0000 | [diff] [blame] | 575 | bool MD5Hash::operator==(const MD5Hash &other) const { |
| 576 | return hash[0] == other.hash[0] && hash[1] == other.hash[1]; |
| 577 | } |
| 578 | |
// Build-consistency stubs: exactly one symbol of each group below is
// emitted depending on the build configuration, so linking objects
// compiled with mismatched settings fails at link time.
#if TSAN_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

#if TSAN_SHADOW_COUNT == 1
void build_consistency_shadow1() {}
#elif TSAN_SHADOW_COUNT == 2
void build_consistency_shadow2() {}
#elif TSAN_SHADOW_COUNT == 4
void build_consistency_shadow4() {}
#else
void build_consistency_shadow8() {}
#endif
| 600 | |
}  // namespace __tsan

#ifndef TSAN_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif