//===-- tsan_rtl_thread.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_sync.h"

namespace __tsan {

// ThreadContext implementation.

ThreadContext::ThreadContext(int tid)
  : ThreadContextBase(tid)
  , thr()
  , sync()
  , epoch0()
  , epoch1() {
}

#ifndef TSAN_GO
ThreadContext::~ThreadContext() {
}
#endif

void ThreadContext::OnDead() {
  sync.Reset();
}

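// Called from the thread registry when another thread joins this (already
// finished) thread.  The joiner acquires the clock that this thread released
// into 'sync' when it finished, establishing happens-before between the
// child's last actions and everything the joiner does afterwards.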
void ThreadContext::OnJoined(void *arg) {
  ThreadState *caller_thr = static_cast<ThreadState *>(arg);
  caller_thr->clock.acquire(&sync);
  StatInc(caller_thr, StatSyncAcquire);
  sync.Reset();
}

struct OnCreatedArgs {
  ThreadState *thr;
  uptr pc;
};

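// Called on the creator thread when a new thread is registered.  The creator
// releases its clock into 'sync' so the child can acquire it in OnStarted(),
// and the creation stack is captured for reports.  The main thread (tid 0)
// has no creator, so there is nothing to do for it.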
void ThreadContext::OnCreated(void *arg) {
  thr = 0;
  if (tid == 0)
    return;
  OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
  args->thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
  args->thr->clock.set(args->thr->tid, args->thr->fast_state.epoch());
  args->thr->fast_synch_epoch = args->thr->fast_state.epoch();
  args->thr->clock.release(&sync);
  StatInc(args->thr, StatSyncRelease);
#ifdef TSAN_GO
  creation_stack.ObtainCurrent(args->thr, args->pc);
#else
  creation_stack_id = CurrentStackId(args->thr, args->pc);
#endif
  if (reuse_count == 0)
    StatInc(args->thr, StatThreadMaxTid);
}

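// Called when this tid slot is recycled for a new thread: drop the stale
// sync clock and release the per-thread trace memory, which is no longer
// needed.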
void ThreadContext::OnReset() {
  sync.Reset();
  FlushUnneededShadowMemory(GetThreadTrace(tid), TraceSize() * sizeof(Event));
  // TODO: also flush the trace header:
  // FlushUnneededShadowMemory(GetThreadTraceHeader(tid), sizeof(Trace));
}

struct OnStartedArgs {
  ThreadState *thr;
  uptr stk_addr;
  uptr stk_size;
  uptr tls_addr;
  uptr tls_size;
};

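// Called on the new thread itself when it starts executing.  Constructs the
// ThreadState in place, sets up the trace for the new epoch range, and
// acquires the clock released by the creator in OnCreated(), so the child
// observes everything that happened before its creation.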
void ThreadContext::OnStarted(void *arg) {
  OnStartedArgs *args = static_cast<OnStartedArgs*>(arg);
  thr = args->thr;
  // RoundUp so that one trace part does not contain events
  // from different threads.
  epoch0 = RoundUp(epoch1 + 1, kTracePartSize);
  epoch1 = (u64)-1;
  new(thr) ThreadState(CTX(), tid, unique_id,
      epoch0, args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
#ifdef TSAN_GO
  // Setup dynamic shadow stack.
  const int kInitStackSize = 8;
  args->thr->shadow_stack = (uptr*)internal_alloc(MBlockShadowStack,
      kInitStackSize * sizeof(uptr));
  args->thr->shadow_stack_pos = thr->shadow_stack;
  args->thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
#endif
#ifndef TSAN_GO
  AllocatorThreadStart(args->thr);
#endif
  thr = args->thr;
  thr->fast_synch_epoch = epoch0;
  thr->clock.set(tid, epoch0);
  thr->clock.acquire(&sync);
  thr->fast_state.SetHistorySize(flags()->history_size);
  const uptr trace = (epoch0 / kTracePartSize) % TraceParts();
  Trace *thr_trace = ThreadTrace(thr->tid);
  thr_trace->headers[trace].epoch0 = epoch0;
  StatInc(thr, StatSyncAcquire);
  sync.Reset();
  DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
          "tls_addr=%zx tls_size=%zx\n",
          tid, (uptr)epoch0, args->stk_addr, args->stk_size,
          args->tls_addr, args->tls_size);
  thr->is_alive = true;
}

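// Called on the thread itself when it finishes.  Unless the thread is
// detached, it releases its clock into 'sync' so that a subsequent join
// (OnJoined) synchronizes with it; then the ThreadState is destroyed.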
void ThreadContext::OnFinished() {
  if (!detached) {
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    thr->clock.set(thr->tid, thr->fast_state.epoch());
    thr->fast_synch_epoch = thr->fast_state.epoch();
    thr->clock.release(&sync);
    StatInc(thr, StatSyncRelease);
  }
  epoch1 = thr->fast_state.epoch();

#ifndef TSAN_GO
  AllocatorThreadFinish(thr);
#endif
  // Aggregate stats before destroying the ThreadState they live in.
  StatAggregate(CTX()->stat, thr->stat);
  thr->~ThreadState();
  thr = 0;
}

#ifndef TSAN_GO
struct ThreadLeak {
  ThreadContext *tctx;
  int count;
};

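// ForEachThread callback: a thread is considered leaked if it has finished
// but was neither joined nor detached.  Leaks with the same creation stack
// are merged into a single entry with a count.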
static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) {
  Vector<ThreadLeak> &leaks = *(Vector<ThreadLeak>*)arg;
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->detached || tctx->status != ThreadStatusFinished)
    return;
  for (uptr i = 0; i < leaks.Size(); i++) {
    if (leaks[i].tctx->creation_stack_id == tctx->creation_stack_id) {
      leaks[i].count++;
      return;
    }
  }
  ThreadLeak leak = {tctx, 1};
  leaks.PushBack(leak);
}
#endif

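// Called at process finalization: if report_thread_leaks is set, reports
// every thread that finished without being joined or detached.  The check is
// compiled out for Go.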
void ThreadFinalize(ThreadState *thr) {
  CHECK_GT(thr->in_rtl, 0);
#ifndef TSAN_GO
  if (!flags()->report_thread_leaks)
    return;
  ThreadRegistryLock l(CTX()->thread_registry);
  Vector<ThreadLeak> leaks(MBlockScopedBuf);
  CTX()->thread_registry->RunCallbackForEachThreadLocked(
      MaybeReportThreadLeak, &leaks);
  for (uptr i = 0; i < leaks.Size(); i++) {
    ScopedReport rep(ReportTypeThreadLeak);
    rep.AddThread(leaks[i].tctx);
    rep.SetCount(leaks[i].count);
    OutputReport(CTX(), rep);
  }
#endif
}

int ThreadCount(ThreadState *thr) {
  CHECK_GT(thr->in_rtl, 0);
  Context *ctx = CTX();
  uptr result;
  ctx->thread_registry->GetNumberOfThreads(0, 0, &result);
  return (int)result;
}

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
  CHECK_GT(thr->in_rtl, 0);
  StatInc(thr, StatThreadCreate);
  Context *ctx = CTX();
  OnCreatedArgs args = { thr, pc };
  int tid = ctx->thread_registry->CreateThread(uid, detached, thr->tid, &args);
  DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", thr->tid, tid, uid);
  StatSet(thr, StatThreadMaxAlive, ctx->thread_registry->GetMaxAliveThreads());
  return tid;
}

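// Runs on the new thread once the OS thread exists.  For non-main threads it
// imitates writes to the thread's stack and TLS ranges (skipping the huge
// ThreadState object that lives in TLS), presumably so that values left at
// the same addresses by a previous thread are not reported as racing with
// this thread; it then hands off to the registry's StartThread.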
void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
  CHECK_GT(thr->in_rtl, 0);
  uptr stk_addr = 0;
  uptr stk_size = 0;
  uptr tls_addr = 0;
  uptr tls_size = 0;
  GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size);

  if (tid) {
    if (stk_addr && stk_size)
      MemoryRangeImitateWrite(thr, /*pc=*/ 1, stk_addr, stk_size);

    if (tls_addr && tls_size) {
      // Check that the thr object is in tls.
      const uptr thr_beg = (uptr)thr;
      const uptr thr_end = (uptr)thr + sizeof(*thr);
      CHECK_GE(thr_beg, tls_addr);
      CHECK_LE(thr_beg, tls_addr + tls_size);
      CHECK_GE(thr_end, tls_addr);
      CHECK_LE(thr_end, tls_addr + tls_size);
      // Since the thr object is huge, skip it.
      MemoryRangeImitateWrite(thr, /*pc=*/ 2, tls_addr, thr_beg - tls_addr);
      MemoryRangeImitateWrite(thr, /*pc=*/ 2,
          thr_end, tls_addr + tls_size - thr_end);
    }
  }

  OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size };
  CTX()->thread_registry->StartThread(tid, os_id, &args);
}

void ThreadFinish(ThreadState *thr) {
  CHECK_GT(thr->in_rtl, 0);
  StatInc(thr, StatThreadFinish);
  if (thr->stk_addr && thr->stk_size)
    DontNeedShadowFor(thr->stk_addr, thr->stk_size);
  if (thr->tls_addr && thr->tls_size)
    DontNeedShadowFor(thr->tls_addr, thr->tls_size);
  thr->is_alive = false;
  Context *ctx = CTX();
  ctx->thread_registry->FinishThread(thr->tid);
}

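// FindThread callback used by ThreadTid(): matches a valid thread context by
// its user-provided id (uid) and consumes the id, so that repeated lookups
// do not return the same thread again.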
static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
  uptr uid = (uptr)arg;
  if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) {
    tctx->user_id = 0;
    return true;
  }
  return false;
}

int ThreadTid(ThreadState *thr, uptr pc, uptr uid) {
  CHECK_GT(thr->in_rtl, 0);
  Context *ctx = CTX();
  int res = ctx->thread_registry->FindThread(FindThreadByUid, (void*)uid);
  DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res);
  return res;
}

void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
  CHECK_GT(thr->in_rtl, 0);
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
  Context *ctx = CTX();
  ctx->thread_registry->JoinThread(tid, thr);
}

void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
  CHECK_GT(thr->in_rtl, 0);
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  Context *ctx = CTX();
  ctx->thread_registry->DetachThread(tid);
}

void ThreadSetName(ThreadState *thr, const char *name) {
  CHECK_GT(thr->in_rtl, 0);
  CTX()->thread_registry->SetThreadName(thr->tid, name);
}

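// Processes an access to the range [addr, addr+size) as a series of
// individual accesses: a byte-granular unaligned prefix, whole 8-byte shadow
// cells in the middle, and a byte-granular tail.  Ranges that start in
// .rodata are skipped early since read-only data cannot race.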
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
                       uptr size, bool is_write) {
  if (size == 0)
    return;

  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n",
      thr->tid, (void*)pc, (void*)addr,
      (int)size, is_write);

#if TSAN_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsAppMem(addr + size - 1)) {
    Printf("Access to non app mem %zx\n", addr + size - 1);
    DCHECK(IsAppMem(addr + size - 1));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
  if (!IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))) {
    Printf("Bad shadow addr %p (%zx)\n",
           shadow_mem + size * kShadowCnt / 8 - 1, addr + size - 1);
    DCHECK(IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1)));
  }
#endif

  StatInc(thr, StatMopRange);

  if (*shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMopRangeRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;

  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  bool unaligned = (addr % kShadowCell) != 0;

  // Handle unaligned beginning, if any.
  for (; addr % kShadowCell && size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
  }
  if (unaligned)
    shadow_mem += kShadowCnt;
  // Handle middle part, if any.
  for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell) {
    int const kAccessSizeLog = 3;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(0, kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
    shadow_mem += kShadowCnt;
  }
  // Handle ending, if any.
  for (; size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
  }
}

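// Like MemoryAccessRange(), but instead of touching every byte it records a
// single 1-byte access at each 'step'-th byte of [addr, addr+size).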
void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
                           uptr size, uptr step, bool is_write) {
  if (size == 0)
    return;
  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;
  StatInc(thr, StatMopRange);
  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  for (uptr addr_end = addr + size; addr < addr_end; addr += step) {
    u64 *shadow_mem = (u64*)MemToShadow(addr);
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kSizeLog1);
    MemoryAccessImpl(thr, addr, kSizeLog1, is_write, false,
        shadow_mem, cur);
  }
}

}  // namespace __tsan