//===-- tsan_rtl_thread.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_sync.h"

namespace __tsan {

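// Descriptors of finished threads are kept on a dead list and are reused
// only after more than kThreadQuarantineSize of them have accumulated
// (see ThreadCreate below), which delays reuse of thread ids.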
#ifndef TSAN_GO
const int kThreadQuarantineSize = 16;
#else
const int kThreadQuarantineSize = 64;
#endif

static void MaybeReportThreadLeak(ThreadContext *tctx) {
  if (tctx->detached)
    return;
  if (tctx->status != ThreadStatusCreated
      && tctx->status != ThreadStatusRunning
      && tctx->status != ThreadStatusFinished)
    return;
  ScopedReport rep(ReportTypeThreadLeak);
  rep.AddThread(tctx);
  OutputReport(CTX(), rep);
}

void ThreadFinalize(ThreadState *thr) {
  CHECK_GT(thr->in_rtl, 0);
  if (!flags()->report_thread_leaks)
    return;
  Context *ctx = CTX();
  Lock l(&ctx->thread_mtx);
  for (unsigned i = 0; i < kMaxTid; i++) {
    ThreadContext *tctx = ctx->threads[i];
    if (tctx == 0)
      continue;
    MaybeReportThreadLeak(tctx);
  }
}

int ThreadCount(ThreadState *thr) {
  CHECK_GT(thr->in_rtl, 0);
  Context *ctx = CTX();
  Lock l(&ctx->thread_mtx);
  int cnt = 0;
  for (unsigned i = 0; i < kMaxTid; i++) {
    ThreadContext *tctx = ctx->threads[i];
    if (tctx == 0)
      continue;
    if (tctx->status != ThreadStatusCreated
        && tctx->status != ThreadStatusRunning)
      continue;
    cnt++;
  }
  return cnt;
}

static void ThreadDead(ThreadState *thr, ThreadContext *tctx) {
  Context *ctx = CTX();
  CHECK_GT(thr->in_rtl, 0);
  CHECK(tctx->status == ThreadStatusRunning
      || tctx->status == ThreadStatusFinished);
  DPrintf("#%d: ThreadDead uid=%zu\n", thr->tid, tctx->user_id);
  tctx->status = ThreadStatusDead;
  tctx->user_id = 0;
  tctx->sync.Reset();

  // Put to dead list.
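  // The list is FIFO: ThreadCreate reuses contexts from the head once more
  // than kThreadQuarantineSize of them have accumulated.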
  tctx->dead_next = 0;
  if (ctx->dead_list_size == 0)
    ctx->dead_list_head = tctx;
  else
    ctx->dead_list_tail->dead_next = tctx;
  ctx->dead_list_tail = tctx;
  ctx->dead_list_size++;
}

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
  CHECK_GT(thr->in_rtl, 0);
  Context *ctx = CTX();
  Lock l(&ctx->thread_mtx);
  StatInc(thr, StatThreadCreate);
  int tid = -1;
  ThreadContext *tctx = 0;
  if (ctx->dead_list_size > kThreadQuarantineSize
      || ctx->thread_seq >= kMaxTid) {
    // Reusing old thread descriptor and tid.
    if (ctx->dead_list_size == 0) {
      Printf("ThreadSanitizer: %d thread limit exceeded. Dying.\n",
             kMaxTid);
      Die();
    }
    StatInc(thr, StatThreadReuse);
    tctx = ctx->dead_list_head;
    ctx->dead_list_head = tctx->dead_next;
    ctx->dead_list_size--;
    if (ctx->dead_list_size == 0) {
      CHECK_EQ(tctx->dead_next, 0);
      ctx->dead_list_head = 0;
    }
    CHECK_EQ(tctx->status, ThreadStatusDead);
    tctx->status = ThreadStatusInvalid;
    tctx->reuse_count++;
    tctx->sync.Reset();
    tid = tctx->tid;
    DestroyAndFree(tctx->dead_info);
    if (tctx->name) {
      internal_free(tctx->name);
      tctx->name = 0;
    }
  } else {
    // Allocating new thread descriptor and tid.
    StatInc(thr, StatThreadMaxTid);
    tid = ctx->thread_seq++;
    void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
    tctx = new(mem) ThreadContext(tid);
    ctx->threads[tid] = tctx;
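    // Map the trace region for the new tid once; reused descriptors keep
    // their existing mapping.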
    MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event));
  }
  CHECK_NE(tctx, 0);
  CHECK_GE(tid, 0);
  CHECK_LT(tid, kMaxTid);
  DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", thr->tid, tid, uid);
  CHECK_EQ(tctx->status, ThreadStatusInvalid);
  ctx->alive_threads++;
  if (ctx->max_alive_threads < ctx->alive_threads) {
    ctx->max_alive_threads++;
    CHECK_EQ(ctx->max_alive_threads, ctx->alive_threads);
    StatInc(thr, StatThreadMaxAlive);
  }
  tctx->status = ThreadStatusCreated;
  tctx->thr = 0;
  tctx->user_id = uid;
  tctx->unique_id = ctx->unique_thread_seq++;
  tctx->detached = detached;
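  // For any thread except the main one (tid 0), release the creator's vector
  // clock into the new thread's sync variable; ThreadStart acquires it, which
  // establishes the create->start happens-before edge.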
  if (tid) {
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    thr->clock.set(thr->tid, thr->fast_state.epoch());
    thr->fast_synch_epoch = thr->fast_state.epoch();
    thr->clock.release(&tctx->sync);
    StatInc(thr, StatSyncRelease);
    tctx->creation_stack.ObtainCurrent(thr, pc);
    tctx->creation_tid = thr->tid;
  }
  return tid;
}

void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
  CHECK_GT(thr->in_rtl, 0);
  uptr stk_addr = 0;
  uptr stk_size = 0;
  uptr tls_addr = 0;
  uptr tls_size = 0;
  GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size);

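  // For non-main threads, reset the shadow of the stack and TLS ranges so
  // that accesses recorded by a previous thread occupying the same memory do
  // not produce false reports against this thread.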
  if (tid) {
    if (stk_addr && stk_size) {
      MemoryResetRange(thr, /*pc=*/ 1, stk_addr, stk_size);
    }

    if (tls_addr && tls_size) {
      // Check that the thr object is in tls.
      const uptr thr_beg = (uptr)thr;
      const uptr thr_end = (uptr)thr + sizeof(*thr);
      CHECK_GE(thr_beg, tls_addr);
      CHECK_LE(thr_beg, tls_addr + tls_size);
      CHECK_GE(thr_end, tls_addr);
      CHECK_LE(thr_end, tls_addr + tls_size);
      // Since the thr object is huge, skip it.
      MemoryResetRange(thr, /*pc=*/ 2, tls_addr, thr_beg - tls_addr);
      MemoryResetRange(thr, /*pc=*/ 2, thr_end, tls_addr + tls_size - thr_end);
    }
  }

  Lock l(&CTX()->thread_mtx);
  ThreadContext *tctx = CTX()->threads[tid];
  CHECK_NE(tctx, 0);
  CHECK_EQ(tctx->status, ThreadStatusCreated);
  tctx->status = ThreadStatusRunning;
  tctx->os_id = os_id;
  // RoundUp so that one trace part does not contain events
  // from different threads.
  tctx->epoch0 = RoundUp(tctx->epoch1 + 1, kTracePartSize);
  tctx->epoch1 = (u64)-1;
  new(thr) ThreadState(CTX(), tid, tctx->unique_id,
                       tctx->epoch0, stk_addr, stk_size,
                       tls_addr, tls_size);
#ifdef TSAN_GO
  // Setup dynamic shadow stack.
  const int kInitStackSize = 8;
  thr->shadow_stack = (uptr*)internal_alloc(MBlockShadowStack,
                                            kInitStackSize * sizeof(uptr));
  thr->shadow_stack_pos = thr->shadow_stack;
  thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
#endif
#ifndef TSAN_GO
  AllocatorThreadStart(thr);
#endif
  tctx->thr = thr;
  thr->fast_synch_epoch = tctx->epoch0;
  thr->clock.set(tid, tctx->epoch0);
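  // Pairs with clock.release(&tctx->sync) in ThreadCreate: the new thread
  // acquires everything that happened before its creation.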
  thr->clock.acquire(&tctx->sync);
  thr->fast_state.SetHistorySize(flags()->history_size);
  const uptr trace = (tctx->epoch0 / kTracePartSize) % TraceParts();
  thr->trace.headers[trace].epoch0 = tctx->epoch0;
  StatInc(thr, StatSyncAcquire);
  DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
          "tls_addr=%zx tls_size=%zx\n",
          tid, (uptr)tctx->epoch0, stk_addr, stk_size, tls_addr, tls_size);
  thr->is_alive = true;
}

void ThreadFinish(ThreadState *thr) {
  CHECK_GT(thr->in_rtl, 0);
  StatInc(thr, StatThreadFinish);
  // FIXME: Treat it as write.
  if (thr->stk_addr && thr->stk_size)
    MemoryResetRange(thr, /*pc=*/ 3, thr->stk_addr, thr->stk_size);
  if (thr->tls_addr && thr->tls_size) {
    const uptr thr_beg = (uptr)thr;
    const uptr thr_end = (uptr)thr + sizeof(*thr);
    // Since the thr object is huge, skip it.
    MemoryResetRange(thr, /*pc=*/ 4, thr->tls_addr, thr_beg - thr->tls_addr);
    MemoryResetRange(thr, /*pc=*/ 5,
                     thr_end, thr->tls_addr + thr->tls_size - thr_end);
  }
  thr->is_alive = false;
  Context *ctx = CTX();
  Lock l(&ctx->thread_mtx);
  ThreadContext *tctx = ctx->threads[thr->tid];
  CHECK_NE(tctx, 0);
  CHECK_EQ(tctx->status, ThreadStatusRunning);
  CHECK_GT(ctx->alive_threads, 0);
  ctx->alive_threads--;
  if (tctx->detached) {
    ThreadDead(thr, tctx);
  } else {
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    thr->clock.set(thr->tid, thr->fast_state.epoch());
    thr->fast_synch_epoch = thr->fast_state.epoch();
    thr->clock.release(&tctx->sync);
    StatInc(thr, StatSyncRelease);
    tctx->status = ThreadStatusFinished;
  }

  // Save info about the thread.
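  // The trace headers are copied into dead_info so that reports produced
  // after the thread has exited can still restore its stacks.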
  tctx->dead_info = new(internal_alloc(MBlockDeadInfo, sizeof(ThreadDeadInfo)))
      ThreadDeadInfo();
  for (uptr i = 0; i < TraceParts(); i++) {
    tctx->dead_info->trace.headers[i].epoch0 = thr->trace.headers[i].epoch0;
    tctx->dead_info->trace.headers[i].stack0.CopyFrom(
        thr->trace.headers[i].stack0);
  }
  tctx->epoch1 = thr->fast_state.epoch();

#ifndef TSAN_GO
  AllocatorThreadFinish(thr);
#endif
  thr->~ThreadState();
  StatAggregate(ctx->stat, thr->stat);
  tctx->thr = 0;
}

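// Maps the user-level identifier passed to ThreadCreate (typically the
// pthread_t in the C++ runtime) back to the internal tid. The mapping is
// consumed: user_id is cleared so that the identifier can be reused.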
int ThreadTid(ThreadState *thr, uptr pc, uptr uid) {
  CHECK_GT(thr->in_rtl, 0);
  Context *ctx = CTX();
  Lock l(&ctx->thread_mtx);
  int res = -1;
  for (unsigned tid = 0; tid < kMaxTid; tid++) {
    ThreadContext *tctx = ctx->threads[tid];
    if (tctx != 0 && tctx->user_id == uid
        && tctx->status != ThreadStatusInvalid) {
      tctx->user_id = 0;
      res = tid;
      break;
    }
  }
  DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res);
  return res;
}

void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
  CHECK_GT(thr->in_rtl, 0);
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
  Context *ctx = CTX();
  Lock l(&ctx->thread_mtx);
  ThreadContext *tctx = ctx->threads[tid];
  if (tctx->status == ThreadStatusInvalid) {
    Printf("ThreadSanitizer: join of non-existent thread\n");
    return;
  }
  // FIXME(dvyukov): print message and continue (it's user error).
  CHECK_EQ(tctx->detached, false);
  CHECK_EQ(tctx->status, ThreadStatusFinished);
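  // Acquire the clock released in ThreadFinish: everything the joined thread
  // did happens-before the join in the calling thread.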
  thr->clock.acquire(&tctx->sync);
  StatInc(thr, StatSyncAcquire);
  ThreadDead(thr, tctx);
}

void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
  CHECK_GT(thr->in_rtl, 0);
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  Context *ctx = CTX();
  Lock l(&ctx->thread_mtx);
  ThreadContext *tctx = ctx->threads[tid];
  if (tctx->status == ThreadStatusInvalid) {
    Printf("ThreadSanitizer: detach of non-existent thread\n");
    return;
  }
  if (tctx->status == ThreadStatusFinished) {
    ThreadDead(thr, tctx);
  } else {
    tctx->detached = true;
  }
}

void ThreadSetName(ThreadState *thr, const char *name) {
  Context *ctx = CTX();
  Lock l(&ctx->thread_mtx);
  ThreadContext *tctx = ctx->threads[thr->tid];
  CHECK_NE(tctx, 0);
  CHECK_EQ(tctx->status, ThreadStatusRunning);
  if (tctx->name) {
    internal_free(tctx->name);
    tctx->name = 0;
  }
  if (name)
    tctx->name = internal_strdup(name);
}

void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
                       uptr size, bool is_write) {
  if (size == 0)
    return;

  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n",
           thr->tid, (void*)pc, (void*)addr,
           (int)size, is_write);

#if TSAN_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsAppMem(addr + size - 1)) {
    Printf("Access to non app mem %zx\n", addr + size - 1);
    DCHECK(IsAppMem(addr + size - 1));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
  if (!IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))) {
    Printf("Bad shadow addr %p (%zx)\n",
           shadow_mem + size * kShadowCnt / 8 - 1, addr + size - 1);
    DCHECK(IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1)));
  }
#endif

  StatInc(thr, StatMopRange);

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;

  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

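  // The range is processed in up to three parts: a byte-by-byte unaligned
  // prefix, whole kShadowCell-sized cells in the middle (each covered by
  // kShadowCnt shadow slots), and a byte-by-byte suffix.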
  bool unaligned = (addr % kShadowCell) != 0;

  // Handle unaligned beginning, if any.
  for (; addr % kShadowCell && size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
  }
  if (unaligned)
    shadow_mem += kShadowCnt;
  // Handle middle part, if any.
  for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell) {
    int const kAccessSizeLog = 3;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(0, kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
    shadow_mem += kShadowCnt;
  }
  // Handle ending, if any.
  for (; size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
  }
}

}  // namespace __tsan