Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 1 | //===-- tsan_rtl_thread.cc --------------------------------------*- C++ -*-===// |
| 2 | // |
| 3 | // The LLVM Compiler Infrastructure |
| 4 | // |
| 5 | // This file is distributed under the University of Illinois Open Source |
| 6 | // License. See LICENSE.TXT for details. |
| 7 | // |
| 8 | //===----------------------------------------------------------------------===// |
| 9 | // |
| 10 | // This file is a part of ThreadSanitizer (TSan), a race detector. |
| 11 | // |
| 12 | //===----------------------------------------------------------------------===// |
| 13 | |
| 14 | #include "tsan_rtl.h" |
| 15 | #include "tsan_mman.h" |
| 16 | #include "tsan_placement_new.h" |
| 17 | #include "tsan_platform.h" |
| 18 | #include "tsan_report.h" |
| 19 | #include "tsan_sync.h" |
| 20 | |
| 21 | namespace __tsan { |
| 22 | |
// Number of dead thread contexts kept on the quarantine list before
// ThreadCreate() starts recycling them (bounds memory held by dead threads
// while still delaying tid reuse to catch races with recently-dead threads).
const int kThreadQuarantineSize = 16;
Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 24 | |
| 25 | static void MaybeReportThreadLeak(ThreadContext *tctx) { |
| 26 | if (tctx->detached) |
| 27 | return; |
| 28 | if (tctx->status != ThreadStatusCreated |
| 29 | && tctx->status != ThreadStatusRunning |
| 30 | && tctx->status != ThreadStatusFinished) |
| 31 | return; |
| 32 | ScopedReport rep(ReportTypeThreadLeak); |
| 33 | rep.AddThread(tctx); |
| 34 | OutputReport(rep); |
| 35 | } |
| 36 | |
| 37 | void ThreadFinalize(ThreadState *thr) { |
| 38 | CHECK_GT(thr->in_rtl, 0); |
| 39 | if (!flags()->report_thread_leaks) |
| 40 | return; |
| 41 | Context *ctx = CTX(); |
| 42 | Lock l(&ctx->thread_mtx); |
Kostya Serebryany | 07c4805 | 2012-05-11 14:42:24 +0000 | [diff] [blame] | 43 | for (unsigned i = 0; i < kMaxTid; i++) { |
Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 44 | ThreadContext *tctx = ctx->threads[i]; |
| 45 | if (tctx == 0) |
| 46 | continue; |
| 47 | MaybeReportThreadLeak(tctx); |
Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 48 | } |
| 49 | } |
| 50 | |
| 51 | static void ThreadDead(ThreadState *thr, ThreadContext *tctx) { |
| 52 | Context *ctx = CTX(); |
| 53 | CHECK_GT(thr->in_rtl, 0); |
| 54 | CHECK(tctx->status == ThreadStatusRunning |
| 55 | || tctx->status == ThreadStatusFinished); |
| 56 | DPrintf("#%d: ThreadDead uid=%lu\n", thr->tid, tctx->user_id); |
| 57 | tctx->status = ThreadStatusDead; |
| 58 | tctx->user_id = 0; |
| 59 | tctx->sync.Reset(); |
| 60 | |
| 61 | // Put to dead list. |
| 62 | tctx->dead_next = 0; |
| 63 | if (ctx->dead_list_size == 0) |
| 64 | ctx->dead_list_head = tctx; |
| 65 | else |
| 66 | ctx->dead_list_tail->dead_next = tctx; |
| 67 | ctx->dead_list_tail = tctx; |
| 68 | ctx->dead_list_size++; |
| 69 | } |
| 70 | |
// Allocates a tid for a newly created thread (called in the parent thread).
// Either recycles a context from the dead-thread quarantine or allocates a
// fresh tid/context.  Returns the tid; the child completes initialization
// in ThreadStart().
int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
  CHECK_GT(thr->in_rtl, 0);
  Context *ctx = CTX();
  Lock l(&ctx->thread_mtx);
  StatInc(thr, StatThreadCreate);
  int tid = -1;
  ThreadContext *tctx = 0;
  // Reuse a dead context if the quarantine is over its size limit, or if we
  // have run out of fresh tids.
  if (ctx->dead_list_size > kThreadQuarantineSize
      || ctx->thread_seq >= kMaxTid) {
    // No fresh tids and nothing to recycle: hard limit reached.
    if (ctx->dead_list_size == 0) {
      Printf("ThreadSanitizer: %d thread limit exceeded. Dying.\n", kMaxTid);
      Die();
    }
    StatInc(thr, StatThreadReuse);
    // Pop the oldest context from the head of the dead list.
    tctx = ctx->dead_list_head;
    ctx->dead_list_head = tctx->dead_next;
    ctx->dead_list_size--;
    if (ctx->dead_list_size == 0) {
      CHECK_EQ(tctx->dead_next, 0);
      ctx->dead_list_head = 0;
    }
    CHECK_EQ(tctx->status, ThreadStatusDead);
    tctx->status = ThreadStatusInvalid;
    tctx->reuse_count++;
    tctx->sync.Reset();
    tid = tctx->tid;
    // The saved trace/stacks of the previous incarnation are no longer
    // needed once the tid is recycled.
    DestroyAndFree(tctx->dead_info);
  } else {
    // Allocate a fresh tid and a new context for it.
    StatInc(thr, StatThreadMaxTid);
    tid = ctx->thread_seq++;
    void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
    tctx = new(mem) ThreadContext(tid);
    ctx->threads[tid] = tctx;
  }
  CHECK_NE(tctx, 0);
  CHECK_GE(tid, 0);
  CHECK_LT(tid, kMaxTid);
  DPrintf("#%d: ThreadCreate tid=%d uid=%lu\n", thr->tid, tid, uid);
  CHECK_EQ(tctx->status, ThreadStatusInvalid);
  ctx->alive_threads++;
  if (ctx->max_alive_threads < ctx->alive_threads) {
    // alive_threads only grows by 1 per call, so incrementing the max keeps
    // the two equal; the CHECK documents that invariant.
    ctx->max_alive_threads++;
    CHECK_EQ(ctx->max_alive_threads, ctx->alive_threads);
    StatInc(thr, StatThreadMaxAlive);
  }
  tctx->status = ThreadStatusCreated;
  tctx->thr = 0;
  tctx->user_id = uid;
  tctx->unique_id = ctx->unique_thread_seq++;
  tctx->detached = detached;
  // tid 0 is the main thread: it has no parent to synchronize with, so no
  // release is performed for it.
  if (tid) {
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeMop, 0);
    thr->clock.set(thr->tid, thr->fast_state.epoch());
    thr->fast_synch_epoch = thr->fast_state.epoch();
    // Release the parent's clock into tctx->sync; the child acquires it in
    // ThreadStart(), establishing creation->start happens-before.
    thr->clock.release(&tctx->sync);
    StatInc(thr, StatSyncRelease);

    tctx->creation_stack.ObtainCurrent(thr, pc);
  }
  return tid;
}
| 134 | |
// Runs in the context of the newly started thread (tid was allocated earlier
// by ThreadCreate in the parent).  Resets shadow for the thread's stack/TLS,
// constructs the per-thread ThreadState in place, and acquires the
// synchronization released by the parent.
void ThreadStart(ThreadState *thr, int tid) {
  CHECK_GT(thr->in_rtl, 0);
  uptr stk_addr = 0;
  uptr stk_size = 0;
  uptr tls_addr = 0;
  uptr tls_size = 0;
  GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size);

  MemoryResetRange(thr, /*pc=*/ 1, stk_addr, stk_size);

  // Check that the thr object is in tls;
  const uptr thr_beg = (uptr)thr;
  const uptr thr_end = (uptr)thr + sizeof(*thr);
  CHECK_GE(thr_beg, tls_addr);
  CHECK_LE(thr_beg, tls_addr + tls_size);
  CHECK_GE(thr_end, tls_addr);
  CHECK_LE(thr_end, tls_addr + tls_size);
  // Since the thr object is huge, skip it.
  // (Reset the TLS ranges below and above thr, but not thr itself.)
  MemoryResetRange(thr, /*pc=*/ 2, tls_addr, thr_beg - tls_addr);
  MemoryResetRange(thr, /*pc=*/ 2, thr_end, tls_addr + tls_size - thr_end);

  Lock l(&CTX()->thread_mtx);
  ThreadContext *tctx = CTX()->threads[tid];
  CHECK_NE(tctx, 0);
  CHECK_EQ(tctx->status, ThreadStatusCreated);
  tctx->status = ThreadStatusRunning;
  // Continue the epoch range after the previous incarnation of this tid
  // (epoch1 was recorded in ThreadFinish); while the thread is alive its
  // epoch1 is "infinity".
  tctx->epoch0 = tctx->epoch1 + 1;
  tctx->epoch1 = (u64)-1;
  // Placement-construct the ThreadState into the TLS-resident storage
  // verified by the CHECKs above.
  new(thr) ThreadState(CTX(), tid, tctx->epoch0, stk_addr, stk_size,
                       tls_addr, tls_size);
  tctx->thr = thr;
  thr->fast_synch_epoch = tctx->epoch0;
  thr->clock.set(tid, tctx->epoch0);
  // Acquire the clock the parent released in ThreadCreate (creation->start
  // happens-before edge).
  thr->clock.acquire(&tctx->sync);
  StatInc(thr, StatSyncAcquire);
  DPrintf("#%d: ThreadStart epoch=%llu stk_addr=%lx stk_size=%lx "
          "tls_addr=%lx tls_size=%lx\n",
          tid, tctx->epoch0, stk_addr, stk_size, tls_addr, tls_size);
}
| 174 | |
| 175 | void ThreadFinish(ThreadState *thr) { |
| 176 | CHECK_GT(thr->in_rtl, 0); |
| 177 | StatInc(thr, StatThreadFinish); |
| 178 | // FIXME: Treat it as write. |
| 179 | if (thr->stk_addr && thr->stk_size) |
| 180 | MemoryResetRange(thr, /*pc=*/ 3, thr->stk_addr, thr->stk_size); |
| 181 | if (thr->tls_addr && thr->tls_size) { |
| 182 | const uptr thr_beg = (uptr)thr; |
| 183 | const uptr thr_end = (uptr)thr + sizeof(*thr); |
| 184 | // Since the thr object is huge, skip it. |
| 185 | MemoryResetRange(thr, /*pc=*/ 4, thr->tls_addr, thr_beg - thr->tls_addr); |
| 186 | MemoryResetRange(thr, /*pc=*/ 5, |
| 187 | thr_end, thr->tls_addr + thr->tls_size - thr_end); |
| 188 | } |
| 189 | Context *ctx = CTX(); |
| 190 | Lock l(&ctx->thread_mtx); |
| 191 | ThreadContext *tctx = ctx->threads[thr->tid]; |
| 192 | CHECK_NE(tctx, 0); |
| 193 | CHECK_EQ(tctx->status, ThreadStatusRunning); |
| 194 | CHECK_GT(ctx->alive_threads, 0); |
| 195 | ctx->alive_threads--; |
| 196 | if (tctx->detached) { |
| 197 | ThreadDead(thr, tctx); |
| 198 | } else { |
| 199 | thr->fast_state.IncrementEpoch(); |
| 200 | // Can't increment epoch w/o writing to the trace as well. |
| 201 | TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeMop, 0); |
| 202 | thr->clock.set(thr->tid, thr->fast_state.epoch()); |
| 203 | thr->fast_synch_epoch = thr->fast_state.epoch(); |
| 204 | thr->clock.release(&tctx->sync); |
| 205 | StatInc(thr, StatSyncRelease); |
| 206 | tctx->status = ThreadStatusFinished; |
| 207 | } |
| 208 | |
| 209 | // Save from info about the thread. |
Dmitry Vyukov | f6985e3 | 2012-05-22 14:34:43 +0000 | [diff] [blame] | 210 | tctx->dead_info = new(internal_alloc(MBlockDeadInfo, sizeof(ThreadDeadInfo))) |
| 211 | ThreadDeadInfo(); |
| 212 | internal_memcpy(&tctx->dead_info->trace.events[0], |
Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 213 | &thr->trace.events[0], sizeof(thr->trace.events)); |
| 214 | for (int i = 0; i < kTraceParts; i++) { |
Dmitry Vyukov | f6985e3 | 2012-05-22 14:34:43 +0000 | [diff] [blame] | 215 | tctx->dead_info->trace.headers[i].stack0.CopyFrom( |
Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 216 | thr->trace.headers[i].stack0); |
| 217 | } |
Dmitry Vyukov | 302cebb | 2012-05-22 18:07:45 +0000 | [diff] [blame] | 218 | tctx->epoch1 = thr->fast_state.epoch(); |
Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 219 | |
| 220 | thr->~ThreadState(); |
| 221 | StatAggregate(ctx->stat, thr->stat); |
Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 222 | tctx->thr = 0; |
| 223 | } |
| 224 | |
| 225 | int ThreadTid(ThreadState *thr, uptr pc, uptr uid) { |
| 226 | CHECK_GT(thr->in_rtl, 0); |
| 227 | DPrintf("#%d: ThreadTid uid=%lu\n", thr->tid, uid); |
| 228 | Lock l(&CTX()->thread_mtx); |
Kostya Serebryany | 07c4805 | 2012-05-11 14:42:24 +0000 | [diff] [blame] | 229 | for (unsigned tid = 0; tid < kMaxTid; tid++) { |
Kostya Serebryany | 4ad375f | 2012-05-10 13:48:04 +0000 | [diff] [blame] | 230 | if (CTX()->threads[tid] != 0 |
| 231 | && CTX()->threads[tid]->user_id == uid |
| 232 | && CTX()->threads[tid]->status != ThreadStatusInvalid) |
| 233 | return tid; |
| 234 | } |
| 235 | return -1; |
| 236 | } |
| 237 | |
| 238 | void ThreadJoin(ThreadState *thr, uptr pc, int tid) { |
| 239 | CHECK_GT(thr->in_rtl, 0); |
| 240 | CHECK_GT(tid, 0); |
| 241 | CHECK_LT(tid, kMaxTid); |
| 242 | DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid); |
| 243 | Context *ctx = CTX(); |
| 244 | Lock l(&ctx->thread_mtx); |
| 245 | ThreadContext *tctx = ctx->threads[tid]; |
| 246 | if (tctx->status == ThreadStatusInvalid) { |
| 247 | Printf("ThreadSanitizer: join of non-existent thread\n"); |
| 248 | return; |
| 249 | } |
| 250 | CHECK_EQ(tctx->detached, false); |
| 251 | CHECK_EQ(tctx->status, ThreadStatusFinished); |
| 252 | thr->clock.acquire(&tctx->sync); |
| 253 | StatInc(thr, StatSyncAcquire); |
| 254 | ThreadDead(thr, tctx); |
| 255 | } |
| 256 | |
| 257 | void ThreadDetach(ThreadState *thr, uptr pc, int tid) { |
| 258 | CHECK_GT(thr->in_rtl, 0); |
| 259 | CHECK_GT(tid, 0); |
| 260 | CHECK_LT(tid, kMaxTid); |
| 261 | Context *ctx = CTX(); |
| 262 | Lock l(&ctx->thread_mtx); |
| 263 | ThreadContext *tctx = ctx->threads[tid]; |
| 264 | if (tctx->status == ThreadStatusInvalid) { |
| 265 | Printf("ThreadSanitizer: detach of non-existent thread\n"); |
| 266 | return; |
| 267 | } |
| 268 | if (tctx->status == ThreadStatusFinished) { |
| 269 | ThreadDead(thr, tctx); |
| 270 | } else { |
| 271 | tctx->detached = true; |
| 272 | } |
| 273 | } |
| 274 | |
// Instruments a memory access covering the range [addr, addr+size).
// The range is split into an unaligned head (processed byte-by-byte), an
// aligned middle (processed one kShadowCell-sized chunk at a time), and an
// unaligned tail (byte-by-byte again).  The epoch is incremented once for
// the whole range.
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
                       uptr size, bool is_write) {
  if (size == 0)
    return;

  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n",
      thr->tid, (void*)pc, (void*)addr,
      (int)size, is_write);

#if TSAN_DEBUG
  // Sanity-check that both ends of the application range and the
  // corresponding shadow range are within the expected address regions.
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %lx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsAppMem(addr + size - 1)) {
    Printf("Access to non app mem %lx\n", addr + size - 1);
    DCHECK(IsAppMem(addr + size - 1));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%lx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
  if (!IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))) {
    Printf("Bad shadow addr %p (%lx)\n",
        shadow_mem + size * kShadowCnt / 8 - 1, addr + size - 1);
    DCHECK(IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1)));
  }
#endif

  StatInc(thr, StatMopRange);

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;

  // One epoch increment and one trace event cover the entire range.
  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  TraceAddEvent(thr, fast_state.epoch(), EventTypeMop, pc);

  bool unaligned = (addr % kShadowCell) != 0;

  // Handle unaligned beginning, if any.
  for (; addr % kShadowCell && size; addr++, size--) {
    int const kAccessSizeLog = 0;  // 1-byte accesses
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, fast_state,
        shadow_mem, cur);
  }
  // The head loop above does not advance shadow_mem; move past the first
  // (partially covered) cell once, if the start was unaligned.
  if (unaligned)
    shadow_mem += kShadowCnt;
  // Handle middle part, if any.
  for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell) {
    int const kAccessSizeLog = 3;  // full-cell (8-byte) accesses
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(0, kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, fast_state,
        shadow_mem, cur);
    shadow_mem += kShadowCnt;
  }
  // Handle ending, if any.
  for (; size; addr++, size--) {
    int const kAccessSizeLog = 0;  // 1-byte accesses
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, fast_state,
        shadow_mem, cur);
  }
}
| 348 | |
// Convenience wrappers around MemoryAccess for the common access sizes.
// The 4th argument is log2 of the access size (0 -> 1 byte, 3 -> 8 bytes,
// matching kAccessSizeLog in MemoryAccessRange); the 5th is is_write
// (0 - read, 1 - write).
void MemoryRead1Byte(ThreadState *thr, uptr pc, uptr addr) {
  MemoryAccess(thr, pc, addr, 0, 0);
}

void MemoryWrite1Byte(ThreadState *thr, uptr pc, uptr addr) {
  MemoryAccess(thr, pc, addr, 0, 1);
}

void MemoryRead8Byte(ThreadState *thr, uptr pc, uptr addr) {
  MemoryAccess(thr, pc, addr, 3, 0);
}

void MemoryWrite8Byte(ThreadState *thr, uptr pc, uptr addr) {
  MemoryAccess(thr, pc, addr, 3, 1);
}
| 364 | } // namespace __tsan |