//===-- tsan_rtl_thread.cc --------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_placement_new.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_sync.h"

namespace __tsan {

const int kThreadQuarantineSize = 16;

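// Reports a thread leak for a context whose thread was neither joined nor
// detached (i.e. still Created, Running or Finished at exit).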
static void MaybeReportThreadLeak(ThreadContext *tctx) {
  if (tctx->detached)
    return;
  if (tctx->status != ThreadStatusCreated
      && tctx->status != ThreadStatusRunning
      && tctx->status != ThreadStatusFinished)
    return;
  ScopedReport rep(ReportTypeThreadLeak);
  rep.AddThread(tctx);
  OutputReport(rep);
}

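// Called at process exit: walks the thread table and reports leaked threads
// if the report_thread_leaks flag is set.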
void ThreadFinalize(ThreadState *thr) {
  CHECK_GT(thr->in_rtl, 0);
  if (!flags()->report_thread_leaks)
    return;
  Context *ctx = CTX();
  Lock l(&ctx->thread_mtx);
  for (unsigned i = 0; i < kMaxTid; i++) {
    ThreadContext *tctx = ctx->threads[i];
    if (tctx == 0)
      continue;
    MaybeReportThreadLeak(tctx);
  }
}

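// Retires a thread context: marks it dead, clears its user id and sync clock,
// and appends it to the dead list so that its tid can be reused later.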
static void ThreadDead(ThreadState *thr, ThreadContext *tctx) {
  Context *ctx = CTX();
  CHECK_GT(thr->in_rtl, 0);
  CHECK(tctx->status == ThreadStatusRunning
      || tctx->status == ThreadStatusFinished);
  DPrintf("#%d: ThreadDead uid=%lu\n", thr->tid, tctx->user_id);
  tctx->status = ThreadStatusDead;
  tctx->user_id = 0;
  tctx->sync.Reset();

  // Put it on the dead list.
  tctx->dead_next = 0;
  if (ctx->dead_list_size == 0)
    ctx->dead_list_head = tctx;
  else
    ctx->dead_list_tail->dead_next = tctx;
  ctx->dead_list_tail = tctx;
  ctx->dead_list_size++;
}

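// Allocates a tid for a new thread, either by reusing a context from the dead
// list (once the quarantine is full or tids are exhausted) or by creating a
// fresh ThreadContext. For all but the main thread it also releases the
// creator's clock into tctx->sync, so the child's start happens-after this
// point.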
int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
  CHECK_GT(thr->in_rtl, 0);
  Context *ctx = CTX();
  Lock l(&ctx->thread_mtx);
  StatInc(thr, StatThreadCreate);
  int tid = -1;
  ThreadContext *tctx = 0;
  if (ctx->dead_list_size > kThreadQuarantineSize
      || ctx->thread_seq >= kMaxTid) {
    if (ctx->dead_list_size == 0) {
      Printf("ThreadSanitizer: %d thread limit exceeded. Dying.\n", kMaxTid);
      Die();
    }
    StatInc(thr, StatThreadReuse);
    tctx = ctx->dead_list_head;
    ctx->dead_list_head = tctx->dead_next;
    ctx->dead_list_size--;
    if (ctx->dead_list_size == 0) {
      CHECK_EQ(tctx->dead_next, 0);
      ctx->dead_list_head = 0;
    }
    CHECK_EQ(tctx->status, ThreadStatusDead);
    tctx->status = ThreadStatusInvalid;
    tctx->reuse_count++;
    tctx->sync.Reset();
    tid = tctx->tid;
    DestroyAndFree(tctx->dead_info);
  } else {
    StatInc(thr, StatThreadMaxTid);
    tid = ctx->thread_seq++;
    void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
    tctx = new(mem) ThreadContext(tid);
    ctx->threads[tid] = tctx;
  }
  CHECK_NE(tctx, 0);
  CHECK_GE(tid, 0);
  CHECK_LT(tid, kMaxTid);
  DPrintf("#%d: ThreadCreate tid=%d uid=%lu\n", thr->tid, tid, uid);
  CHECK_EQ(tctx->status, ThreadStatusInvalid);
  ctx->alive_threads++;
  if (ctx->max_alive_threads < ctx->alive_threads) {
    ctx->max_alive_threads++;
    CHECK_EQ(ctx->max_alive_threads, ctx->alive_threads);
    StatInc(thr, StatThreadMaxAlive);
  }
  tctx->status = ThreadStatusCreated;
  tctx->thr = 0;
  tctx->user_id = uid;
  tctx->unique_id = ctx->unique_thread_seq++;
  tctx->detached = detached;
  if (tid) {
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeMop, 0);
    thr->clock.set(thr->tid, thr->fast_state.epoch());
    thr->fast_synch_epoch = thr->fast_state.epoch();
    thr->clock.release(&tctx->sync);
    StatInc(thr, StatSyncRelease);

    tctx->creation_stack.ObtainCurrent(thr, pc);
  }
  return tid;
}

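// Runs in the context of the new thread. For non-main threads it resets
// shadow memory for the thread's stack and TLS (skipping the ThreadState
// object, which itself lives in TLS), then constructs the ThreadState in
// place and acquires the creator's clock from tctx->sync.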
void ThreadStart(ThreadState *thr, int tid) {
  CHECK_GT(thr->in_rtl, 0);
  uptr stk_addr = 0;
  uptr stk_size = 0;
  uptr tls_addr = 0;
  uptr tls_size = 0;
  GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size);

  if (tid) {
    MemoryResetRange(thr, /*pc=*/ 1, stk_addr, stk_size);

    // Check that the thr object is in tls.
    const uptr thr_beg = (uptr)thr;
    const uptr thr_end = (uptr)thr + sizeof(*thr);
    CHECK_GE(thr_beg, tls_addr);
    CHECK_LE(thr_beg, tls_addr + tls_size);
    CHECK_GE(thr_end, tls_addr);
    CHECK_LE(thr_end, tls_addr + tls_size);
    // Since the thr object is huge, skip it.
    MemoryResetRange(thr, /*pc=*/ 2, tls_addr, thr_beg - tls_addr);
    MemoryResetRange(thr, /*pc=*/ 2, thr_end, tls_addr + tls_size - thr_end);
  }

  Lock l(&CTX()->thread_mtx);
  ThreadContext *tctx = CTX()->threads[tid];
  CHECK_NE(tctx, 0);
  CHECK_EQ(tctx->status, ThreadStatusCreated);
  tctx->status = ThreadStatusRunning;
  tctx->epoch0 = tctx->epoch1 + 1;
  tctx->epoch1 = (u64)-1;
  new(thr) ThreadState(CTX(), tid, tctx->epoch0, stk_addr, stk_size,
                       tls_addr, tls_size);
  tctx->thr = thr;
  thr->fast_synch_epoch = tctx->epoch0;
  thr->clock.set(tid, tctx->epoch0);
  thr->clock.acquire(&tctx->sync);
  StatInc(thr, StatSyncAcquire);
  DPrintf("#%d: ThreadStart epoch=%llu stk_addr=%lx stk_size=%lx "
          "tls_addr=%lx tls_size=%lx\n",
          tid, tctx->epoch0, stk_addr, stk_size, tls_addr, tls_size);
}

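// Called when a thread exits: resets shadow for its stack and TLS, and for
// non-detached threads releases its clock into tctx->sync so the future
// ThreadJoin can acquire it (detached threads are retired immediately).
// The trace and per-part stacks are saved into dead_info so reports can still
// refer to events of the finished thread; the ThreadState is then destroyed.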
void ThreadFinish(ThreadState *thr) {
  CHECK_GT(thr->in_rtl, 0);
  StatInc(thr, StatThreadFinish);
  // FIXME: Treat it as write.
  if (thr->stk_addr && thr->stk_size)
    MemoryResetRange(thr, /*pc=*/ 3, thr->stk_addr, thr->stk_size);
  if (thr->tls_addr && thr->tls_size) {
    const uptr thr_beg = (uptr)thr;
    const uptr thr_end = (uptr)thr + sizeof(*thr);
    // Since the thr object is huge, skip it.
    MemoryResetRange(thr, /*pc=*/ 4, thr->tls_addr, thr_beg - thr->tls_addr);
    MemoryResetRange(thr, /*pc=*/ 5,
        thr_end, thr->tls_addr + thr->tls_size - thr_end);
  }
  Context *ctx = CTX();
  Lock l(&ctx->thread_mtx);
  ThreadContext *tctx = ctx->threads[thr->tid];
  CHECK_NE(tctx, 0);
  CHECK_EQ(tctx->status, ThreadStatusRunning);
  CHECK_GT(ctx->alive_threads, 0);
  ctx->alive_threads--;
  if (tctx->detached) {
    ThreadDead(thr, tctx);
  } else {
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeMop, 0);
    thr->clock.set(thr->tid, thr->fast_state.epoch());
    thr->fast_synch_epoch = thr->fast_state.epoch();
    thr->clock.release(&tctx->sync);
    StatInc(thr, StatSyncRelease);
    tctx->status = ThreadStatusFinished;
  }

  // Save info about the thread.
  tctx->dead_info = new(internal_alloc(MBlockDeadInfo, sizeof(ThreadDeadInfo)))
      ThreadDeadInfo();
  internal_memcpy(&tctx->dead_info->trace.events[0],
      &thr->trace.events[0], sizeof(thr->trace.events));
  for (int i = 0; i < kTraceParts; i++) {
    tctx->dead_info->trace.headers[i].stack0.CopyFrom(
        thr->trace.headers[i].stack0);
  }
  tctx->epoch1 = thr->fast_state.epoch();

  thr->~ThreadState();
  StatAggregate(ctx->stat, thr->stat);
  tctx->thr = 0;
}

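// Maps a user-level thread id (e.g. a pthread_t) to the internal tid, or
// returns -1 if no live context matches.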
int ThreadTid(ThreadState *thr, uptr pc, uptr uid) {
  CHECK_GT(thr->in_rtl, 0);
  DPrintf("#%d: ThreadTid uid=%lu\n", thr->tid, uid);
  Lock l(&CTX()->thread_mtx);
  for (unsigned tid = 0; tid < kMaxTid; tid++) {
    if (CTX()->threads[tid] != 0
        && CTX()->threads[tid]->user_id == uid
        && CTX()->threads[tid]->status != ThreadStatusInvalid)
      return tid;
  }
  return -1;
}

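// Acquires the finished thread's clock (so everything it did happens-before
// the join) and retires its context.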
void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
  CHECK_GT(thr->in_rtl, 0);
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
  Context *ctx = CTX();
  Lock l(&ctx->thread_mtx);
  ThreadContext *tctx = ctx->threads[tid];
  if (tctx->status == ThreadStatusInvalid) {
    Printf("ThreadSanitizer: join of non-existent thread\n");
    return;
  }
  CHECK_EQ(tctx->detached, false);
  CHECK_EQ(tctx->status, ThreadStatusFinished);
  thr->clock.acquire(&tctx->sync);
  StatInc(thr, StatSyncAcquire);
  ThreadDead(thr, tctx);
}

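// Marks the thread as detached; if it has already finished, retires its
// context immediately.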
void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
  CHECK_GT(thr->in_rtl, 0);
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  Context *ctx = CTX();
  Lock l(&ctx->thread_mtx);
  ThreadContext *tctx = ctx->threads[tid];
  if (tctx->status == ThreadStatusInvalid) {
    Printf("ThreadSanitizer: detach of non-existent thread\n");
    return;
  }
  if (tctx->status == ThreadStatusFinished) {
    ThreadDead(thr, tctx);
  } else {
    tctx->detached = true;
  }
}

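// Processes a range access: splits [addr, addr + size) into an unaligned head
// of byte accesses, aligned 8-byte shadow cells in the middle, and an
// unaligned tail, feeding each piece to MemoryAccessImpl.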
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
                       uptr size, bool is_write) {
  if (size == 0)
    return;

  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n",
      thr->tid, (void*)pc, (void*)addr,
      (int)size, is_write);

#if TSAN_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %lx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsAppMem(addr + size - 1)) {
    Printf("Access to non app mem %lx\n", addr + size - 1);
    DCHECK(IsAppMem(addr + size - 1));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%lx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
  if (!IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))) {
    Printf("Bad shadow addr %p (%lx)\n",
        shadow_mem + size * kShadowCnt / 8 - 1, addr + size - 1);
    DCHECK(IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1)));
  }
#endif

  StatInc(thr, StatMopRange);

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;

  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  TraceAddEvent(thr, fast_state.epoch(), EventTypeMop, pc);

  bool unaligned = (addr % kShadowCell) != 0;

  // Handle unaligned beginning, if any.
  for (; addr % kShadowCell && size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, fast_state,
        shadow_mem, cur);
  }
  if (unaligned)
    shadow_mem += kShadowCnt;
  // Handle middle part, if any.
  for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell) {
    int const kAccessSizeLog = 3;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(0, kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, fast_state,
        shadow_mem, cur);
    shadow_mem += kShadowCnt;
  }
  // Handle ending, if any.
  for (; size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, fast_state,
        shadow_mem, cur);
  }
}

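// Convenience wrappers around MemoryAccess: the fourth argument is
// kAccessSizeLog (0 for 1-byte, 3 for 8-byte accesses), the fifth is the
// is_write flag.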
void MemoryRead1Byte(ThreadState *thr, uptr pc, uptr addr) {
  MemoryAccess(thr, pc, addr, 0, 0);
}

void MemoryWrite1Byte(ThreadState *thr, uptr pc, uptr addr) {
  MemoryAccess(thr, pc, addr, 0, 1);
}

void MemoryRead8Byte(ThreadState *thr, uptr pc, uptr addr) {
  MemoryAccess(thr, pc, addr, 3, 0);
}

void MemoryWrite8Byte(ThreadState *thr, uptr pc, uptr addr) {
  MemoryAccess(thr, pc, addr, 3, 1);
}
}  // namespace __tsan