blob: 5aff6ca56adfc03841ede95591a1383a47529ca0 [file] [log] [blame]
Alexey Samsonov6fbecdd2012-07-17 09:39:59 +00001//===-- tsan_rtl_report.cc ------------------------------------------------===//
Kostya Serebryany7ac41482012-05-10 13:48:04 +00002//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file is a part of ThreadSanitizer (TSan), a race detector.
11//
12//===----------------------------------------------------------------------===//
13
Alexey Samsonovc0d78c12012-06-04 13:27:49 +000014#include "sanitizer_common/sanitizer_libc.h"
Alexey Samsonov47b16342012-06-07 09:50:16 +000015#include "sanitizer_common/sanitizer_placement_new.h"
Dmitry Vyukovff35f1d2012-08-30 13:02:30 +000016#include "sanitizer_common/sanitizer_stackdepot.h"
Dmitry Vyukovad9da372012-12-06 12:16:15 +000017#include "sanitizer_common/sanitizer_common.h"
Dmitry Vyukov793e7612013-01-29 14:20:12 +000018#include "sanitizer_common/sanitizer_stacktrace.h"
Kostya Serebryany7ac41482012-05-10 13:48:04 +000019#include "tsan_platform.h"
20#include "tsan_rtl.h"
21#include "tsan_suppressions.h"
22#include "tsan_symbolize.h"
23#include "tsan_report.h"
24#include "tsan_sync.h"
25#include "tsan_mman.h"
26#include "tsan_flags.h"
Dmitry Vyukovc2234cd2012-12-18 06:57:34 +000027#include "tsan_fd.h"
Kostya Serebryany7ac41482012-05-10 13:48:04 +000028
Alexey Samsonov591616d2012-09-11 09:44:48 +000029namespace __tsan {
Alexey Samsonov15a77612012-06-06 15:22:20 +000030
Dmitry Vyukovad9da372012-12-06 12:16:15 +000031using namespace __sanitizer; // NOLINT
32
Stephen Hines6d186232014-11-26 17:56:19 -080033static ReportStack *SymbolizeStack(StackTrace trace);
Dmitry Vyukov793e7612013-01-29 14:20:12 +000034
Alexey Samsonov591616d2012-09-11 09:44:48 +000035void TsanCheckFailed(const char *file, int line, const char *cond,
36 u64 v1, u64 v2) {
Stephen Hines2d1fdb22014-05-28 23:58:16 -070037 // There is high probability that interceptors will check-fail as well,
38 // on the other hand there is no sense in processing interceptors
39 // since we are going to die soon.
40 ScopedIgnoreInterceptors ignore;
Alexey Samsonovb1fe3022012-11-02 12:17:51 +000041 Printf("FATAL: ThreadSanitizer CHECK failed: "
42 "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
43 file, line, cond, (uptr)v1, (uptr)v2);
Stephen Hines6d186232014-11-26 17:56:19 -080044 PrintCurrentStackSlow(StackTrace::GetCurrentPc());
Alexey Samsonov15a77612012-06-06 15:22:20 +000045 Die();
46}
47
Kostya Serebryany7ac41482012-05-10 13:48:04 +000048// Can be overriden by an application/test to intercept reports.
Dmitry Vyukov87dbdf52012-07-25 14:30:51 +000049#ifdef TSAN_EXTERNAL_HOOKS
50bool OnReport(const ReportDesc *rep, bool suppressed);
51#else
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -080052SANITIZER_WEAK_CXX_DEFAULT_IMPL
53bool OnReport(const ReportDesc *rep, bool suppressed) {
Kostya Serebryany7ac41482012-05-10 13:48:04 +000054 (void)rep;
55 return suppressed;
56}
Dmitry Vyukov87dbdf52012-07-25 14:30:51 +000057#endif
Kostya Serebryany7ac41482012-05-10 13:48:04 +000058
Stephen Hines86277eb2015-03-23 12:06:32 -070059static void StackStripMain(SymbolizedStack *frames) {
60 SymbolizedStack *last_frame = nullptr;
61 SymbolizedStack *last_frame2 = nullptr;
62 for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
Kostya Serebryany7ac41482012-05-10 13:48:04 +000063 last_frame2 = last_frame;
Stephen Hines86277eb2015-03-23 12:06:32 -070064 last_frame = cur;
Kostya Serebryany7ac41482012-05-10 13:48:04 +000065 }
66
67 if (last_frame2 == 0)
68 return;
Stephen Hines86277eb2015-03-23 12:06:32 -070069#ifndef SANITIZER_GO
Stephen Hines6d186232014-11-26 17:56:19 -080070 const char *last = last_frame->info.function;
Stephen Hines6d186232014-11-26 17:56:19 -080071 const char *last2 = last_frame2->info.function;
Kostya Serebryany7ac41482012-05-10 13:48:04 +000072 // Strip frame above 'main'
73 if (last2 && 0 == internal_strcmp(last2, "main")) {
Stephen Hines86277eb2015-03-23 12:06:32 -070074 last_frame->ClearAll();
75 last_frame2->next = nullptr;
Kostya Serebryany7ac41482012-05-10 13:48:04 +000076 // Strip our internal thread start routine.
77 } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
Stephen Hines86277eb2015-03-23 12:06:32 -070078 last_frame->ClearAll();
79 last_frame2->next = nullptr;
Kostya Serebryany7ac41482012-05-10 13:48:04 +000080 // Strip global ctors init.
81 } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
Stephen Hines86277eb2015-03-23 12:06:32 -070082 last_frame->ClearAll();
83 last_frame2->next = nullptr;
Kostya Serebryany7ac41482012-05-10 13:48:04 +000084 // If both are 0, then we probably just failed to symbolize.
85 } else if (last || last2) {
86 // Ensure that we recovered stack completely. Trimmed stack
87 // can actually happen if we do not instrument some code,
Dmitry Vyukov0ab628c2012-09-06 15:18:14 +000088 // so it's only a debug print. However we must try hard to not miss it
Kostya Serebryany7ac41482012-05-10 13:48:04 +000089 // due to our fault.
Stephen Hines86277eb2015-03-23 12:06:32 -070090 DPrintf("Bottom stack frame is missed\n");
Kostya Serebryany7ac41482012-05-10 13:48:04 +000091 }
Dmitry Vyukovcb3a6b82012-07-06 20:23:59 +000092#else
Dmitry Vyukov1dc5f392013-06-06 13:31:35 +000093 // The last frame always point into runtime (gosched0, goexit0, runtime.main).
Stephen Hines86277eb2015-03-23 12:06:32 -070094 last_frame->ClearAll();
95 last_frame2->next = nullptr;
Dmitry Vyukovc510a2f2012-07-06 14:54:25 +000096#endif
Kostya Serebryany7ac41482012-05-10 13:48:04 +000097}
98
Stephen Hines2d1fdb22014-05-28 23:58:16 -070099ReportStack *SymbolizeStackId(u32 stack_id) {
100 if (stack_id == 0)
101 return 0;
Stephen Hines6d186232014-11-26 17:56:19 -0800102 StackTrace stack = StackDepotGet(stack_id);
103 if (stack.trace == nullptr)
104 return nullptr;
105 return SymbolizeStack(stack);
Stephen Hines2d1fdb22014-05-28 23:58:16 -0700106}
107
Stephen Hines6d186232014-11-26 17:56:19 -0800108static ReportStack *SymbolizeStack(StackTrace trace) {
109 if (trace.size == 0)
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000110 return 0;
Stephen Hines86277eb2015-03-23 12:06:32 -0700111 SymbolizedStack *top = nullptr;
Stephen Hines6d186232014-11-26 17:56:19 -0800112 for (uptr si = 0; si < trace.size; si++) {
113 const uptr pc = trace.trace[si];
Dmitry Vyukove7718bc2013-06-17 19:57:03 +0000114 uptr pc1 = pc;
Stephen Hines86277eb2015-03-23 12:06:32 -0700115 // We obtain the return address, but we're interested in the previous
116 // instruction.
117 if ((pc & kExternalPCBit) == 0)
118 pc1 = StackTrace::GetPreviousInstructionPc(pc);
119 SymbolizedStack *ent = SymbolizeCode(pc1);
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000120 CHECK_NE(ent, 0);
Stephen Hines86277eb2015-03-23 12:06:32 -0700121 SymbolizedStack *last = ent;
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000122 while (last->next) {
Stephen Hines6d186232014-11-26 17:56:19 -0800123 last->info.address = pc; // restore original pc for report
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000124 last = last->next;
125 }
Stephen Hines6d186232014-11-26 17:56:19 -0800126 last->info.address = pc; // restore original pc for report
Stephen Hines86277eb2015-03-23 12:06:32 -0700127 last->next = top;
128 top = ent;
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000129 }
Stephen Hines86277eb2015-03-23 12:06:32 -0700130 StackStripMain(top);
131
132 ReportStack *stack = ReportStack::New();
133 stack->frames = top;
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000134 return stack;
135}
136
137ScopedReport::ScopedReport(ReportType typ) {
Stephen Hines2d1fdb22014-05-28 23:58:16 -0700138 ctx->thread_registry->CheckLocked();
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000139 void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
140 rep_ = new(mem) ReportDesc;
141 rep_->typ = typ;
Stephen Hines2d1fdb22014-05-28 23:58:16 -0700142 ctx->report_mtx.Lock();
Alexey Samsonov7ed46ff2013-04-05 07:30:29 +0000143 CommonSanitizerReportMutex.Lock();
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000144}
145
146ScopedReport::~ScopedReport() {
Alexey Samsonov7ed46ff2013-04-05 07:30:29 +0000147 CommonSanitizerReportMutex.Unlock();
Stephen Hines2d1fdb22014-05-28 23:58:16 -0700148 ctx->report_mtx.Unlock();
Dmitry Vyukovaecf2e52012-12-04 15:46:05 +0000149 DestroyAndFree(rep_);
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000150}
151
Stephen Hines6d186232014-11-26 17:56:19 -0800152void ScopedReport::AddStack(StackTrace stack, bool suppressable) {
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000153 ReportStack **rs = rep_->stacks.PushBack();
Stephen Hines6d186232014-11-26 17:56:19 -0800154 *rs = SymbolizeStack(stack);
Stephen Hines6a211c52014-07-21 00:49:56 -0700155 (*rs)->suppressable = suppressable;
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000156}
157
Stephen Hines6d186232014-11-26 17:56:19 -0800158void ScopedReport::AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
159 const MutexSet *mset) {
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000160 void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
161 ReportMop *mop = new(mem) ReportMop;
162 rep_->mops.PushBack(mop);
163 mop->tid = s.tid();
164 mop->addr = addr + s.addr0();
165 mop->size = s.size();
Dmitry Vyukov334553e2013-02-01 09:42:06 +0000166 mop->write = s.IsWrite();
Dmitry Vyukov0a07b352013-02-01 11:10:53 +0000167 mop->atomic = s.IsAtomic();
Stephen Hines6d186232014-11-26 17:56:19 -0800168 mop->stack = SymbolizeStack(stack);
Stephen Hines6a211c52014-07-21 00:49:56 -0700169 if (mop->stack)
170 mop->stack->suppressable = true;
Dmitry Vyukovad9da372012-12-06 12:16:15 +0000171 for (uptr i = 0; i < mset->Size(); i++) {
172 MutexSet::Desc d = mset->Get(i);
Stephen Hines2d1fdb22014-05-28 23:58:16 -0700173 u64 mid = this->AddMutex(d.id);
174 ReportMopMutex mtx = {mid, d.write};
175 mop->mset.PushBack(mtx);
Dmitry Vyukovad9da372012-12-06 12:16:15 +0000176 }
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000177}
178
Stephen Hines2d1fdb22014-05-28 23:58:16 -0700179void ScopedReport::AddUniqueTid(int unique_tid) {
180 rep_->unique_tids.PushBack(unique_tid);
181}
182
Stephen Hines6a211c52014-07-21 00:49:56 -0700183void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) {
Dmitry Vyukovff35f1d2012-08-30 13:02:30 +0000184 for (uptr i = 0; i < rep_->threads.Size(); i++) {
Alexey Samsonov2bbd8be2013-03-15 13:48:44 +0000185 if ((u32)rep_->threads[i]->id == tctx->tid)
Dmitry Vyukovff35f1d2012-08-30 13:02:30 +0000186 return;
187 }
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000188 void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800189 ReportThread *rt = new(mem) ReportThread;
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000190 rep_->threads.PushBack(rt);
191 rt->id = tctx->tid;
Dmitry Vyukov7dccf3f2012-10-02 11:52:05 +0000192 rt->pid = tctx->os_id;
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000193 rt->running = (tctx->status == ThreadStatusRunning);
Stephen Hines2d1fdb22014-05-28 23:58:16 -0700194 rt->name = internal_strdup(tctx->name);
Alexey Samsonov2bbd8be2013-03-15 13:48:44 +0000195 rt->parent_tid = tctx->parent_tid;
Dmitry Vyukov2c5284e2013-03-18 09:02:27 +0000196 rt->stack = 0;
Stephen Hines2d1fdb22014-05-28 23:58:16 -0700197 rt->stack = SymbolizeStackId(tctx->creation_stack_id);
Stephen Hines6a211c52014-07-21 00:49:56 -0700198 if (rt->stack)
199 rt->stack->suppressable = suppressable;
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000200}
201
Stephen Hines86277eb2015-03-23 12:06:32 -0700202#ifndef SANITIZER_GO
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800203static bool FindThreadByUidLockedCallback(ThreadContextBase *tctx, void *arg) {
204 int unique_id = *(int *)arg;
205 return tctx->unique_id == (u32)unique_id;
206}
207
Alexey Samsonovdf2ca172013-03-18 07:02:08 +0000208static ThreadContext *FindThreadByUidLocked(int unique_id) {
Alexey Samsonov2bbd8be2013-03-15 13:48:44 +0000209 ctx->thread_registry->CheckLocked();
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800210 return static_cast<ThreadContext *>(
211 ctx->thread_registry->FindThreadContextLocked(
212 FindThreadByUidLockedCallback, &unique_id));
Dmitry Vyukovff35f1d2012-08-30 13:02:30 +0000213}
Dmitry Vyukovfb917e92013-01-14 10:00:03 +0000214
Dmitry Vyukovf51c3862013-03-18 19:47:36 +0000215static ThreadContext *FindThreadByTidLocked(int tid) {
Dmitry Vyukovf51c3862013-03-18 19:47:36 +0000216 ctx->thread_registry->CheckLocked();
217 return static_cast<ThreadContext*>(
218 ctx->thread_registry->GetThreadLocked(tid));
219}
220
Alexey Samsonovdf2ca172013-03-18 07:02:08 +0000221static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
222 uptr addr = (uptr)arg;
223 ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
224 if (tctx->status != ThreadStatusRunning)
225 return false;
226 ThreadState *thr = tctx->thr;
227 CHECK(thr);
228 return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
229 (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
230}
231
Dmitry Vyukovfb917e92013-01-14 10:00:03 +0000232ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
Alexey Samsonov2bbd8be2013-03-15 13:48:44 +0000233 ctx->thread_registry->CheckLocked();
Alexey Samsonovdf2ca172013-03-18 07:02:08 +0000234 ThreadContext *tctx = static_cast<ThreadContext*>(
235 ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
236 (void*)addr));
237 if (!tctx)
238 return 0;
239 ThreadState *thr = tctx->thr;
240 CHECK(thr);
241 *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
242 return tctx;
Dmitry Vyukovfb917e92013-01-14 10:00:03 +0000243}
Dmitry Vyukov0ab628c2012-09-06 15:18:14 +0000244#endif
Dmitry Vyukovff35f1d2012-08-30 13:02:30 +0000245
Stephen Hines6a211c52014-07-21 00:49:56 -0700246void ScopedReport::AddThread(int unique_tid, bool suppressable) {
Stephen Hines86277eb2015-03-23 12:06:32 -0700247#ifndef SANITIZER_GO
248 if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
249 AddThread(tctx, suppressable);
Stephen Hines2d1fdb22014-05-28 23:58:16 -0700250#endif
251}
252
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000253void ScopedReport::AddMutex(const SyncVar *s) {
Dmitry Vyukovad9da372012-12-06 12:16:15 +0000254 for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
255 if (rep_->mutexes[i]->id == s->uid)
256 return;
257 }
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000258 void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800259 ReportMutex *rm = new(mem) ReportMutex;
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000260 rep_->mutexes.PushBack(rm);
Dmitry Vyukovad9da372012-12-06 12:16:15 +0000261 rm->id = s->uid;
Stephen Hines2d1fdb22014-05-28 23:58:16 -0700262 rm->addr = s->addr;
Dmitry Vyukovad9da372012-12-06 12:16:15 +0000263 rm->destroyed = false;
Stephen Hines2d1fdb22014-05-28 23:58:16 -0700264 rm->stack = SymbolizeStackId(s->creation_stack_id);
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000265}
266
Stephen Hines2d1fdb22014-05-28 23:58:16 -0700267u64 ScopedReport::AddMutex(u64 id) {
268 u64 uid = 0;
269 u64 mid = id;
270 uptr addr = SyncVar::SplitId(id, &uid);
Stephen Hines6a211c52014-07-21 00:49:56 -0700271 SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
Stephen Hines2d1fdb22014-05-28 23:58:16 -0700272 // Check that the mutex is still alive.
273 // Another mutex can be created at the same address,
274 // so check uid as well.
275 if (s && s->CheckId(uid)) {
276 mid = s->uid;
277 AddMutex(s);
278 } else {
279 AddDeadMutex(id);
280 }
281 if (s)
Stephen Hines6a211c52014-07-21 00:49:56 -0700282 s->mtx.Unlock();
Stephen Hines2d1fdb22014-05-28 23:58:16 -0700283 return mid;
284}
285
286void ScopedReport::AddDeadMutex(u64 id) {
Dmitry Vyukovad9da372012-12-06 12:16:15 +0000287 for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
288 if (rep_->mutexes[i]->id == id)
289 return;
290 }
291 void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800292 ReportMutex *rm = new(mem) ReportMutex;
Dmitry Vyukovad9da372012-12-06 12:16:15 +0000293 rep_->mutexes.PushBack(rm);
294 rm->id = id;
Stephen Hines2d1fdb22014-05-28 23:58:16 -0700295 rm->addr = 0;
Dmitry Vyukovad9da372012-12-06 12:16:15 +0000296 rm->destroyed = true;
297 rm->stack = 0;
298}
299
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000300void ScopedReport::AddLocation(uptr addr, uptr size) {
Dmitry Vyukovff35f1d2012-08-30 13:02:30 +0000301 if (addr == 0)
302 return;
Stephen Hines86277eb2015-03-23 12:06:32 -0700303#ifndef SANITIZER_GO
Dmitry Vyukovc2234cd2012-12-18 06:57:34 +0000304 int fd = -1;
305 int creat_tid = -1;
306 u32 creat_stack = 0;
Stephen Hines6d186232014-11-26 17:56:19 -0800307 if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
308 ReportLocation *loc = ReportLocation::New(ReportLocationFD);
Dmitry Vyukovc2234cd2012-12-18 06:57:34 +0000309 loc->fd = fd;
310 loc->tid = creat_tid;
Stephen Hines2d1fdb22014-05-28 23:58:16 -0700311 loc->stack = SymbolizeStackId(creat_stack);
Stephen Hines6d186232014-11-26 17:56:19 -0800312 rep_->locs.PushBack(loc);
Alexey Samsonovdf2ca172013-03-18 07:02:08 +0000313 ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
Dmitry Vyukovc2234cd2012-12-18 06:57:34 +0000314 if (tctx)
315 AddThread(tctx);
316 return;
317 }
Dmitry Vyukov03049412013-04-24 09:20:25 +0000318 MBlock *b = 0;
Stephen Hines6a211c52014-07-21 00:49:56 -0700319 Allocator *a = allocator();
320 if (a->PointerIsMine((void*)addr)) {
321 void *block_begin = a->GetBlockBegin((void*)addr);
322 if (block_begin)
323 b = ctx->metamap.GetBlock((uptr)block_begin);
324 }
325 if (b != 0) {
326 ThreadContext *tctx = FindThreadByTidLocked(b->tid);
Stephen Hines6d186232014-11-26 17:56:19 -0800327 ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
328 loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
329 loc->heap_chunk_size = b->siz;
Stephen Hines6a211c52014-07-21 00:49:56 -0700330 loc->tid = tctx ? tctx->tid : b->tid;
Stephen Hines6a211c52014-07-21 00:49:56 -0700331 loc->stack = SymbolizeStackId(b->stk);
Stephen Hines6d186232014-11-26 17:56:19 -0800332 rep_->locs.PushBack(loc);
Dmitry Vyukovff35f1d2012-08-30 13:02:30 +0000333 if (tctx)
334 AddThread(tctx);
335 return;
336 }
Dmitry Vyukovfb917e92013-01-14 10:00:03 +0000337 bool is_stack = false;
338 if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
Stephen Hines6d186232014-11-26 17:56:19 -0800339 ReportLocation *loc =
340 ReportLocation::New(is_stack ? ReportLocationStack : ReportLocationTLS);
Dmitry Vyukovfb917e92013-01-14 10:00:03 +0000341 loc->tid = tctx->tid;
Stephen Hines6d186232014-11-26 17:56:19 -0800342 rep_->locs.PushBack(loc);
Dmitry Vyukovfb917e92013-01-14 10:00:03 +0000343 AddThread(tctx);
344 }
Stephen Hines6d186232014-11-26 17:56:19 -0800345 if (ReportLocation *loc = SymbolizeData(addr)) {
Stephen Hines6a211c52014-07-21 00:49:56 -0700346 loc->suppressable = true;
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000347 rep_->locs.PushBack(loc);
Dmitry Vyukovff35f1d2012-08-30 13:02:30 +0000348 return;
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000349 }
Dmitry Vyukovfb917e92013-01-14 10:00:03 +0000350#endif
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000351}
352
Stephen Hines86277eb2015-03-23 12:06:32 -0700353#ifndef SANITIZER_GO
Dmitry Vyukov84853112012-08-31 17:27:49 +0000354void ScopedReport::AddSleep(u32 stack_id) {
Stephen Hines2d1fdb22014-05-28 23:58:16 -0700355 rep_->sleep = SymbolizeStackId(stack_id);
Dmitry Vyukov84853112012-08-31 17:27:49 +0000356}
Dmitry Vyukov0ab628c2012-09-06 15:18:14 +0000357#endif
Dmitry Vyukov84853112012-08-31 17:27:49 +0000358
Dmitry Vyukov4536cb12013-03-21 16:55:17 +0000359void ScopedReport::SetCount(int count) {
360 rep_->count = count;
361}
362
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000363const ReportDesc *ScopedReport::GetReport() const {
364 return rep_;
365}
366
Stephen Hines6d186232014-11-26 17:56:19 -0800367void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
368 MutexSet *mset) {
Dmitry Vyukovad9da372012-12-06 12:16:15 +0000369 // This function restores stack trace and mutex set for the thread/epoch.
370 // It does so by getting stack trace and mutex set at the beginning of
371 // trace part, and then replaying the trace till the given epoch.
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800372 Trace* trace = ThreadTrace(tid);
373 ReadLock l(&trace->mtx);
Dmitry Vyukov0415ac02012-12-04 12:19:53 +0000374 const int partidx = (epoch / kTracePartSize) % TraceParts();
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000375 TraceHeader* hdr = &trace->headers[partidx];
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800376 if (epoch < hdr->epoch0 || epoch >= hdr->epoch0 + kTracePartSize)
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000377 return;
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800378 CHECK_EQ(RoundDown(epoch, kTracePartSize), hdr->epoch0);
Dmitry Vyukovad9da372012-12-06 12:16:15 +0000379 const u64 epoch0 = RoundDown(epoch, TraceSize());
Dmitry Vyukovd698edc2012-11-28 12:19:50 +0000380 const u64 eend = epoch % TraceSize();
Dmitry Vyukov0415ac02012-12-04 12:19:53 +0000381 const u64 ebegin = RoundDown(eend, kTracePartSize);
Alexey Samsonove9541012012-06-06 13:11:29 +0000382 DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
383 tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800384 Vector<uptr> stack(MBlockReportStack);
385 stack.Resize(hdr->stack0.size + 64);
Stephen Hines6d186232014-11-26 17:56:19 -0800386 for (uptr i = 0; i < hdr->stack0.size; i++) {
387 stack[i] = hdr->stack0.trace[i];
Stephen Hines86277eb2015-03-23 12:06:32 -0700388 DPrintf2(" #%02zu: pc=%zx\n", i, stack[i]);
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000389 }
Dmitry Vyukovad9da372012-12-06 12:16:15 +0000390 if (mset)
391 *mset = hdr->mset0;
Stephen Hines6d186232014-11-26 17:56:19 -0800392 uptr pos = hdr->stack0.size;
Dmitry Vyukov385542a2012-11-28 10:35:31 +0000393 Event *events = (Event*)GetThreadTrace(tid);
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000394 for (uptr i = ebegin; i <= eend; i++) {
Dmitry Vyukov385542a2012-11-28 10:35:31 +0000395 Event ev = events[i];
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000396 EventType typ = (EventType)(ev >> 61);
Dmitry Vyukovad9da372012-12-06 12:16:15 +0000397 uptr pc = (uptr)(ev & ((1ull << 61) - 1));
Alexey Samsonove9541012012-06-06 13:11:29 +0000398 DPrintf2(" %zu typ=%d pc=%zx\n", i, typ, pc);
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000399 if (typ == EventTypeMop) {
400 stack[pos] = pc;
401 } else if (typ == EventTypeFuncEnter) {
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800402 if (stack.Size() < pos + 2)
403 stack.Resize(pos + 2);
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000404 stack[pos++] = pc;
405 } else if (typ == EventTypeFuncExit) {
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000406 if (pos > 0)
407 pos--;
408 }
Dmitry Vyukovad9da372012-12-06 12:16:15 +0000409 if (mset) {
410 if (typ == EventTypeLock) {
411 mset->Add(pc, true, epoch0 + i);
412 } else if (typ == EventTypeUnlock) {
413 mset->Del(pc, true);
414 } else if (typ == EventTypeRLock) {
415 mset->Add(pc, false, epoch0 + i);
416 } else if (typ == EventTypeRUnlock) {
417 mset->Del(pc, false);
418 }
419 }
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000420 for (uptr j = 0; j <= pos; j++)
Alexey Samsonove9541012012-06-06 13:11:29 +0000421 DPrintf2(" #%zu: %zx\n", j, stack[j]);
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000422 }
423 if (pos == 0 && stack[0] == 0)
424 return;
425 pos++;
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800426 stk->Init(&stack[0], pos);
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000427}
428
Stephen Hines6d186232014-11-26 17:56:19 -0800429static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
430 uptr addr_min, uptr addr_max) {
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000431 bool equal_stack = false;
Dmitry Vyukove0c45612013-06-11 11:44:43 +0000432 RacyStacks hash;
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000433 bool equal_address = false;
434 RacyAddress ra0 = {addr_min, addr_max};
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800435 {
436 ReadLock lock(&ctx->racy_mtx);
437 if (flags()->suppress_equal_stacks) {
438 hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
439 hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
440 for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
441 if (hash == ctx->racy_stacks[i]) {
442 VPrintf(2,
443 "ThreadSanitizer: suppressing report as doubled (stack)\n");
444 equal_stack = true;
445 break;
446 }
447 }
448 }
449 if (flags()->suppress_equal_addresses) {
450 for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
451 RacyAddress ra2 = ctx->racy_addresses[i];
452 uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
453 uptr minend = min(ra0.addr_max, ra2.addr_max);
454 if (maxbeg < minend) {
455 VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
456 equal_address = true;
457 break;
458 }
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000459 }
460 }
461 }
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800462 if (!equal_stack && !equal_address)
463 return false;
464 if (!equal_stack) {
465 Lock lock(&ctx->racy_mtx);
466 ctx->racy_stacks.PushBack(hash);
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000467 }
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800468 if (!equal_address) {
469 Lock lock(&ctx->racy_mtx);
470 ctx->racy_addresses.PushBack(ra0);
471 }
472 return true;
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000473}
474
Stephen Hines6d186232014-11-26 17:56:19 -0800475static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
476 uptr addr_min, uptr addr_max) {
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800477 Lock lock(&ctx->racy_mtx);
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000478 if (flags()->suppress_equal_stacks) {
Dmitry Vyukove0c45612013-06-11 11:44:43 +0000479 RacyStacks hash;
Stephen Hines6d186232014-11-26 17:56:19 -0800480 hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
481 hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000482 ctx->racy_stacks.PushBack(hash);
483 }
484 if (flags()->suppress_equal_addresses) {
485 RacyAddress ra0 = {addr_min, addr_max};
486 ctx->racy_addresses.PushBack(ra0);
487 }
488}
489
Stephen Hines6a211c52014-07-21 00:49:56 -0700490bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800491 if (!flags()->report_bugs)
492 return false;
493 atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000494 const ReportDesc *rep = srep.GetReport();
Dmitry Vyukovf754eb52013-03-27 17:59:57 +0000495 Suppression *supp = 0;
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800496 uptr pc_or_addr = 0;
497 for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
498 pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
499 for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++)
500 pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp);
501 for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++)
502 pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
503 for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++)
504 pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp);
505 if (pc_or_addr != 0) {
506 Lock lock(&ctx->fired_suppressions_mtx);
507 FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp};
Alexey Samsonov0a05e5f2013-06-14 11:18:58 +0000508 ctx->fired_suppressions.push_back(s);
Dmitry Vyukov158c6ac2012-10-05 15:51:32 +0000509 }
Stephen Hines6a211c52014-07-21 00:49:56 -0700510 {
511 bool old_is_freeing = thr->is_freeing;
512 thr->is_freeing = false;
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800513 bool suppressed = OnReport(rep, pc_or_addr != 0);
Stephen Hines6a211c52014-07-21 00:49:56 -0700514 thr->is_freeing = old_is_freeing;
515 if (suppressed)
516 return false;
517 }
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000518 PrintReport(rep);
Dmitry Vyukovdbac0a42013-08-13 15:33:00 +0000519 ctx->nreported++;
520 if (flags()->halt_on_error)
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800521 Die();
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000522 return true;
523}
524
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800525bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) {
526 ReadLock lock(&ctx->fired_suppressions_mtx);
Alexey Samsonov0a05e5f2013-06-14 11:18:58 +0000527 for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800528 if (ctx->fired_suppressions[k].type != type)
Dmitry Vyukov158c6ac2012-10-05 15:51:32 +0000529 continue;
Stephen Hines6d186232014-11-26 17:56:19 -0800530 for (uptr j = 0; j < trace.size; j++) {
Dmitry Vyukovf754eb52013-03-27 17:59:57 +0000531 FiredSuppression *s = &ctx->fired_suppressions[k];
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800532 if (trace.trace[j] == s->pc_or_addr) {
Dmitry Vyukovf754eb52013-03-27 17:59:57 +0000533 if (s->supp)
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800534 atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
Dmitry Vyukov158c6ac2012-10-05 15:51:32 +0000535 return true;
Dmitry Vyukovf754eb52013-03-27 17:59:57 +0000536 }
Dmitry Vyukov158c6ac2012-10-05 15:51:32 +0000537 }
538 }
539 return false;
540}
541
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800542static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
543 ReadLock lock(&ctx->fired_suppressions_mtx);
Alexey Samsonov0a05e5f2013-06-14 11:18:58 +0000544 for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800545 if (ctx->fired_suppressions[k].type != type)
Dmitry Vyukov39968332013-06-10 15:38:44 +0000546 continue;
547 FiredSuppression *s = &ctx->fired_suppressions[k];
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800548 if (addr == s->pc_or_addr) {
Dmitry Vyukov39968332013-06-10 15:38:44 +0000549 if (s->supp)
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800550 atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
Dmitry Vyukov39968332013-06-10 15:38:44 +0000551 return true;
552 }
553 }
554 return false;
555}
556
Dmitry Vyukov32858662013-02-01 14:41:58 +0000557static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
558 Shadow s0(thr->racy_state[0]);
559 Shadow s1(thr->racy_state[1]);
560 CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
561 if (!s0.IsAtomic() && !s1.IsAtomic())
562 return true;
563 if (s0.IsAtomic() && s1.IsFreed())
564 return true;
565 if (s1.IsAtomic() && thr->is_freeing)
566 return true;
567 return false;
568}
569
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000570void ReportRace(ThreadState *thr) {
Stephen Hines6a211c52014-07-21 00:49:56 -0700571 CheckNoLocks(thr);
572
Stephen Hines2d1fdb22014-05-28 23:58:16 -0700573 // Symbolizer makes lots of intercepted calls. If we try to process them,
574 // at best it will cause deadlocks on internal mutexes.
575 ScopedIgnoreInterceptors ignore;
576
Dmitry Vyukov8a326772012-11-07 16:14:12 +0000577 if (!flags()->report_bugs)
578 return;
Dmitry Vyukov32858662013-02-01 14:41:58 +0000579 if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
580 return;
581
Dmitry Vyukov069ce822012-05-17 14:17:51 +0000582 bool freed = false;
583 {
584 Shadow s(thr->racy_state[1]);
585 freed = s.GetFreedAndReset();
586 thr->racy_state[1] = s.raw();
587 }
588
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000589 uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
590 uptr addr_min = 0;
591 uptr addr_max = 0;
592 {
593 uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
594 uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
595 uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
596 uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
597 addr_min = min(a0, a1);
598 addr_max = max(e0, e1);
599 if (IsExpectedReport(addr_min, addr_max - addr_min))
600 return;
601 }
602
Dmitry Vyukov0dc47b62013-03-21 15:37:39 +0000603 ReportType typ = ReportTypeRace;
Stephen Hines6d186232014-11-26 17:56:19 -0800604 if (thr->is_vptr_access && freed)
605 typ = ReportTypeVptrUseAfterFree;
606 else if (thr->is_vptr_access)
Dmitry Vyukov0dc47b62013-03-21 15:37:39 +0000607 typ = ReportTypeVptrRace;
608 else if (freed)
609 typ = ReportTypeUseAfterFree;
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800610
611 if (IsFiredSuppression(ctx, typ, addr))
Dmitry Vyukov39968332013-06-10 15:38:44 +0000612 return;
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800613
Dmitry Vyukov069ce822012-05-17 14:17:51 +0000614 const uptr kMop = 2;
Stephen Hines6d186232014-11-26 17:56:19 -0800615 VarSizeStackTrace traces[kMop];
Dmitry Vyukov385542a2012-11-28 10:35:31 +0000616 const uptr toppc = TraceTopPC(thr);
Stephen Hines6d186232014-11-26 17:56:19 -0800617 ObtainCurrentStack(thr, toppc, &traces[0]);
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800618 if (IsFiredSuppression(ctx, typ, traces[0]))
Dmitry Vyukov158c6ac2012-10-05 15:51:32 +0000619 return;
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800620
621 // MutexSet is too large to live on stack.
622 Vector<u64> mset_buffer(MBlockScopedBuf);
623 mset_buffer.Resize(sizeof(MutexSet) / sizeof(u64) + 1);
624 MutexSet *mset2 = new(&mset_buffer[0]) MutexSet();
625
Dmitry Vyukov158c6ac2012-10-05 15:51:32 +0000626 Shadow s2(thr->racy_state[1]);
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800627 RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2);
628 if (IsFiredSuppression(ctx, typ, traces[1]))
Dmitry Vyukov39968332013-06-10 15:38:44 +0000629 return;
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000630
631 if (HandleRacyStacks(thr, traces, addr_min, addr_max))
632 return;
633
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800634 ThreadRegistryLock l0(ctx->thread_registry);
635 ScopedReport rep(typ);
Dmitry Vyukov069ce822012-05-17 14:17:51 +0000636 for (uptr i = 0; i < kMop; i++) {
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000637 Shadow s(thr->racy_state[i]);
Pirama Arumuga Nainar799172d2016-03-03 15:50:30 -0800638 rep.AddMemoryAccess(addr, s, traces[i], i == 0 ? &thr->mset : mset2);
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000639 }
640
Dmitry Vyukov069ce822012-05-17 14:17:51 +0000641 for (uptr i = 0; i < kMop; i++) {
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000642 FastState s(thr->racy_state[i]);
Alexey Samsonov2bbd8be2013-03-15 13:48:44 +0000643 ThreadContext *tctx = static_cast<ThreadContext*>(
644 ctx->thread_registry->GetThreadLocked(s.tid()));
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000645 if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
646 continue;
647 rep.AddThread(tctx);
648 }
649
Dmitry Vyukovff35f1d2012-08-30 13:02:30 +0000650 rep.AddLocation(addr_min, addr_max - addr_min);
651
Stephen Hines86277eb2015-03-23 12:06:32 -0700652#ifndef SANITIZER_GO
Dmitry Vyukov84853112012-08-31 17:27:49 +0000653 { // NOLINT
654 Shadow s(thr->racy_state[1]);
655 if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
656 rep.AddSleep(thr->last_sleep_stack_id);
657 }
658#endif
659
Stephen Hines6a211c52014-07-21 00:49:56 -0700660 if (!OutputReport(thr, rep))
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000661 return;
662
663 AddRacyStacks(thr, traces, addr_min, addr_max);
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000664}
665
Dmitry Vyukov1da10562012-09-01 12:13:18 +0000666void PrintCurrentStack(ThreadState *thr, uptr pc) {
Stephen Hines6d186232014-11-26 17:56:19 -0800667 VarSizeStackTrace trace;
668 ObtainCurrentStack(thr, pc, &trace);
Dmitry Vyukov1da10562012-09-01 12:13:18 +0000669 PrintStack(SymbolizeStack(trace));
670}
671
Stephen Hines6d186232014-11-26 17:56:19 -0800672void PrintCurrentStackSlow(uptr pc) {
Stephen Hines86277eb2015-03-23 12:06:32 -0700673#ifndef SANITIZER_GO
Stephen Hines6d186232014-11-26 17:56:19 -0800674 BufferedStackTrace *ptrace =
675 new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
676 BufferedStackTrace();
677 ptrace->Unwind(kStackTraceMax, pc, 0, 0, 0, 0, false);
Dmitry Vyukov924047f2013-04-30 11:57:32 +0000678 for (uptr i = 0; i < ptrace->size / 2; i++) {
Stephen Hines6d186232014-11-26 17:56:19 -0800679 uptr tmp = ptrace->trace_buffer[i];
680 ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
681 ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
Dmitry Vyukov924047f2013-04-30 11:57:32 +0000682 }
Stephen Hines6d186232014-11-26 17:56:19 -0800683 PrintStack(SymbolizeStack(*ptrace));
Dmitry Vyukov793e7612013-01-29 14:20:12 +0000684#endif
685}
686
Kostya Serebryany7ac41482012-05-10 13:48:04 +0000687} // namespace __tsan
Stephen Hines6d186232014-11-26 17:56:19 -0800688
using namespace __tsan;

extern "C" {
// Public sanitizer API: prints the caller's current stack trace to the
// report stream using the slow unwinder.
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
}  // extern "C"