//===-- tsan_rtl.cc -------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "ubsan/ubsan_init.h"

#ifdef __SSE3__
// <emmintrin.h> transitively includes <stdlib.h>,
// and it's prohibited to include std headers into tsan runtime.
// So we do this dirty trick.
#define _MM_MALLOC_H_INCLUDED
#define __MM_MALLOC_H
#include <emmintrin.h>
typedef __m128i m128;
#endif

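// Set from __tsan_resume(); Initialize() below spins on this flag when
// flags()->stop_on_start is set, so a debugger can attach before the
// program starts running.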
volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

#if !defined(SANITIZER_GO) && !SANITIZER_MAC
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
Context *ctx;

// Can be overridden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
void OnInitialize();
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
  return failed;
}
SANITIZER_WEAK_CXX_DEFAULT_IMPL
void OnInitialize() {}
#endif

static char thread_registry_placeholder[sizeof(ThreadRegistry)];

static ThreadContextBase *CreateThreadContext(u32 tid) {
  // Map thread trace when context is created.
  char name[50];
  internal_snprintf(name, sizeof(name), "trace %u", tid);
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event), name);
  const uptr hdr = GetThreadTraceHeader(tid);
  internal_snprintf(name, sizeof(name), "trace header %u", tid);
  MapThreadTrace(hdr, sizeof(Trace), name);
  new((void*)hdr) Trace();
  // We are going to use only a small part of the trace with the default
  // value of history_size. However, the constructor writes to the whole trace.
  // Unmap the unused part.
  uptr hdr_end = hdr + sizeof(Trace);
  hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts());
  hdr_end = RoundUp(hdr_end, GetPageSizeCached());
  if (hdr_end < hdr + sizeof(Trace))
    UnmapOrDie((void*)hdr_end, hdr + sizeof(Trace) - hdr_end);
  void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
  return new(mem) ThreadContext(tid);
}

#ifndef SANITIZER_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif

Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
      CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse))
  , racy_mtx(MutexTypeRacy, StatMtxRacy)
  , racy_stacks(MBlockRacyStacks)
  , racy_addresses(MBlockRacyAddresses)
  , fired_suppressions_mtx(MutexTypeFired, StatMtxFired)
  , fired_suppressions(8) {
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         unsigned reuse_count,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , ignore_reads_and_writes()
  // , ignore_interceptors()
  , clock(tid, reuse_count)
#ifndef SANITIZER_GO
  , jmp_bufs(MBlockJmpBuf)
#endif
  , tid(tid)
  , unique_id(unique_id)
  , stk_addr(stk_addr)
  , stk_size(stk_size)
  , tls_addr(tls_addr)
  , tls_size(tls_size)
#ifndef SANITIZER_GO
  , last_sleep_clock(tid)
#endif
{
}

#ifndef SANITIZER_GO
static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
  uptr n_threads;
  uptr n_running_threads;
  ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
  InternalScopedBuffer<char> buf(4096);
  WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads);
  WriteToFile(fd, buf.data(), internal_strlen(buf.data()));
}

static void BackgroundThread(void *arg) {
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
  // shutdown code).
  cur_thread()->ignore_interceptors++;
  const u64 kMs2Ns = 1000 * 1000;

  fd_t mprof_fd = kInvalidFd;
  if (flags()->profile_memory && flags()->profile_memory[0]) {
    if (internal_strcmp(flags()->profile_memory, "stdout") == 0) {
      mprof_fd = 1;
    } else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) {
      mprof_fd = 2;
    } else {
      InternalScopedString filename(kMaxPathLength);
      filename.append("%s.%d", flags()->profile_memory, (int)internal_getpid());
      fd_t fd = OpenFile(filename.data(), WrOnly);
      if (fd == kInvalidFd) {
        Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
            &filename[0]);
      } else {
        mprof_fd = fd;
      }
    }
  }

  u64 last_flush = NanoTime();
  uptr last_rss = 0;
  for (int i = 0;
      atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
      i++) {
    SleepForMillis(100);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        last_flush = NanoTime();
      }
    }
    // GetRSS can be expensive on huge programs, so don't do it every 100ms.
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      VPrintf(1, "ThreadSanitizer: memory flush check"
                 " RSS=%llu LAST=%llu LIMIT=%llu\n",
              (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
      if (2 * rss > limit + last_rss) {
        VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
      }
      last_rss = rss;
    }

    // Write memory profile if requested.
    if (mprof_fd != kInvalidFd)
      MemoryProfiler(ctx, mprof_fd, i);

    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        SpinMutexLock l2(&CommonSanitizerReportMutex);
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
  }
}

static void StartBackgroundThread() {
  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}

#ifndef __mips__
static void StopBackgroundThread() {
  atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
  internal_join_thread(ctx->background_thread);
  ctx->background_thread = 0;
}
#endif
#endif

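// Releases the shadow backing [addr, addr+size) back to the OS when the
// application no longer needs the range, so unused shadow does not hold
// physical memory.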
void DontNeedShadowFor(uptr addr, uptr size) {
  uptr shadow_beg = MemToShadow(addr);
  uptr shadow_end = MemToShadow(addr + size);
  FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
}

void MapShadow(uptr addr, uptr size) {
  // Global data is not 64K aligned, but there are no adjacent mappings,
  // so we can get away with unaligned mapping.
  // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier, "shadow");

  // Meta shadow is 2:1, so tread carefully.
  static bool data_mapped = false;
  static uptr mapped_meta_end = 0;
  uptr meta_begin = (uptr)MemToMeta(addr);
  uptr meta_end = (uptr)MemToMeta(addr + size);
  meta_begin = RoundDownTo(meta_begin, 64 << 10);
  meta_end = RoundUpTo(meta_end, 64 << 10);
  if (!data_mapped) {
    // First call maps data+bss.
    data_mapped = true;
    MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow");
  } else {
    // Mapping continuous heap.
    // Windows wants 64K alignment.
    meta_begin = RoundDownTo(meta_begin, 64 << 10);
    meta_end = RoundUpTo(meta_end, 64 << 10);
    if (meta_end <= mapped_meta_end)
      return;
    if (meta_begin < mapped_meta_end)
      meta_begin = mapped_meta_end;
    MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow");
    mapped_meta_end = meta_end;
  }
  VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n",
      addr, addr+size, meta_begin, meta_end);
}

void MapThreadTrace(uptr addr, uptr size, const char *name) {
  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, TraceMemBeg());
  CHECK_LE(addr + size, TraceMemEnd());
  CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  uptr addr1 = (uptr)MmapFixedNoReserve(addr, size, name);
  if (addr1 != addr) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p->%p)\n",
        addr, size, addr1);
    Die();
  }
}

static void CheckShadowMapping() {
  uptr beg, end;
  for (int i = 0; GetUserRegion(i, &beg, &end); i++) {
    VPrintf(3, "checking shadow region %p-%p\n", beg, end);
    for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
      for (int x = -1; x <= 1; x++) {
        const uptr p = p0 + x;
        if (p < beg || p >= end)
          continue;
        const uptr s = MemToShadow(p);
        const uptr m = (uptr)MemToMeta(p);
        VPrintf(3, "  checking pointer %p: shadow=%p meta=%p\n", p, s, m);
        CHECK(IsAppMem(p));
        CHECK(IsShadowMem(s));
        CHECK_EQ(p & ~(kShadowCell - 1), ShadowToMem(s));
        CHECK(IsMetaMem(m));
      }
    }
  }
}

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  // We are not ready to handle interceptors yet.
  ScopedIgnoreInterceptors ignore;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  ctx = new(ctx_placeholder) Context;
  const char *options = GetEnv(kTsanOptionsEnv);
  CacheBinaryName();
  InitializeFlags(&ctx->flags, options);
  InitializePlatformEarly();
#ifndef SANITIZER_GO
  // Re-exec ourselves if we need to set additional env or command line args.
  MaybeReexec();

  InitializeAllocator();
  ReplaceSystemMalloc();
#endif
  InitializeInterceptors();
  CheckShadowMapping();
  InitializePlatform();
  InitializeMutex();
  InitializeDynamicAnnotations();
#ifndef SANITIZER_GO
  InitializeShadowMemory();
#endif
  // Setup correct file descriptor for error reports.
  __sanitizer_set_report_path(common_flags()->log_path);
  InitializeSuppressions();
#ifndef SANITIZER_GO
  InitializeLibIgnore();
  Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
  // On MIPS, TSan initialization is run before
  // __pthread_initialize_minimal_internal() is finished, so we can not spawn
  // new threads.
#ifndef __mips__
  StartBackgroundThread();
  SetSandboxingCallback(StopBackgroundThread);
#endif
#endif
  if (common_flags()->detect_deadlocks)
    ctx->dd = DDetector::Create(flags());

  VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
          (int)internal_getpid());

  // Initialize thread 0.
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, internal_getpid());
#if TSAN_CONTAINS_UBSAN
  __ubsan::InitAsPlugin();
#endif
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }

  OnInitialize();
}

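// Returns the process exit status: common_flags()->exitcode if any races
// were reported or expected races were missed, 0 otherwise.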
int Finalize(ThreadState *thr) {
  bool failed = false;

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
  CommonSanitizerReportMutex.Unlock();
  ctx->report_mtx.Unlock();

#ifndef SANITIZER_GO
  if (Verbosity()) AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#ifndef SANITIZER_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
        ctx->nmissed_expected);
  }

  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
#ifndef SANITIZER_GO
  if (flags()->print_benign)
    PrintMatchedBenignRaces();
#endif

  failed = OnFinalize(failed);

#if TSAN_COLLECT_STATS
  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
#endif

  return failed ? common_flags()->exitcode : 0;
}

#ifndef SANITIZER_GO
void ForkBefore(ThreadState *thr, uptr pc) {
  ctx->thread_registry->Lock();
  ctx->report_mtx.Lock();
}

void ForkParentAfter(ThreadState *thr, uptr pc) {
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();
}

void ForkChildAfter(ThreadState *thr, uptr pc) {
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();

  uptr nthread = 0;
  ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */);
  VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
      " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
  if (nthread == 1) {
    StartBackgroundThread();
  } else {
    // We've just forked a multi-threaded process. We cannot reasonably function
    // after that (some mutexes may be locked before fork). So just enable
    // ignores for everything in the hope that we will exec soon.
    ctx->after_multithreaded_fork = true;
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, pc);
    ThreadIgnoreSyncBegin(thr, pc);
  }
}
#endif

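// In Go mode the shadow (call) stack starts small and is grown on demand;
// in C++ mode it is allocated at full size up front, so it never grows
// (see the DCHECK in FuncEntry below).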
#ifdef SANITIZER_GO
NOINLINE
void GrowShadowStack(ThreadState *thr) {
  const int sz = thr->shadow_stack_end - thr->shadow_stack;
  const int newsz = 2 * sz;
  uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
      newsz * sizeof(uptr));
  internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
  internal_free(thr->shadow_stack);
  thr->shadow_stack = newstack;
  thr->shadow_stack_pos = newstack + sz;
  thr->shadow_stack_end = newstack + newsz;
}
#endif

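// Returns a stack depot id for the current shadow call stack, optionally
// with pc pushed on top; used e.g. to remember where ignores were begun
// (see ThreadIgnoreBegin below).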
u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (!thr->is_inited)  // May happen during bootstrap.
    return 0;
  if (pc != 0) {
#ifndef SANITIZER_GO
    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
    if (thr->shadow_stack_pos == thr->shadow_stack_end)
      GrowShadowStack(thr);
#endif
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(
      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
  if (pc != 0)
    thr->shadow_stack_pos--;
  return id;
}

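// Opens the next part of the thread's event trace: records the starting
// epoch, call stack and mutex set in the part header, so reports can
// restore the thread state at any event within this part.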
void TraceSwitch(ThreadState *thr) {
  thr->nomalloc++;
  Trace *thr_trace = ThreadTrace(thr->tid);
  Lock l(&thr_trace->mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr_trace->headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  ObtainCurrentStack(thr, 0, &hdr->stack0);
  hdr->mset0 = thr->mset;
  thr->nomalloc--;
}

Trace *ThreadTrace(int tid) {
  return (Trace*)GetThreadTraceHeader(tid);
}

uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}

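// Invoked from instrumented code via HACKY_CALL (see tsan_rtl.h), which
// keeps the instrumentation fast path lean.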
#ifndef SANITIZER_GO
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif

ALWAYS_INLINE
Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

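// Stores the shadow value and zeroes *s; MemoryAccessImpl1 uses
// *s == 0 (store_word == 0) as the signal that the current access
// has already been recorded.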
ALWAYS_INLINE
void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}

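// Stashes the two conflicting shadow values in the thread state and
// dispatches to the (cold) race reporting path.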
ALWAYS_INLINE
void HandleRace(ThreadState *thr, u64 *shadow_mem,
                Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#ifndef SANITIZER_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}

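// The old access happens-before the current one if the current thread's
// vector clock entry for the old access's thread has reached the old epoch.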
static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}

ALWAYS_INLINE
void MemoryAccessImpl1(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();

  // scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // we consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. initially I considered
  // larger and smaller as well, it allowed to replace some
  // 'candidates' with 'same' or 'replace', but I think
  // it's just not worth it (performance- and complexity-wise).

  Shadow old(0);

  // In release mode we manually unroll the loop,
  // because empirically gcc generates better code this way.
  // However, we can't afford unrolling in debug mode, because the function
  // consumes almost 4K of stack. Gtest gives only 4K of stack to death test
  // threads, which is not enough for the unrolled loop.
#if SANITIZER_DEBUG
  for (int idx = 0; idx < 4; idx++) {
#include "tsan_update_shadow_word_inl.h"
  }
#else
  int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  idx = 1;
#include "tsan_update_shadow_word_inl.h"
  idx = 2;
#include "tsan_update_shadow_word_inl.h"
  idx = 3;
#include "tsan_update_shadow_word_inl.h"
#endif

  // we did not find any races and had already stored
  // the current access info, so we are done
  if (LIKELY(store_word == 0))
    return;
  // choose a random candidate slot and replace it
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}

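// Splits an arbitrary (possibly unaligned) access into a sequence of
// 1/2/4/8-byte accesses, each of which stays within one 8-byte shadow cell.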
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic) {
  while (size) {
    int size1 = 1;
    int kAccessSizeLog = kSizeLog1;
    if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
      size1 = 8;
      kAccessSizeLog = kSizeLog8;
    } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
      size1 = 4;
      kAccessSizeLog = kSizeLog4;
    } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
      size1 = 2;
      kAccessSizeLog = kSizeLog2;
    }
    MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
    addr += size1;
    size -= size1;
  }
}

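// Fast-path filter: returns true if one of the shadow slots already records
// an equivalent (or stronger) access by the same thread after the last
// synchronization point, so the current access adds no new information.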
ALWAYS_INLINE
bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  Shadow cur(a);
  for (uptr i = 0; i < kShadowCnt; i++) {
    Shadow old(LoadShadow(&s[i]));
    if (Shadow::Addr0AndSizeAreEqual(cur, old) &&
        old.TidWithIgnore() == cur.TidWithIgnore() &&
        old.epoch() > sync_epoch &&
        old.IsAtomic() == cur.IsAtomic() &&
        old.IsRead() <= cur.IsRead())
      return true;
  }
  return false;
}

#if defined(__SSE3__)
#define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \
    _mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \
    (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
ALWAYS_INLINE
bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  // This is an optimized version of ContainsSameAccessSlow.
  // load current access into access[0:63]
  const m128 access = _mm_cvtsi64_si128(a);
  // duplicate high part of access in addr0:
  // addr0[0:31]        = access[32:63]
  // addr0[32:63]       = access[32:63]
  // addr0[64:95]       = access[32:63]
  // addr0[96:127]      = access[32:63]
  const m128 addr0 = SHUF(access, access, 1, 1, 1, 1);
  // load 4 shadow slots
  const m128 shadow0 = _mm_load_si128((__m128i*)s);
  const m128 shadow1 = _mm_load_si128((__m128i*)s + 1);
  // load high parts of 4 shadow slots into addr_vect:
  // addr_vect[0:31]    = shadow0[32:63]
  // addr_vect[32:63]   = shadow0[96:127]
  // addr_vect[64:95]   = shadow1[32:63]
  // addr_vect[96:127]  = shadow1[96:127]
  m128 addr_vect = SHUF(shadow0, shadow1, 1, 3, 1, 3);
  if (!is_write) {
    // set IsRead bit in addr_vect
    const m128 rw_mask1 = _mm_cvtsi64_si128(1<<15);
    const m128 rw_mask = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0);
    addr_vect = _mm_or_si128(addr_vect, rw_mask);
  }
  // addr0 == addr_vect?
  const m128 addr_res = _mm_cmpeq_epi32(addr0, addr_vect);
  // epoch1[0:63]       = sync_epoch
  const m128 epoch1 = _mm_cvtsi64_si128(sync_epoch);
  // epoch[0:31]        = sync_epoch[0:31]
  // epoch[32:63]       = sync_epoch[0:31]
  // epoch[64:95]       = sync_epoch[0:31]
  // epoch[96:127]      = sync_epoch[0:31]
  const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0);
  // load low parts of shadow cell epochs into epoch_vect:
  // epoch_vect[0:31]   = shadow0[0:31]
  // epoch_vect[32:63]  = shadow0[64:95]
  // epoch_vect[64:95]  = shadow1[0:31]
  // epoch_vect[96:127] = shadow1[64:95]
  const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2);
  // epoch_vect >= sync_epoch?
  const m128 epoch_res = _mm_cmpgt_epi32(epoch_vect, epoch);
  // addr_res & epoch_res
  const m128 res = _mm_and_si128(addr_res, epoch_res);
  // mask[0] = res[7]
  // mask[1] = res[15]
  // ...
  // mask[15] = res[127]
  const int mask = _mm_movemask_epi8(res);
  return mask != 0;
}
#endif

ALWAYS_INLINE
bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
#if defined(__SSE3__)
  bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
  // NOTE: this check can fail if the shadow is concurrently mutated
  // by other threads. But it still can be useful if you modify
  // ContainsSameAccessFast and want to ensure that it's not completely broken.
  // DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
  return res;
#else
  return ContainsSameAccessSlow(s, a, sync_epoch, is_write);
#endif
}

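// The main race detection entry point: invoked (after inlining) for every
// instrumented application load and store.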
ALWAYS_INLINE USED
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
#if SANITIZER_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  if (kCppMode && *shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit()) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopIgnored);
    return;
  }

  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);
  cur.SetAtomic(kIsAtomic);

  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
      thr->fast_synch_epoch, kAccessIsWrite))) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopSame);
    return;
  }

  if (kCollectHistory) {
    fast_state.IncrementEpoch();
    thr->fast_state = fast_state;
    TraceAddEvent(thr, fast_state, EventTypeMop, pc);
    cur.IncrementEpoch();
  }

  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}

// Called by MemoryAccessRange in tsan_rtl_thread.cc
ALWAYS_INLINE USED
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
      thr->fast_synch_epoch, kAccessIsWrite))) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopSame);
    return;
  }

  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}

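// Overwrites the shadow of [addr, addr+size) with the given shadow value;
// val == 0 wipes the access history for the range.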
static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  (void)thr;
  (void)pc;
  if (size == 0)
    return;
  // FIXME: fix me.
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  // Don't want to touch lots of shadow memory.
  // If a program maps a 10MB stack, there is no need to reset the whole range.
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  // UnmapOrDie/MmapFixedNoReserve does not work on Windows,
  // so we do it only for C/C++.
  if (kGoMode || size < common_flags()->clear_shadow_mmap_threshold) {
    u64 *p = (u64*)MemToShadow(addr);
    CHECK(IsShadowMem((uptr)p));
    CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
    // FIXME: may overwrite a part outside the region
    for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
      p[i++] = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        p[i++] = 0;
    }
  } else {
    // The region is big, reset only beginning and end.
    const uptr kPageSize = GetPageSizeCached();
    u64 *begin = (u64*)MemToShadow(addr);
    u64 *end = begin + size / kShadowCell * kShadowCnt;
    u64 *p = begin;
    // Set at least first kPageSize/2 to page boundary.
    while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
    // Reset middle part.
    u64 *p1 = p;
    p = RoundDown(end, kPageSize);
    UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
    MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1);
    // Set the ending.
    while (p < end) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
  }
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  // Processing more than 1k (4k of shadow) is expensive,
  // can cause excessive memory consumption (the user does not necessarily
  // touch the whole range) and is most likely unnecessary.
  if (size > 1024)
    size = 1024;
  CHECK_EQ(thr->is_freeing, false);
  thr->is_freeing = true;
  MemoryAccessRange(thr, pc, addr, size, true);
  thr->is_freeing = false;
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

ALWAYS_INLINE USED
void FuncEntry(ThreadState *thr, uptr pc) {
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
  }

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
  DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
#ifndef SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end)
    GrowShadowStack(thr);
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

ALWAYS_INLINE USED
void FuncExit(ThreadState *thr) {
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
  }

  DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
#ifndef SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
  thr->shadow_stack_pos--;
}

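// While ignore_reads_and_writes is non-zero, this thread's memory accesses
// are not tracked; the ignore bit is mirrored into fast_state so the hot
// path of MemoryAccess sees it for free. ignore_sync does the same for
// synchronization operations.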
void ThreadIgnoreBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
#ifndef SANITIZER_GO
  if (!ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  thr->ignore_reads_and_writes--;
  CHECK_GE(thr->ignore_reads_and_writes, 0);
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
#ifndef SANITIZER_GO
    thr->mop_ignore_set.Reset();
#endif
  }
}

void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
#ifndef SANITIZER_GO
  if (!ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  thr->ignore_sync--;
  CHECK_GE(thr->ignore_sync, 0);
#ifndef SANITIZER_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
#endif
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

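// Build consistency check: exactly one function of each pair is defined,
// so objects compiled with a mismatched configuration fail to link against
// this runtime.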
#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

}  // namespace __tsan

#ifndef SANITIZER_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif