tsan: dynamic history size
Introduces the history_size parameter, which controls the size of the per-thread trace (the history of memory accesses) at startup.
git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@168786 91177308-0d34-0410-b5e6-96231b3b80d8
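
The new flag is read from the TSAN_OPTIONS environment variable like the
other runtime flags parsed in tsan_flags.cc below; for example,
TSAN_OPTIONS=history_size=5 ./a.out (a.out standing in for any program)
keeps 1M memory accesses per thread instead of the default 128K.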
diff --git a/lib/sanitizer_common/sanitizer_posix.cc b/lib/sanitizer_common/sanitizer_posix.cc
index d98460a..d5cf999 100644
--- a/lib/sanitizer_common/sanitizer_posix.cc
+++ b/lib/sanitizer_common/sanitizer_posix.cc
@@ -169,7 +169,10 @@
struct rlimit rlim;
rlim.rlim_cur = limit;
rlim.rlim_max = limit;
- CHECK_EQ(0, setrlimit(RLIMIT_STACK, &rlim));
+ if (setrlimit(RLIMIT_STACK, &rlim)) {
+ Report("setrlimit() failed %d\n", errno);
+ Die();
+ }
CHECK(!StackSizeIsUnlimited());
}
diff --git a/lib/tsan/rtl/tsan_flags.cc b/lib/tsan/rtl/tsan_flags.cc
index 7412c50..fdb95bf 100644
--- a/lib/tsan/rtl/tsan_flags.cc
+++ b/lib/tsan/rtl/tsan_flags.cc
@@ -56,6 +56,11 @@
f->stop_on_start = false;
f->running_on_valgrind = false;
f->external_symbolizer_path = "";
+ f->history_size = 2;
+
+#ifdef TSAN_GO
+ f->history_size = 1;  // Go programs run a lot of goroutines, so keep per-thread traces small.
+#endif
// Let a frontend override.
OverrideFlags(f);
@@ -79,12 +84,19 @@
ParseFlag(env, &f->flush_memory_ms, "flush_memory_ms");
ParseFlag(env, &f->stop_on_start, "stop_on_start");
ParseFlag(env, &f->external_symbolizer_path, "external_symbolizer_path");
+ ParseFlag(env, &f->history_size, "history_size");
if (!f->report_bugs) {
f->report_thread_leaks = false;
f->report_destroy_locked = false;
f->report_signal_unsafe = false;
}
+
+ if (f->history_size < 0 || f->history_size > 7) {
+ Printf("ThreadSanitizer: incorrect value for history_size"
+ " (must be [0..7])\n");
+ Die();
+ }
}
} // namespace __tsan
diff --git a/lib/tsan/rtl/tsan_flags.h b/lib/tsan/rtl/tsan_flags.h
index 895cef8..06c3d0d 100644
--- a/lib/tsan/rtl/tsan_flags.h
+++ b/lib/tsan/rtl/tsan_flags.h
@@ -66,6 +66,12 @@
bool running_on_valgrind;
// Path to external symbolizer.
const char *external_symbolizer_path;
+ // Per-thread history size: controls how many previous memory accesses
+ // are remembered per thread. Possible values are [0..7].
+ // history_size=0 amounts to 32K memory accesses. Each subsequent value
+ // doubles the number of remembered accesses, up to history_size=7,
+ // which amounts to 4M memory accesses. The default is 2 (128K accesses).
+ int history_size;
};
Flags *flags();
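
As a sanity check of the comment above: the remembered-access count is
32K << history_size. A minimal standalone sketch (not part of the runtime)
that prints the whole range:

    #include <cstdio>

    int main() {
      // history_size=0 -> 32K accesses; each step doubles, up to 4M at 7.
      for (int hs = 0; hs <= 7; hs++) {
        unsigned long long accesses = 32ull * 1024 << hs;
        printf("history_size=%d -> %lluK accesses per thread\n",
               hs, accesses / 1024);
      }
      return 0;
    }

The default of 2 lands on 128K, matching the comment.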
diff --git a/lib/tsan/rtl/tsan_platform_linux.cc b/lib/tsan/rtl/tsan_platform_linux.cc
index ee90a24..0a2ec3c 100644
--- a/lib/tsan/rtl/tsan_platform_linux.cc
+++ b/lib/tsan/rtl/tsan_platform_linux.cc
@@ -202,28 +202,50 @@
}
#endif // #ifndef TSAN_GO
+static rlim_t getlim(int res) {
+ rlimit rlim;
+ CHECK_EQ(0, getrlimit(res, &rlim));
+ return rlim.rlim_cur;
+}
+
+static void setlim(int res, rlim_t lim) {
+ // The following magic is to prevent clang from replacing it with memset.
+ volatile rlimit rlim;
+ rlim.rlim_cur = lim;
+ rlim.rlim_max = lim;
+ setrlimit(res, (rlimit*)&rlim);
+}
+
const char *InitializePlatform() {
void *p = 0;
if (sizeof(p) == 8) {
// Disable core dumps, dumping of 16TB usually takes a bit long.
- // The following magic is to prevent clang from replacing it with memset.
- volatile rlimit lim;
- lim.rlim_cur = 0;
- lim.rlim_max = 0;
- setrlimit(RLIMIT_CORE, (rlimit*)&lim);
+ setlim(RLIMIT_CORE, 0);
}
+ bool reexec = false;
// TSan doesn't play well with unlimited stack size (as stack
// overlaps with shadow memory). If we detect unlimited stack size,
// we re-exec the program with limited stack size as a best effort.
- if (StackSizeIsUnlimited()) {
- const uptr kMaxStackSize = 32 * 1024 * 1024; // 32 Mb
+ if (getlim(RLIMIT_STACK) == (rlim_t)-1) {
+ const uptr kMaxStackSize = 32 * 1024 * 1024;
Report("WARNING: Program is run with unlimited stack size, which "
"wouldn't work with ThreadSanitizer.\n");
Report("Re-execing with stack size limited to %zd bytes.\n", kMaxStackSize);
SetStackSizeLimitInBytes(kMaxStackSize);
- ReExec();
+ reexec = true;
}
+ if (getlim(RLIMIT_AS) != (rlim_t)-1) {
+ Report("WARNING: Program is run with limited virtual address space, which "
+ "wouldn't work with ThreadSanitizer.\n");
+ Report("Re-execing with unlimited virtual address space.\n");
+ setlim(RLIMIT_AS, -1);
+ reexec = true;
+ }
+
+ if (reexec)
+ ReExec();
+
#ifndef TSAN_GO
CheckPIE();
g_tls_size = (uptr)InitTlsSize();
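
A standalone illustration of the two conditions that now trigger a re-exec,
using plain getrlimit() rather than the runtime's getlim()/setlim() helpers
(a sketch, not the runtime's code):

    #include <sys/resource.h>
    #include <cstdio>

    int main() {
      rlimit rl;
      // Unlimited stack clashes with the shadow memory layout.
      if (getrlimit(RLIMIT_STACK, &rl) == 0 && rl.rlim_cur == RLIM_INFINITY)
        printf("stack unlimited: TSan caps it at 32MB and re-execs\n");
      // A capped address space leaves no room for shadow memory.
      if (getrlimit(RLIMIT_AS, &rl) == 0 && rl.rlim_cur != RLIM_INFINITY)
        printf("address space limited: TSan unlimits it and re-execs\n");
      return 0;
    }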
diff --git a/lib/tsan/rtl/tsan_rtl.cc b/lib/tsan/rtl/tsan_rtl.cc
index e66e5e3..22338ca 100644
--- a/lib/tsan/rtl/tsan_rtl.cc
+++ b/lib/tsan/rtl/tsan_rtl.cc
@@ -290,11 +290,14 @@
uptr TraceTopPC(ThreadState *thr) {
Event *events = (Event*)GetThreadTrace(thr->tid);
- uptr pc = events[thr->fast_state.epoch() % kTraceSize]
- & ((1ull << 61) - 1);
+ uptr pc = events[thr->fast_state.GetTracePos()];
return pc;
}
+uptr TraceSize() {
+ return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
+}
+
#ifndef TSAN_GO
extern "C" void __tsan_trace_switch() {
TraceSwitch(cur_thread());
diff --git a/lib/tsan/rtl/tsan_rtl.h b/lib/tsan/rtl/tsan_rtl.h
index 9bc2a76..79075f0 100644
--- a/lib/tsan/rtl/tsan_rtl.h
+++ b/lib/tsan/rtl/tsan_rtl.h
@@ -74,6 +74,7 @@
// tid : kTidBits
// epoch : kClkBits
// unused : -
+// history_size : 3
class FastState {
public:
FastState(u64 tid, u64 epoch) {
@@ -113,6 +114,27 @@
void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
bool GetIgnoreBit() const { return (s64)x_ < 0; }
+ void SetHistorySize(int hs) {
+ CHECK_GE(hs, 0);
+ CHECK_LE(hs, 7);
+ x_ = (x_ & ~7) | hs;
+ }
+
+ int GetHistorySize() const {
+ return (int)(x_ & 7);
+ }
+
+ void ClearHistorySize() {
+ x_ &= ~7;
+ }
+
+ u64 GetTracePos() const {
+ const int hs = GetHistorySize();
+ // When hs == 0, the trace consists of 2 parts.
+ const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
+ return epoch() & mask;
+ }
+
private:
friend class Shadow;
static const int kTidShift = 64 - kTidBits - 1;
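
The history size lives in the three least-significant bits of the state
word, and GetTracePos() uses it to decide how much of the epoch counter
addresses the circular trace. A minimal standalone model of that mask
computation (assuming kTracePartSizeBits = 14 from tsan_trace.h below):

    #include <cassert>
    #include <cstdint>

    const int kTracePartSizeBits = 14;

    // Models FastState::GetTracePos(): hs selects how many low bits of
    // the epoch counter index into the circular trace.
    uint64_t trace_pos(int hs, uint64_t epoch) {
      // hs == 0 -> mask spans 2 parts (2^15 events); each step doubles it.
      const uint64_t mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
      return epoch & mask;
    }

    int main() {
      // With hs=2 the trace position wraps every 2^17 = 128K events.
      assert(trace_pos(2, (1ull << 20) + 5) == 5);
      return 0;
    }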
@@ -131,9 +153,14 @@
// addr0 : 3
class Shadow : public FastState {
public:
- explicit Shadow(u64 x) : FastState(x) { }
+ explicit Shadow(u64 x)
+ : FastState(x) {
+ }
- explicit Shadow(const FastState &s) : FastState(s.x_) { }
+ explicit Shadow(const FastState &s)
+ : FastState(s.x_) {
+ ClearHistorySize();
+ }
void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
DCHECK_EQ(x_ & 31, 0);
@@ -535,12 +562,13 @@
void TraceSwitch(ThreadState *thr);
uptr TraceTopPC(ThreadState *thr);
+uptr TraceSize();
extern "C" void __tsan_trace_switch();
void ALWAYS_INLINE INLINE TraceAddEvent(ThreadState *thr, FastState fs,
EventType typ, uptr addr) {
StatInc(thr, StatEvents);
- u64 epoch = fs.epoch();
+ u64 epoch = fs.GetTracePos();
if (UNLIKELY((epoch % kTracePartSize) == 0)) {
#ifndef TSAN_GO
HACKY_CALL(__tsan_trace_switch);
@@ -549,7 +577,7 @@
#endif
}
Event *trace = (Event*)GetThreadTrace(fs.tid());
- Event *evp = &trace[epoch % kTraceSize];
+ Event *evp = &trace[epoch];
Event ev = (u64)addr | ((u64)typ << 61);
*evp = ev;
}
diff --git a/lib/tsan/rtl/tsan_rtl_report.cc b/lib/tsan/rtl/tsan_rtl_report.cc
index 64b2798..d622c4d 100644
--- a/lib/tsan/rtl/tsan_rtl_report.cc
+++ b/lib/tsan/rtl/tsan_rtl_report.cc
@@ -263,11 +263,11 @@
return;
}
Lock l(&trace->mtx);
- const int partidx = (epoch / (kTraceSize / kTraceParts)) % kTraceParts;
+ const int partidx = (epoch / (TraceSize() / kTraceParts)) % kTraceParts;
TraceHeader* hdr = &trace->headers[partidx];
if (epoch < hdr->epoch0)
return;
- const u64 eend = epoch % kTraceSize;
+ const u64 eend = epoch % TraceSize();
const u64 ebegin = eend / kTracePartSize * kTracePartSize;
DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
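
The ebegin line above is integer division rounding eend down to the
16K-event boundary of its trace part; a small check with made-up numbers:

    #include <cassert>

    int main() {
      const unsigned long long kTracePartSize = 16 * 1024;
      unsigned long long eend = 40000;  // hypothetical trace position
      unsigned long long ebegin = eend / kTracePartSize * kTracePartSize;
      assert(ebegin == 32768);  // 40000 lies in the part [32768, 49152)
      return 0;
    }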
diff --git a/lib/tsan/rtl/tsan_rtl_thread.cc b/lib/tsan/rtl/tsan_rtl_thread.cc
index f104b73..4a0882c 100644
--- a/lib/tsan/rtl/tsan_rtl_thread.cc
+++ b/lib/tsan/rtl/tsan_rtl_thread.cc
@@ -123,7 +123,7 @@
void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
tctx = new(mem) ThreadContext(tid);
ctx->threads[tid] = tctx;
- MapThreadTrace(GetThreadTrace(tid), kTraceSize * sizeof(Event));
+ MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event));
}
CHECK_NE(tctx, 0);
CHECK_GE(tid, 0);
@@ -149,7 +149,6 @@
thr->fast_synch_epoch = thr->fast_state.epoch();
thr->clock.release(&tctx->sync);
StatInc(thr, StatSyncRelease);
-
tctx->creation_stack.ObtainCurrent(thr, pc);
}
return tid;
@@ -205,6 +204,7 @@
thr->fast_synch_epoch = tctx->epoch0;
thr->clock.set(tid, tctx->epoch0);
thr->clock.acquire(&tctx->sync);
+ thr->fast_state.SetHistorySize(flags()->history_size);
StatInc(thr, StatSyncAcquire);
DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
"tls_addr=%zx tls_size=%zx\n",
diff --git a/lib/tsan/rtl/tsan_trace.h b/lib/tsan/rtl/tsan_trace.h
index 660b676..921f7d0 100644
--- a/lib/tsan/rtl/tsan_trace.h
+++ b/lib/tsan/rtl/tsan_trace.h
@@ -19,12 +19,9 @@
namespace __tsan {
-#ifndef TSAN_HISTORY_SIZE // in kibitraces
-#define TSAN_HISTORY_SIZE 128
-#endif
-
-const int kTracePartSize = 16 * 1024;
-const int kTraceParts = TSAN_HISTORY_SIZE * 1024 / kTracePartSize;
+const int kTracePartSizeBits = 14;
+const int kTracePartSize = 1 << kTracePartSizeBits;
+const int kTraceParts = 4 * 1024 * 1024 / kTracePartSize;
const int kTraceSize = kTracePartSize * kTraceParts;
// Must fit into 3 bits.
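
With kTracePartSizeBits = 14 these constants give 16K-event parts, 256 trace
parts, and a 4M-event maximum trace, which is exactly what TraceSize()
returns at history_size=7. A quick standalone check:

    #include <cassert>

    int main() {
      const int kTracePartSizeBits = 14;
      const int kTracePartSize = 1 << kTracePartSizeBits;        // 16384
      const int kTraceParts = 4 * 1024 * 1024 / kTracePartSize;  // 256
      const int kTraceSize = kTracePartSize * kTraceParts;       // 4M events
      // TraceSize() computes 1 << (kTracePartSizeBits + history_size + 1).
      assert((1 << (kTracePartSizeBits + 7 + 1)) == kTraceSize);
      return 0;
    }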
diff --git a/lib/tsan/tests/unit/tsan_shadow_test.cc b/lib/tsan/tests/unit/tsan_shadow_test.cc
index 3547eaf..fa9c982 100644
--- a/lib/tsan/tests/unit/tsan_shadow_test.cc
+++ b/lib/tsan/tests/unit/tsan_shadow_test.cc
@@ -22,6 +22,7 @@
EXPECT_EQ(s.epoch(), (u64)22);
EXPECT_EQ(s.GetIgnoreBit(), false);
EXPECT_EQ(s.GetFreedAndReset(), false);
+ EXPECT_EQ(s.GetHistorySize(), 0);
EXPECT_EQ(s.addr0(), (u64)0);
EXPECT_EQ(s.size(), (u64)1);
EXPECT_EQ(s.is_write(), false);
@@ -35,6 +36,14 @@
EXPECT_EQ(s.GetIgnoreBit(), true);
s.ClearIgnoreBit();
EXPECT_EQ(s.GetIgnoreBit(), false);
+
+ for (int i = 0; i < 8; i++) {
+ s.SetHistorySize(i);
+ EXPECT_EQ(s.GetHistorySize(), i);
+ }
+ s.SetHistorySize(2);
+ s.ClearHistorySize();
+ EXPECT_EQ(s.GetHistorySize(), 0);
}
TEST(Shadow, Mapping) {