tsan: add atomic_fetch_sub() and atomic_signal_fence() functions
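
The new entry points mirror the existing atomic_fetch_add() family:
AtomicFetchSub() records a Release() on the atomic's address before the
subtraction for release orders and an Acquire() after it for acquire
orders. atomic_signal_fence() constrains only compiler reordering within
a single thread, so its runtime callback is deliberately empty.

Illustration only, not part of the patch: with -fsanitize=thread, and
assuming the usual lowering of atomic builtins by the instrumentation
pass, a C++11 fetch_sub on a 32-bit atomic is routed to the matching
entry point added here:

    #include <atomic>

    std::atomic<int> refcount{2};  // int assumed to be 32-bit here

    int unref() {
      // Expected to be instrumented as a call to
      // __tsan_atomic32_fetch_sub(&refcount, 1, mo_acq_rel);
      // the value before the subtraction is returned.
      return refcount.fetch_sub(1, std::memory_order_acq_rel);
    }
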
llvm-svn: 165218
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc
index 5f79ae0..83b5d25 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc
@@ -131,6 +131,17 @@
 }
 
 template<typename T>
+static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
+    morder mo) {
+  if (IsReleaseOrder(mo))
+    Release(thr, pc, (uptr)a);
+  v = __sync_fetch_and_sub(a, v);
+  if (IsAcquireOrder(mo))
+    Acquire(thr, pc, (uptr)a);
+  return v;
+}
+
+template<typename T>
 static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
     morder mo) {
   if (IsReleaseOrder(mo))
@@ -246,6 +257,22 @@
   SCOPED_ATOMIC(FetchAdd, a, v, mo);
 }
 
+a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
+  SCOPED_ATOMIC(FetchSub, a, v, mo);
+}
+
+a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
+  SCOPED_ATOMIC(FetchSub, a, v, mo);
+}
+
+a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
+  SCOPED_ATOMIC(FetchSub, a, v, mo);
+}
+
+a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
+  SCOPED_ATOMIC(FetchSub, a, v, mo);
+}
+
 a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(FetchAnd, a, v, mo);
 }
@@ -338,3 +365,8 @@
   char* a;
   SCOPED_ATOMIC(Fence, mo);
 }
+
+void __tsan_atomic_signal_fence(morder mo) {
+  // A signal fence constrains only compiler reordering within the current
+  // thread, so there is no cross-thread synchronization to model here.
+}
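
Illustration only, not part of the patch: the Release()/Acquire() pair in
AtomicFetchSub() is what lets the race detector see the happens-before
edge in patterns like the following, where an acquire load synchronizes
with a release fetch_sub and the plain access to payload is therefore not
reported as a race:

    #include <atomic>

    std::atomic<int> pending{1};
    int payload;  // plain data published through the counter

    void producer() {
      payload = 42;                                     // plain store
      pending.fetch_sub(1, std::memory_order_release);  // publish
    }

    void consumer() {
      while (pending.load(std::memory_order_acquire) != 0) {
      }
      int v = payload;  // no report: synchronized via the counter
      (void)v;
    }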