tsan: add 128-bit atomic operations

llvm-svn: 168683
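
Usage note (illustrative, not part of the patch): with -fsanitize=thread,
atomic builtins on 16-byte types are expected to be lowered to these new
entry points, mirroring the existing 8/16/32/64-bit interfaces. A minimal
sketch, assuming __int128 support and a target with native 16-byte atomics
(e.g. x86-64 with cmpxchg16b); the names g/read_g/write_g are hypothetical:

  __int128 g;

  __int128 read_g() {
    // Under tsan instrumentation this maps to something like:
    //   __tsan_atomic128_load(&g, __tsan_memory_order_acquire)
    return __atomic_load_n(&g, __ATOMIC_ACQUIRE);
  }

  void write_g(__int128 v) {
    // Likewise: __tsan_atomic128_store(&g, v, __tsan_memory_order_release)
    __atomic_store_n(&g, v, __ATOMIC_RELEASE);
  }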
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc
index c6cd63f..29f95ef 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc
@@ -47,6 +47,7 @@
 typedef __tsan_atomic16 a16;
 typedef __tsan_atomic32 a32;
 typedef __tsan_atomic64 a64;
+typedef __tsan_atomic128 a128;
 const morder mo_relaxed = __tsan_memory_order_relaxed;
 const morder mo_consume = __tsan_memory_order_consume;
 const morder mo_acquire = __tsan_memory_order_acquire;
@@ -60,7 +61,8 @@
   StatInc(thr, size == 1 ? StatAtomic1
              : size == 2 ? StatAtomic2
              : size == 4 ? StatAtomic4
-             :             StatAtomic8);
+             : size == 8 ? StatAtomic8
+             :             StatAtomic16);
   StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
              : mo == mo_consume ? StatAtomicConsume
              : mo == mo_acquire ? StatAtomicAcquire
@@ -296,6 +298,10 @@
   SCOPED_ATOMIC(Load, a, mo);
 }
 
+a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
+  SCOPED_ATOMIC(Load, a, mo);
+}
+
 void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(Store, a, v, mo);
 }
@@ -312,6 +318,10 @@
   SCOPED_ATOMIC(Store, a, v, mo);
 }
 
+void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(Store, a, v, mo);
+}
+
 a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(Exchange, a, v, mo);
 }
@@ -328,6 +338,10 @@
   SCOPED_ATOMIC(Exchange, a, v, mo);
 }
 
+a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(Exchange, a, v, mo);
+}
+
 a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(FetchAdd, a, v, mo);
 }
@@ -344,6 +358,10 @@
   SCOPED_ATOMIC(FetchAdd, a, v, mo);
 }
 
+a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(FetchAdd, a, v, mo);
+}
+
 a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(FetchSub, a, v, mo);
 }
@@ -360,6 +378,10 @@
   SCOPED_ATOMIC(FetchSub, a, v, mo);
 }
 
+a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(FetchSub, a, v, mo);
+}
+
 a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(FetchAnd, a, v, mo);
 }
@@ -376,6 +398,10 @@
   SCOPED_ATOMIC(FetchAnd, a, v, mo);
 }
 
+a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+
 a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(FetchOr, a, v, mo);
 }
@@ -392,6 +418,10 @@
   SCOPED_ATOMIC(FetchOr, a, v, mo);
 }
 
+a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+
 a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(FetchXor, a, v, mo);
 }
@@ -408,6 +438,10 @@
   SCOPED_ATOMIC(FetchXor, a, v, mo);
 }
 
+a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+
 a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(FetchNand, a, v, mo);
 }
@@ -424,6 +458,10 @@
   SCOPED_ATOMIC(FetchNand, a, v, mo);
 }
 
+a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+
 int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
     morder mo, morder fmo) {
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
@@ -444,6 +482,11 @@
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
+int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
 int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
     morder mo, morder fmo) {
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
@@ -464,6 +507,11 @@
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
+int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
 a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
     morder mo, morder fmo) {
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
@@ -483,6 +531,11 @@
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
+a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
 void __tsan_atomic_thread_fence(morder mo) {
   char* a;
   SCOPED_ATOMIC(Fence, mo);