//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11/C11 standards.
// For background, see the C++11 standard. A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/

#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_interface_atomic.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"

using namespace __tsan;  // NOLINT

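// RAII helper for the entry points below: the ScopedInRtl member marks the
// thread as running inside the runtime for the duration of the call (hence
// the checks for in_rtl == 1), and the constructor logs the intercepted
// function in debug builds.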
class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const char *func)
      : thr_(thr) {
    CHECK_EQ(thr_->in_rtl, 1);  // 1 due to our own ScopedInRtl member.
    DPrintf("#%d: %s\n", thr_->tid, func);
  }
  ~ScopedAtomic() {
    CHECK_EQ(thr_->in_rtl, 1);
  }
 private:
  ThreadState *thr_;
  ScopedInRtl in_rtl_;
};

// Some shortcuts.
typedef __tsan_memory_order morder;
typedef __tsan_atomic8 a8;
typedef __tsan_atomic16 a16;
typedef __tsan_atomic32 a32;
typedef __tsan_atomic64 a64;
const morder mo_relaxed = __tsan_memory_order_relaxed;
const morder mo_consume = __tsan_memory_order_consume;
const morder mo_acquire = __tsan_memory_order_acquire;
const morder mo_release = __tsan_memory_order_release;
const morder mo_acq_rel = __tsan_memory_order_acq_rel;
const morder mo_seq_cst = __tsan_memory_order_seq_cst;

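// Bumps the per-thread statistics counters for an atomic operation:
// the total count, the per-operation counter t, and the access-size and
// memory-order breakdowns.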
static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             :             StatAtomic8);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

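// Predicates classifying memory orders per C++11: which orders are valid
// for loads and stores, and whether an operation carries acquire and/or
// release semantics.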
static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

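// Maps legacy memory order constants, which were encoded as
// (1 << order) + 100500, to the current 0..5 enumeration values, so that
// code compiled against the old interface keeps working.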
static morder ConvertOrder(morder mo) {
  if (mo > (morder)100500) {
    mo = morder(mo - 100500);
    if (mo == morder(1 << 0))
      mo = mo_relaxed;
    else if (mo == morder(1 << 1))
      mo = mo_consume;
    else if (mo == morder(1 << 2))
      mo = mo_acquire;
    else if (mo == morder(1 << 3))
      mo = mo_release;
    else if (mo == morder(1 << 4))
      mo = mo_acq_rel;
    else if (mo == morder(1 << 5))
      mo = mo_seq_cst;
  }
  CHECK_GE(mo, mo_relaxed);
  CHECK_LE(mo, mo_seq_cst);
  return mo;
}

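// Value-computation functions for the generic read-modify-write skeleton
// below: each one returns the new value to store, given the current value v
// and the operand op.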
template<typename T> T func_xchg(T v, T op) {
  return op;
}

template<typename T> T func_add(T v, T op) {
  return v + op;
}

template<typename T> T func_sub(T v, T op) {
  return v - op;
}

template<typename T> T func_and(T v, T op) {
  return v & op;
}

template<typename T> T func_or(T v, T op) {
  return v | op;
}

template<typename T> T func_xor(T v, T op) {
  return v ^ op;
}

template<typename T> T func_nand(T v, T op) {
  // nand computes ~(v & op), not (~v & op).
  return ~(v & op);
}

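// Common prologue for all entry points: normalizes the memory order,
// optionally upgrades it to seq_cst (force_seq_cst_atomics flag), processes
// pending signals, records statistics, and dispatches to the matching
// Atomic##func implementation.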
#define SCOPED_ATOMIC(func, ...) \
    mo = ConvertOrder(mo); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    ProcessPendingSignals(thr); \
    const uptr pc = (uptr)__builtin_return_address(0); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, pc, __FUNCTION__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/

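// Acquire loads synchronize with prior release operations on the same
// address via the SyncVar's vector clock; non-acquire loads of word-sized
// (or smaller) locations take the fast path and just read the value.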
template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a))  // T at most word-sized.
    return *a;
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, false);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.acquire(&s->clock);
  T v = *a;
  s->mtx.ReadUnlock();
  return v;
}

template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  // This fast path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so we must reset the clock.
  if (!IsReleaseOrder(mo) && sizeof(T) <= sizeof(a)) {
    *a = v;
    return;
  }
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.ReleaseStore(&s->clock);
  *a = v;
  s->mtx.Unlock();
}

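// Generic read-modify-write skeleton: locks the SyncVar for the address,
// performs the clock operation matching the memory order (acquire, release,
// or both), applies F to compute the new value, and returns the old one.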
template<typename T, T (*F)(T v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  if (IsAcqRelOrder(mo))
    thr->clock.acq_rel(&s->clock);
  else if (IsReleaseOrder(mo))
    thr->clock.release(&s->clock);
  else if (IsAcquireOrder(mo))
    thr->clock.acquire(&s->clock);
  T c = *a;
  *a = F(c, v);
  s->mtx.Unlock();
  return c;
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

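// Strong compare-and-swap: on success stores v and returns true; on failure
// writes the observed value into *c and returns false. The failure order fmo
// is accepted for interface compatibility but not used yet.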
template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused, because LLVM does not pass it yet.
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  if (IsAcqRelOrder(mo))
    thr->clock.acq_rel(&s->clock);
  else if (IsReleaseOrder(mo))
    thr->clock.release(&s->clock);
  else if (IsAcquireOrder(mo))
    thr->clock.acquire(&s->clock);
  T cur = *a;
  bool res = false;
  if (cur == *c) {
    *a = v;
    res = true;
  } else {
    *c = cur;
  }
  s->mtx.Unlock();
  return res;
}

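// Value-returning variant backing __tsan_atomicN_compare_exchange_val:
// returns the value observed in *a, which equals c exactly when the
// exchange succeeded.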
template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}

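// The entry points below are what compiler instrumentation calls in place of
// the original atomic operations. Roughly (the exact rewriting is up to the
// compiler), a source-level
//   __atomic_load_n(p, __ATOMIC_ACQUIRE)
// on a 32-bit location becomes
//   __tsan_atomic32_load(p, __tsan_memory_order_acquire)
// in instrumented code.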
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

void __tsan_atomic_thread_fence(morder mo) {
  char* a = 0;  // Dummy address: SCOPED_ATOMIC references sizeof(*a).
  SCOPED_ATOMIC(Fence, mo);
}

void __tsan_atomic_signal_fence(morder mo) {
  // Nothing to do: a signal fence orders the thread only with its own
  // signal handlers, so no inter-thread synchronization is involved.
}