//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11/C11 standards.
// For background, see the C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
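//
// As an illustration (a sketch, not part of the interface contract), a
// compiler that instruments atomics is expected to lower a C++11 atomic
// access into a call to the corresponding interface function below, e.g.:
//   std::atomic<int> x;
//   int v = x.load(std::memory_order_acquire);
//   // is instrumented roughly as:
//   int v = __tsan_atomic32_load(addr_of_x, __tsan_memory_order_acquire);
// where addr_of_x is a hypothetical name for the address of x's storage.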

#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_interface_atomic.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"

using namespace __tsan;  // NOLINT

class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const char *func)
      : thr_(thr) {
    CHECK_EQ(thr_->in_rtl, 1);  // 1 due to our own ScopedInRtl member.
    DPrintf("#%d: %s\n", thr_->tid, func);
  }
  ~ScopedAtomic() {
    CHECK_EQ(thr_->in_rtl, 1);
  }
 private:
  ThreadState *thr_;
  ScopedInRtl in_rtl_;
};

// Some shortcuts.
typedef __tsan_memory_order morder;
typedef __tsan_atomic8 a8;
typedef __tsan_atomic16 a16;
typedef __tsan_atomic32 a32;
typedef __tsan_atomic64 a64;
typedef __tsan_atomic128 a128;
const morder mo_relaxed = __tsan_memory_order_relaxed;
const morder mo_consume = __tsan_memory_order_consume;
const morder mo_acquire = __tsan_memory_order_acquire;
const morder mo_release = __tsan_memory_order_release;
const morder mo_acq_rel = __tsan_memory_order_acq_rel;
const morder mo_seq_cst = __tsan_memory_order_seq_cst;

static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             : size == 8 ? StatAtomic8
             :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

static morder ConvertOrder(morder mo) {
  if (mo > (morder)100500) {
    mo = morder(mo - 100500);
    if (mo == morder(1 << 0))
      mo = mo_relaxed;
    else if (mo == morder(1 << 1))
      mo = mo_consume;
    else if (mo == morder(1 << 2))
      mo = mo_acquire;
    else if (mo == morder(1 << 3))
      mo = mo_release;
    else if (mo == morder(1 << 4))
      mo = mo_acq_rel;
    else if (mo == morder(1 << 5))
      mo = mo_seq_cst;
  }
  CHECK_GE(mo, mo_relaxed);
  CHECK_LE(mo, mo_seq_cst);
  return mo;
}
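
// Example of the legacy mapping handled by ConvertOrder (illustrative only):
//   ConvertOrder((morder)(100500 + (1 << 2))) == mo_acquire
//   ConvertOrder(mo_release) == mo_release  // standard values pass through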

template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain a full barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// Atomic ops are executed under the tsan internal mutex,
// so here we assume that the atomic variables are not accessed
// from non-instrumented code.
#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
a128 func_xchg(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

#define SCOPED_ATOMIC(func, ...) \
    mo = ConvertOrder(mo); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    ProcessPendingSignals(thr); \
    const uptr pc = (uptr)__builtin_return_address(0); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, pc, __FUNCTION__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/
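
// For reference, a typical interface function below expands roughly to the
// following (a sketch, not literal preprocessor output):
//   a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
//     mo = ConvertOrder(mo);
//     mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo;
//     ThreadState *const thr = cur_thread();
//     ProcessPendingSignals(thr);
//     const uptr pc = (uptr)__builtin_return_address(0);
//     AtomicStatInc(thr, sizeof(*a), mo, StatAtomicLoad);
//     ScopedAtomic sa(thr, pc, __FUNCTION__);
//     return AtomicLoad(thr, pc, a, mo);
//   }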

template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a))
    return *a;
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.acquire(&s->clock);
  T v = *a;
  s->mtx.ReadUnlock();
  __sync_synchronize();
  return v;
}

template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so we must reset the clock.
  if (!IsReleaseOrder(mo) && sizeof(T) <= sizeof(a)) {
    *a = v;
    return;
  }
  __sync_synchronize();
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.ReleaseStore(&s->clock);
  *a = v;
  s->mtx.Unlock();
  // Trailing memory barrier to provide sequential consistency
  // for Dekker-like store-load synchronization.
  __sync_synchronize();
}
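
// The trailing barrier above is what supports Dekker-style store-load
// synchronization (an illustrative sketch; X and Y are seq_cst atomics,
// both initially zero):
//   Thread 1:                         Thread 2:
//     store(&X, 1, seq_cst);            store(&Y, 1, seq_cst);
//     r1 = load(&Y, seq_cst);           r2 = load(&X, seq_cst);
// Sequential consistency forbids the outcome r1 == 0 && r2 == 0, so each
// store must be followed by a full barrier before the subsequent load.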

template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  if (IsAcqRelOrder(mo))
    thr->clock.acq_rel(&s->clock);
  else if (IsReleaseOrder(mo))
    thr->clock.release(&s->clock);
  else if (IsAcquireOrder(mo))
    thr->clock.acquire(&s->clock);
  v = F(a, v);
  s->mtx.Unlock();
  return v;
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because llvm does not pass it yet.
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  if (IsAcqRelOrder(mo))
    thr->clock.acq_rel(&s->clock);
  else if (IsReleaseOrder(mo))
    thr->clock.release(&s->clock);
  else if (IsAcquireOrder(mo))
    thr->clock.acquire(&s->clock);
  T cc = *c;
  T pr = func_cas(a, cc, v);
  s->mtx.Unlock();
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}
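
// Usage sketch of the compare-and-swap interface built on top of this helper
// (illustrative; x is a hypothetical a32 variable):
//   a32 expected = 0;
//   if (__tsan_atomic32_compare_exchange_strong(&x, &expected, 1,
//           __tsan_memory_order_acq_rel, __tsan_memory_order_acquire)) {
//     // x was 0 and has been set to 1.
//   } else {
//     // The CAS failed; expected now holds the value observed in x.
//   }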

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}

a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif

void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif

a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif

int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

void __tsan_atomic_thread_fence(morder mo) {
  char* a;  // Only used for the sizeof(*a) computation inside SCOPED_ATOMIC.
  SCOPED_ATOMIC(Fence, mo);
}

void __tsan_atomic_signal_fence(morder mo) {
}