//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11/C1x standards.
// For background see the C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
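//
// Illustrative note (a sketch, not part of the runtime itself): under
// ThreadSanitizer instrumentation, a C++11 atomic operation such as
//   std::atomic<int> x;
//   x.store(1, std::memory_order_release);
// is expected to be lowered by the compiler pass to a call along the lines of
//   __tsan_atomic32_store(addr_of_x, 1, __tsan_memory_order_release);
// where addr_of_x stands for the lowered address operand (the name is
// hypothetical).  The entry points defined below implement that interface.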

#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_interface_atomic.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"

using namespace __tsan;  // NOLINT

class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const char *func)
      : thr_(thr) {
    CHECK_EQ(thr_->in_rtl, 1);  // 1 due to our own ScopedInRtl member.
    DPrintf("#%d: %s\n", thr_->tid, func);
  }
  ~ScopedAtomic() {
    CHECK_EQ(thr_->in_rtl, 1);
  }
 private:
  ThreadState *thr_;
  ScopedInRtl in_rtl_;
};

// Some shortcuts.
typedef __tsan_memory_order morder;
typedef __tsan_atomic8 a8;
typedef __tsan_atomic16 a16;
typedef __tsan_atomic32 a32;
typedef __tsan_atomic64 a64;
typedef __tsan_atomic128 a128;
const morder mo_relaxed = __tsan_memory_order_relaxed;
const morder mo_consume = __tsan_memory_order_consume;
const morder mo_acquire = __tsan_memory_order_acquire;
const morder mo_release = __tsan_memory_order_release;
const morder mo_acq_rel = __tsan_memory_order_acq_rel;
const morder mo_seq_cst = __tsan_memory_order_seq_cst;

static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             : size == 8 ? StatAtomic8
             :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

static morder ConvertOrder(morder mo) {
  if (mo > (morder)100500) {
    mo = morder(mo - 100500);
    if (mo == morder(1 << 0))
      mo = mo_relaxed;
    else if (mo == morder(1 << 1))
      mo = mo_consume;
    else if (mo == morder(1 << 2))
      mo = mo_acquire;
    else if (mo == morder(1 << 3))
      mo = mo_release;
    else if (mo == morder(1 << 4))
      mo = mo_acq_rel;
    else if (mo == morder(1 << 5))
      mo = mo_seq_cst;
  }
  CHECK_GE(mo, mo_relaxed);
  CHECK_LE(mo, mo_seq_cst);
  return mo;
}
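
// Note (illustrative, derived from the code above): the 100500 offset is how
// an older, bit-encoded ABI for the memory order constants is detected.
// For example, a legacy value of 100500 + (1 << 3) == 100508 is converted to
// mo_release, while values already in the [mo_relaxed, mo_seq_cst] range are
// passed through unchanged.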

template<typename T> T func_xchg(volatile T *v, T op) {
  return __sync_lock_test_and_set(v, op);
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// Atomic ops are executed under tsan internal mutex,
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
a128 func_xchg(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

#define SCOPED_ATOMIC(func, ...) \
    mo = ConvertOrder(mo); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    ProcessPendingSignals(thr); \
    const uptr pc = (uptr)__builtin_return_address(0); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, pc, __FUNCTION__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/
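// For illustration, a sketch of how the macro expands: __tsan_atomic32_load(a,
// mo) below effectively becomes
//   mo = ConvertOrder(mo);
//   mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo;
//   ThreadState *const thr = cur_thread();
//   ProcessPendingSignals(thr);
//   const uptr pc = (uptr)__builtin_return_address(0);
//   AtomicStatInc(thr, sizeof(*a), mo, StatAtomicLoad);
//   ScopedAtomic sa(thr, pc, __FUNCTION__);
//   return AtomicLoad(thr, pc, a, mo);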

template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a))
    return *a;
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, false);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.acquire(&s->clock);
  T v = *a;
  s->mtx.ReadUnlock();
  __sync_synchronize();
  return v;
}

template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly saying even relaxed store cuts off release sequence,
  // so must reset the clock.
  if (!IsReleaseOrder(mo) && sizeof(T) <= sizeof(a)) {
    *a = v;
    return;
  }
  __sync_synchronize();
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.ReleaseStore(&s->clock);
  *a = v;
  s->mtx.Unlock();
}

template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  if (IsAcqRelOrder(mo))
    thr->clock.acq_rel(&s->clock);
  else if (IsReleaseOrder(mo))
    thr->clock.release(&s->clock);
  else if (IsAcquireOrder(mo))
    thr->clock.acquire(&s->clock);
  v = F(a, v);
  s->mtx.Unlock();
  return v;
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because llvm does not pass it yet.
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  if (IsAcqRelOrder(mo))
    thr->clock.acq_rel(&s->clock);
  else if (IsReleaseOrder(mo))
    thr->clock.release(&s->clock);
  else if (IsAcquireOrder(mo))
    thr->clock.acquire(&s->clock);
  T cc = *c;
  T pr = func_cas(a, cc, v);
  s->mtx.Unlock();
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}

a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif

void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif

a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif

int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

void __tsan_atomic_thread_fence(morder mo) {
  char* a;
  SCOPED_ATOMIC(Fence, mo);
}

void __tsan_atomic_signal_fence(morder mo) {
}