//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11/C11 standards.
// For background see the C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up to date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/

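// Illustrative sketch (not part of the runtime): with -fsanitize=thread the
// compiler lowers C++11/C11 atomic operations to the __tsan_atomic* entry
// points defined below.  For example, a program statement such as
//   std::atomic<int> x; x.load(std::memory_order_acquire);
// is serviced, roughly, by
//   __tsan_atomic32_load((const volatile a32*)&x, __tsan_memory_order_acquire);
// The exact lowering is produced by the instrumentation pass; this comment only
// illustrates the intended correspondence between the two sets of memory orders.
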
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_interface_atomic.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"

using namespace __tsan;  // NOLINT

class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const char *func)
      : thr_(thr) {
    CHECK_EQ(thr_->in_rtl, 1);  // 1 due to our own ScopedInRtl member.
    DPrintf("#%d: %s\n", thr_->tid, func);
  }
  ~ScopedAtomic() {
    CHECK_EQ(thr_->in_rtl, 1);
  }
 private:
  ThreadState *thr_;
  ScopedInRtl in_rtl_;
};

// Some shortcuts.
typedef __tsan_memory_order morder;
typedef __tsan_atomic8 a8;
typedef __tsan_atomic16 a16;
typedef __tsan_atomic32 a32;
typedef __tsan_atomic64 a64;
typedef __tsan_atomic128 a128;
const morder mo_relaxed = __tsan_memory_order_relaxed;
const morder mo_consume = __tsan_memory_order_consume;
const morder mo_acquire = __tsan_memory_order_acquire;
const morder mo_release = __tsan_memory_order_release;
const morder mo_acq_rel = __tsan_memory_order_acq_rel;
const morder mo_seq_cst = __tsan_memory_order_seq_cst;

static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             : size == 8 ? StatAtomic8
             :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

static morder ConvertOrder(morder mo) {
  if (mo > (morder)100500) {
    mo = morder(mo - 100500);
    if (mo == morder(1 << 0))
      mo = mo_relaxed;
    else if (mo == morder(1 << 1))
      mo = mo_consume;
    else if (mo == morder(1 << 2))
      mo = mo_acquire;
    else if (mo == morder(1 << 3))
      mo = mo_release;
    else if (mo == morder(1 << 4))
      mo = mo_acq_rel;
    else if (mo == morder(1 << 5))
      mo = mo_seq_cst;
  }
  CHECK_GE(mo, mo_relaxed);
  CHECK_LE(mo, mo_seq_cst);
  return mo;
}
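
// Illustrative note, derived from ConvertOrder above (not a contract of the
// public interface): memory orders may also arrive encoded as a one-hot bit
// value biased by 100500, in which case they are normalized first.  For
// example:
//   ConvertOrder((morder)(100500 + (1 << 2)));  // yields mo_acquire
// Values already in the [mo_relaxed, mo_seq_cst] range pass through unchanged.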

template<typename T> T func_xchg(T v, T op) {
  return op;
}

template<typename T> T func_add(T v, T op) {
  return v + op;
}

template<typename T> T func_sub(T v, T op) {
  return v - op;
}

template<typename T> T func_and(T v, T op) {
  return v & op;
}

template<typename T> T func_or(T v, T op) {
  return v | op;
}

template<typename T> T func_xor(T v, T op) {
  return v ^ op;
}

template<typename T> T func_nand(T v, T op) {
  // NAND combines as ~(v & op); note this differs from (~v & op).
  return ~(v & op);
}

#define SCOPED_ATOMIC(func, ...) \
    mo = ConvertOrder(mo); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    ProcessPendingSignals(thr); \
    const uptr pc = (uptr)__builtin_return_address(0); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, pc, __FUNCTION__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/
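
// Rough expansion sketch, for readability only (the macro above is
// authoritative): a call such as __tsan_atomic32_load(a, mo) turns into
//   mo = ConvertOrder(mo);
//   mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo;
//   ThreadState *const thr = cur_thread();
//   ProcessPendingSignals(thr);
//   const uptr pc = (uptr)__builtin_return_address(0);
//   AtomicStatInc(thr, sizeof(*a), mo, StatAtomicLoad);
//   ScopedAtomic sa(thr, pc, __FUNCTION__);
//   return AtomicLoad(thr, pc, a, mo);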

template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
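  // Note (illustrative): sizeof(a) below is the size of the pointer, so on a
  // 64-bit target this fast path covers 1/2/4/8-byte non-acquire loads; wider
  // (e.g. 128-bit) or acquire-ordered loads take the synchronized path.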
  if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a))
    return *a;
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, false);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.acquire(&s->clock);
  T v = *a;
  s->mtx.ReadUnlock();
  return v;
}

template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so the clock must be reset; the fast path below does not do this.
  if (!IsReleaseOrder(mo) && sizeof(T) <= sizeof(a)) {
    *a = v;
    return;
  }
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.ReleaseStore(&s->clock);
  *a = v;
  s->mtx.Unlock();
}

template<typename T, T (*F)(T v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  if (IsAcqRelOrder(mo))
    thr->clock.acq_rel(&s->clock);
  else if (IsReleaseOrder(mo))
    thr->clock.release(&s->clock);
  else if (IsAcquireOrder(mo))
    thr->clock.acquire(&s->clock);
  T c = *a;
  *a = F(c, v);
  s->mtx.Unlock();
  return c;
}
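
// Worked example (illustrative): AtomicFetchAdd below instantiates
// AtomicRMW<T, func_add>, so with *a == 5 a call
//   AtomicFetchAdd(thr, pc, a, 3, mo);  // stores 8 into *a and returns 5
// i.e. every RMW writes F(old, v) and returns the old value, as the fetch_*
// and exchange entry points require.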

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because llvm does not pass it yet.
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  if (IsAcqRelOrder(mo))
    thr->clock.acq_rel(&s->clock);
  else if (IsReleaseOrder(mo))
    thr->clock.release(&s->clock);
  else if (IsAcquireOrder(mo))
    thr->clock.acquire(&s->clock);
  T cur = *a;
  bool res = false;
  if (cur == *c) {
    *a = v;
    res = true;
  } else {
    *c = cur;
  }
  s->mtx.Unlock();
  return res;
}

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}
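
// Note (illustrative): the value-returning overload above forwards to the
// pointer-based AtomicCAS and then returns c, which at that point holds the
// previous value of *a whether or not the exchange succeeded; this matches
// the usual value-returning compare-and-swap convention.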

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}

a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif

void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif

a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif

int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

void __tsan_atomic_thread_fence(morder mo) {
  char* a;  // Placeholder so that SCOPED_ATOMIC's sizeof(*a) compiles; only its type matters.
  SCOPED_ATOMIC(Fence, mo);
}

void __tsan_atomic_signal_fence(morder mo) {
}