//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11/C1x standards.
// For background see the C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/

#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_interface_atomic.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"

using namespace __tsan;  // NOLINT

class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const char *func)
      : thr_(thr) {
    CHECK_EQ(thr_->in_rtl, 1);  // 1 due to our own ScopedInRtl member.
    DPrintf("#%d: %s\n", thr_->tid, func);
  }
  ~ScopedAtomic() {
    CHECK_EQ(thr_->in_rtl, 1);
  }
 private:
  ThreadState *thr_;
  ScopedInRtl in_rtl_;
};

// Some shortcuts.
typedef __tsan_memory_order morder;
typedef __tsan_atomic8 a8;
typedef __tsan_atomic16 a16;
typedef __tsan_atomic32 a32;
typedef __tsan_atomic64 a64;
const morder mo_relaxed = __tsan_memory_order_relaxed;
const morder mo_consume = __tsan_memory_order_consume;
const morder mo_acquire = __tsan_memory_order_acquire;
const morder mo_release = __tsan_memory_order_release;
const morder mo_acq_rel = __tsan_memory_order_acq_rel;
const morder mo_seq_cst = __tsan_memory_order_seq_cst;

static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             :             StatAtomic8);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

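// ConvertOrder() accepts both the current enum encoding of memory orders and
// a legacy encoding (bit flags biased by 100500, apparently produced by older
// versions of the instrumentation) and maps the latter back onto the mo_*
// constants above.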
static morder ConvertOrder(morder mo) {
  if (mo > (morder)100500) {
    mo = morder(mo - 100500);
    if (mo == morder(1 << 0))
      mo = mo_relaxed;
    else if (mo == morder(1 << 1))
      mo = mo_consume;
    else if (mo == morder(1 << 2))
      mo = mo_acquire;
    else if (mo == morder(1 << 3))
      mo = mo_release;
    else if (mo == morder(1 << 4))
      mo = mo_acq_rel;
    else if (mo == morder(1 << 5))
      mo = mo_seq_cst;
  }
  CHECK_GE(mo, mo_relaxed);
  CHECK_LE(mo, mo_seq_cst);
  return mo;
}

template<typename T> T func_xchg(T v, T op) {
  return op;
}

template<typename T> T func_add(T v, T op) {
  return v + op;
}

template<typename T> T func_sub(T v, T op) {
  return v - op;
}

template<typename T> T func_and(T v, T op) {
  return v & op;
}

template<typename T> T func_or(T v, T op) {
  return v | op;
}

template<typename T> T func_xor(T v, T op) {
  return v ^ op;
}

template<typename T> T func_nand(T v, T op) {
  // NAND computes ~(v & op), not ~v & op.
  return ~(v & op);
}

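// Common prologue for every __tsan_atomicN_xxx entry point below:
// normalizes the memory order, optionally forces seq_cst
// (flags()->force_seq_cst_atomics), processes pending signals, updates
// statistics and dispatches to the corresponding AtomicXxx() implementation.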
#define SCOPED_ATOMIC(func, ...) \
    mo = ConvertOrder(mo); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    ProcessPendingSignals(thr); \
    const uptr pc = (uptr)__builtin_return_address(0); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, pc, __FUNCTION__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/

template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a))
    return *a;
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, false);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.acquire(&s->clock);
  T v = *a;
  s->mtx.ReadUnlock();
  return v;
}

template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  // This fast path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so it must reset the clock.
  if (!IsReleaseOrder(mo) && sizeof(T) <= sizeof(a)) {
    *a = v;
    return;
  }
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.ReleaseStore(&s->clock);
  *a = v;
  s->mtx.Unlock();
}

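// Generic read-modify-write: acquires the sync object for the address,
// applies the acquire/release semantics implied by the memory order to the
// thread's vector clock, uses F to compute the new value and returns the
// previous one.  The concrete operations below just instantiate it with the
// func_* helpers.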
template<typename T, T (*F)(T v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  if (IsAcqRelOrder(mo))
    thr->clock.acq_rel(&s->clock);
  else if (IsReleaseOrder(mo))
    thr->clock.release(&s->clock);
  else if (IsAcquireOrder(mo))
    thr->clock.acquire(&s->clock);
  T c = *a;
  *a = F(c, v);
  s->mtx.Unlock();
  return c;
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

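// Strong compare-exchange: on success stores v, on failure writes the
// observed value back into *c (as the C++11 compare_exchange interface
// requires).  The failure order fmo is currently ignored (see the comment
// inside).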
template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because LLVM does not pass it yet.
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  if (IsAcqRelOrder(mo))
    thr->clock.acq_rel(&s->clock);
  else if (IsReleaseOrder(mo))
    thr->clock.release(&s->clock);
  else if (IsAcquireOrder(mo))
    thr->clock.acquire(&s->clock);
  T cur = *a;
  bool res = false;
  if (cur == *c) {
    *a = v;
    res = true;
  } else {
    *c = cur;
  }
  s->mtx.Unlock();
  return res;
}

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}

a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

void __tsan_atomic_thread_fence(morder mo) {
  char* a;
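  // Dummy location: SCOPED_ATOMIC uses sizeof(*a) for statistics; the fence
  // itself does not access memory.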
  SCOPED_ATOMIC(Fence, mo);
}

void __tsan_atomic_signal_fence(morder mo) {
}