//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11/C1x standards.
// For background see the C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/

#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_interface_atomic.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"

using namespace __tsan;  // NOLINT

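// RAII helper instantiated by every interface function below: the
// ScopedInRtl member marks the thread as executing inside the runtime
// (hence the in_rtl == 1 invariant checked in both the constructor and
// the destructor), and DPrintf traces the call in verbose runs.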
class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const char *func)
      : thr_(thr) {
    CHECK_EQ(thr_->in_rtl, 1);  // 1 due to our own ScopedInRtl member.
    DPrintf("#%d: %s\n", thr_->tid, func);
  }
  ~ScopedAtomic() {
    CHECK_EQ(thr_->in_rtl, 1);
  }
 private:
  ThreadState *thr_;
  ScopedInRtl in_rtl_;
};

// Some shortcuts.
typedef __tsan_memory_order morder;
typedef __tsan_atomic8 a8;
typedef __tsan_atomic16 a16;
typedef __tsan_atomic32 a32;
typedef __tsan_atomic64 a64;
typedef __tsan_atomic128 a128;
const morder mo_relaxed = __tsan_memory_order_relaxed;
const morder mo_consume = __tsan_memory_order_consume;
const morder mo_acquire = __tsan_memory_order_acquire;
const morder mo_release = __tsan_memory_order_release;
const morder mo_acq_rel = __tsan_memory_order_acq_rel;
const morder mo_seq_cst = __tsan_memory_order_seq_cst;

static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             : size == 8 ? StatAtomic8
             :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

// Old versions of the instrumentation pass memory orders in a legacy
// encoding, presumably (1 << order) biased by 100500; map those back to the
// plain enum values. New compilers pass the enum values directly, and the
// range checks below accept them unchanged.
static morder ConvertOrder(morder mo) {
  if (mo > (morder)100500) {
    mo = morder(mo - 100500);
    if (mo == morder(1 << 0))
      mo = mo_relaxed;
    else if (mo == morder(1 << 1))
      mo = mo_consume;
    else if (mo == morder(1 << 2))
      mo = mo_acquire;
    else if (mo == morder(1 << 3))
      mo = mo_release;
    else if (mo == morder(1 << 4))
      mo = mo_acq_rel;
    else if (mo == morder(1 << 5))
      mo = mo_seq_cst;
  }
  CHECK_GE(mo, mo_relaxed);
  CHECK_LE(mo, mo_seq_cst);
  return mo;
}
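
// For example, a legacy-encoded acquire arrives as morder(100500 + (1 << 2))
// and is mapped back to mo_acquire above (hypothetical check, shown only for
// illustration):
//   CHECK_EQ(ConvertOrder(morder(100500 + (1 << 2))), mo_acquire);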

template<typename T> T func_xchg(T v, T op) {
  return op;
}

template<typename T> T func_add(T v, T op) {
  return v + op;
}

template<typename T> T func_sub(T v, T op) {
  return v - op;
}

template<typename T> T func_and(T v, T op) {
  return v & op;
}

template<typename T> T func_or(T v, T op) {
  return v | op;
}

template<typename T> T func_xor(T v, T op) {
  return v ^ op;
}

template<typename T> T func_nand(T v, T op) {
  // NAND is ~(v & op), matching __sync/__atomic fetch-nand semantics;
  // "~v & op" would compute a different operation.
  return ~(v & op);
}
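
// The functors above compute the new value from (current value, operand);
// AtomicRMW below applies one and returns the previous value. Illustrative
// arithmetic (hypothetical values): with *a == 6, a fetch_and with operand 3
// stores func_and(6, 3) == 2 and returns 6.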

#define SCOPED_ATOMIC(func, ...) \
    mo = ConvertOrder(mo); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    ProcessPendingSignals(thr); \
    const uptr pc = (uptr)__builtin_return_address(0); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, pc, __FUNCTION__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/
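
// For reference, SCOPED_ATOMIC(Load, a, mo) inside __tsan_atomic32_load
// expands roughly to:
//   mo = ConvertOrder(mo);
//   mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo;
//   ThreadState *const thr = cur_thread();
//   ProcessPendingSignals(thr);
//   const uptr pc = (uptr)__builtin_return_address(0);
//   AtomicStatInc(thr, sizeof(*a), mo, StatAtomicLoad);
//   ScopedAtomic sa(thr, pc, __FUNCTION__);
//   return AtomicLoad(thr, pc, a, mo);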

template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic (it is no wider than a machine word,
  // per the sizeof check below).
  if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a))
    return *a;
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, false);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.acquire(&s->clock);
  T v = *a;
  s->mtx.ReadUnlock();
  return v;
}

template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic (it is no wider than a machine word).
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so we must reset the clock.
  if (!IsReleaseOrder(mo) && sizeof(T) <= sizeof(a)) {
    *a = v;
    return;
  }
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.ReleaseStore(&s->clock);
  *a = v;
  s->mtx.Unlock();
}
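
// Illustrative use of the semantics modeled by AtomicLoad/AtomicStore
// (hypothetical user code, shown as instrumented calls; "data" is a plain
// non-atomic variable):
//   // Thread 1:
//   data = 42;
//   __tsan_atomic32_store(&ready, 1, __tsan_memory_order_release);
//   // Thread 2:
//   while (__tsan_atomic32_load(&ready, __tsan_memory_order_acquire) == 0) {}
//   use(data);  // happens-after "data = 42", so no race is reported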

template<typename T, T (*F)(T v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  if (IsAcqRelOrder(mo))
    thr->clock.acq_rel(&s->clock);
  else if (IsReleaseOrder(mo))
    thr->clock.release(&s->clock);
  else if (IsAcquireOrder(mo))
    thr->clock.acquire(&s->clock);
  T c = *a;
  *a = F(c, v);
  s->mtx.Unlock();
  return c;
}
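
// Like the __sync/__atomic fetch operations, AtomicRMW returns the previous
// value: e.g. with *a == 5, AtomicFetchAdd(thr, pc, a, 3, mo) stores 8 and
// returns 5.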

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused, because LLVM does not pass it yet.
  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  if (IsAcqRelOrder(mo))
    thr->clock.acq_rel(&s->clock);
  else if (IsReleaseOrder(mo))
    thr->clock.release(&s->clock);
  else if (IsAcquireOrder(mo))
    thr->clock.acquire(&s->clock);
  T cur = *a;
  bool res = false;
  if (cur == *c) {
    *a = v;
    res = true;
  } else {
    *c = cur;
  }
  s->mtx.Unlock();
  return res;
}
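
// Illustrative semantics of the strong CAS above: with *a == 1 and *c == 1,
// it stores v into *a, leaves *c unchanged and returns true; with *a == 2 it
// leaves *a unchanged, writes the observed value 2 into *c and returns false
// (this is how compare_exchange reports the current value on failure).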

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}

a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

void __tsan_atomic_thread_fence(morder mo) {
  char* a = 0;  // Dummy; only sizeof(*a) is used by the SCOPED_ATOMIC macro.
  SCOPED_ATOMIC(Fence, mo);
}

void __tsan_atomic_signal_fence(morder mo) {
  // Nothing to do: a signal fence orders a thread only with its own signal
  // handlers, so there is no inter-thread synchronization to model.
}