#ifndef SkAtomics_DEFINED
#define SkAtomics_DEFINED

// This file is not part of the public Skia API.
#include "SkTypes.h"

enum sk_memory_order {
    sk_memory_order_relaxed,
    sk_memory_order_consume,
    sk_memory_order_acquire,
    sk_memory_order_release,
    sk_memory_order_acq_rel,
    sk_memory_order_seq_cst,
};

template <typename T>
T sk_atomic_load(const T*, sk_memory_order = sk_memory_order_seq_cst);

template <typename T>
void sk_atomic_store(T*, T, sk_memory_order = sk_memory_order_seq_cst);

template <typename T>
T sk_atomic_fetch_add(T*, T, sk_memory_order = sk_memory_order_seq_cst);

template <typename T>
bool sk_atomic_compare_exchange(T*, T* expected, T desired,
                                sk_memory_order success = sk_memory_order_seq_cst,
                                sk_memory_order failure = sk_memory_order_seq_cst);
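
// A minimal usage sketch of the primitives above (illustrative, not part of
// this header): a shared hit counter using the sequentially-consistent defaults.
//
//     int32_t gHits = 0;                                // shared across threads
//     int32_t before = sk_atomic_fetch_add(&gHits, 1);  // returns the pre-add value
//     int32_t now    = sk_atomic_load(&gHits);          // now >= before + 1
//
// Pass an explicit sk_memory_order only where a weaker ordering is provably safe.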

// A little wrapper class for small T (think, builtins: int, float, void*) to
// ensure they're always used atomically.  This is our stand-in for std::atomic<T>.
template <typename T>
class SkAtomic : SkNoncopyable {
public:
    SkAtomic() {}

    // It is essential we return by value rather than by const&.  fVal may change at any time.
    T load(sk_memory_order mo = sk_memory_order_seq_cst) const {
        return sk_atomic_load(&fVal, mo);
    }

    void store(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) {
        sk_atomic_store(&fVal, val, mo);
    }

    bool compare_exchange(T* expected, const T& desired,
                          sk_memory_order success = sk_memory_order_seq_cst,
                          sk_memory_order failure = sk_memory_order_seq_cst) {
        return sk_atomic_compare_exchange(&fVal, expected, desired, success, failure);
    }
private:
    T fVal;
};
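
// A minimal SkAtomic<T> sketch (illustrative).  Note the default constructor
// leaves fVal uninitialized, so store() before the first load():
//
//     SkAtomic<int32_t> gGen;
//     gGen.store(1);
//     int32_t expected = 1;
//     if (gGen.compare_exchange(&expected, 2)) {
//         // gGen was 1 and is now 2.
//     } else {
//         // expected now holds the value actually observed.
//     }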

#if defined(_MSC_VER)
    #include "../ports/SkAtomics_std.h"
#elif !defined(SK_BUILD_FOR_IOS) && defined(__ATOMIC_RELAXED)
    #include "../ports/SkAtomics_atomic.h"
#else
    #include "../ports/SkAtomics_sync.h"
#endif

// From here down we have shims for our old atomics API, to be weaned off of.
// We use the default sequentially-consistent memory order to make things simple
// and to match the practical reality of our old _sync and _win implementations.

inline int32_t sk_atomic_inc(int32_t* ptr) { return sk_atomic_fetch_add(ptr, +1); }
inline int32_t sk_atomic_dec(int32_t* ptr) { return sk_atomic_fetch_add(ptr, -1); }
inline int32_t sk_atomic_add(int32_t* ptr, int32_t v) { return sk_atomic_fetch_add(ptr, v); }

inline int64_t sk_atomic_inc(int64_t* ptr) { return sk_atomic_fetch_add<int64_t>(ptr, +1); }

inline bool sk_atomic_cas(int32_t* ptr, int32_t expected, int32_t desired) {
    return sk_atomic_compare_exchange(ptr, &expected, desired);
}

inline void* sk_atomic_cas(void** ptr, void* expected, void* desired) {
    (void)sk_atomic_compare_exchange(ptr, &expected, desired);
    return expected;
}
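
// The pointer overload above returns the previous value, which supports a
// lazy-publish pattern like this sketch (create() and destroy() are hypothetical):
//
//     static void* gCached = NULL;
//     void* mine = create();
//     if (void* prev = sk_atomic_cas(&gCached, NULL, mine)) {
//         destroy(mine);   // lost the race; prev holds the winning thread's value
//     }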

inline int32_t sk_atomic_conditional_inc(int32_t* ptr) {
    int32_t prev = sk_atomic_load(ptr);
    do {
        if (0 == prev) {
            break;
        }
    } while (!sk_atomic_compare_exchange(ptr, &prev, prev+1));
    return prev;
}
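
// sk_atomic_conditional_inc() bumps *ptr only if it was not zero, and returns
// the value seen before the attempt.  A typical (illustrative) use is taking a
// strong reference only while the count has not already dropped to zero:
//
//     if (sk_atomic_conditional_inc(&obj->fRefCnt) > 0) {   // obj is hypothetical
//         // We now own a reference.
//     }   // else the count was already 0; the object may be gone.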

template <typename T>
T sk_acquire_load(T* ptr) { return sk_atomic_load(ptr, sk_memory_order_acquire); }

template <typename T>
void sk_release_store(T* ptr, T val) { sk_atomic_store(ptr, val, sk_memory_order_release); }
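
// sk_release_store() and sk_acquire_load() pair up to publish data between
// threads.  An illustrative sketch, with hypothetical globals int32_t gData
// and int32_t gReady:
//
//     // Writer:
//     gData = 42;                        // plain write
//     sk_release_store(&gReady, 1);      // everything above is visible...
//
//     // Reader:
//     if (sk_acquire_load(&gReady)) {    // ...to any thread that reads 1 here
//         SkASSERT(42 == gData);
//     }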

inline void sk_membar_acquire__after_atomic_dec() {}
inline void sk_membar_acquire__after_atomic_conditional_inc() {}

#endif//SkAtomics_DEFINED