mtklein | a64c48f | 2015-01-21 13:13:31 -0800 | [diff] [blame] | 1 | #ifndef SkAtomics_DEFINED |
| 2 | #define SkAtomics_DEFINED |
| 3 | |
| 4 | // This file is not part of the public Skia API. |
| 5 | #include "SkTypes.h" |
| 6 | |
// Mirrors std::memory_order so Skia code can name memory orderings without
// including <atomic> directly.  The enumerators are kept in the same order
// (and therefore have the same underlying values) as their std:: counterparts.
enum sk_memory_order {
    sk_memory_order_relaxed = 0,
    sk_memory_order_consume = 1,
    sk_memory_order_acquire = 2,
    sk_memory_order_release = 3,
    sk_memory_order_acq_rel = 4,
    sk_memory_order_seq_cst = 5,
};
| 15 | |
// Atomically loads *ptr with the given memory order.
// Defaults to sequentially-consistent, the strongest ordering.
template <typename T>
T sk_atomic_load(const T*, sk_memory_order = sk_memory_order_seq_cst);

// Atomically stores the given value into *ptr with the given memory order.
template <typename T>
void sk_atomic_store(T*, T, sk_memory_order = sk_memory_order_seq_cst);

// Atomically adds the given value to *ptr, returning the previous value.
template <typename T>
T sk_atomic_fetch_add(T*, T, sk_memory_order = sk_memory_order_seq_cst);

// If *ptr equals *expected, atomically sets *ptr to desired and returns true
// (using the 'success' ordering).  Otherwise writes the observed value of *ptr
// back into *expected and returns false (using the 'failure' ordering) --
// the shims below (e.g. sk_atomic_conditional_inc) rely on that write-back.
template <typename T>
bool sk_atomic_compare_exchange(T*, T* expected, T desired,
                                sk_memory_order success = sk_memory_order_seq_cst,
                                sk_memory_order failure = sk_memory_order_seq_cst);
mtklein | 86821b5 | 2015-02-24 14:38:12 -0800 | [diff] [blame] | 29 | |
| 30 | // A little wrapper class for small T (think, builtins: int, float, void*) to |
| 31 | // ensure they're always used atomically. This is our stand-in for std::atomic<T>. |
| 32 | template <typename T> |
| 33 | class SkAtomic : SkNoncopyable { |
| 34 | public: |
| 35 | SkAtomic() {} |
| 36 | |
| 37 | // It is essential we return by value rather than by const&. fVal may change at any time. |
| 38 | T load(sk_memory_order mo = sk_memory_order_seq_cst) const { |
| 39 | return sk_atomic_load(&fVal, mo); |
| 40 | } |
| 41 | |
| 42 | void store(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) { |
| 43 | sk_atomic_store(&fVal, val, mo); |
| 44 | } |
mtklein | 59c9203 | 2015-02-25 12:51:55 -0800 | [diff] [blame^] | 45 | |
| 46 | bool compare_exchange(T* expected, const T& desired, |
| 47 | sk_memory_order success = sk_memory_order_seq_cst, |
| 48 | sk_memory_order failure = sk_memory_order_seq_cst) { |
| 49 | return sk_atomic_compare_exchange(&fVal, expected, desired, success, failure); |
| 50 | } |
mtklein | 86821b5 | 2015-02-24 14:38:12 -0800 | [diff] [blame] | 51 | private: |
| 52 | T fVal; |
| 53 | }; |
| 54 | |
// Pull in a platform implementation of the sk_atomic_* primitives declared above.
#if defined(_MSC_VER)
#include "../ports/SkAtomics_std.h"      // MSVC: the std-backed port.
#elif !defined(SK_BUILD_FOR_IOS) && defined(__ATOMIC_RELAXED)
#include "../ports/SkAtomics_atomic.h"   // __ATOMIC_RELAXED => GCC/Clang __atomic builtins (skipped on iOS).
#else
#include "../ports/SkAtomics_sync.h"     // Fallback port (older __sync-style builtins, per its name).
#endif
| 62 | |
mtklein | a669bc7 | 2015-02-02 12:22:07 -0800 | [diff] [blame] | 63 | // From here down we have shims for our old atomics API, to be weaned off of. |
| 64 | // We use the default sequentially-consistent memory order to make things simple |
| 65 | // and to match the practical reality of our old _sync and _win implementations. |
| 66 | |
| 67 | inline int32_t sk_atomic_inc(int32_t* ptr) { return sk_atomic_fetch_add(ptr, +1); } |
| 68 | inline int32_t sk_atomic_dec(int32_t* ptr) { return sk_atomic_fetch_add(ptr, -1); } |
| 69 | inline int32_t sk_atomic_add(int32_t* ptr, int32_t v) { return sk_atomic_fetch_add(ptr, v); } |
| 70 | |
| 71 | inline int64_t sk_atomic_inc(int64_t* ptr) { return sk_atomic_fetch_add<int64_t>(ptr, +1); } |
| 72 | |
| 73 | inline bool sk_atomic_cas(int32_t* ptr, int32_t expected, int32_t desired) { |
| 74 | return sk_atomic_compare_exchange(ptr, &expected, desired); |
| 75 | } |
| 76 | |
| 77 | inline void* sk_atomic_cas(void** ptr, void* expected, void* desired) { |
| 78 | (void)sk_atomic_compare_exchange(ptr, &expected, desired); |
| 79 | return expected; |
| 80 | } |
| 81 | |
| 82 | inline int32_t sk_atomic_conditional_inc(int32_t* ptr) { |
| 83 | int32_t prev = sk_atomic_load(ptr); |
| 84 | do { |
| 85 | if (0 == prev) { |
| 86 | break; |
| 87 | } |
| 88 | } while(!sk_atomic_compare_exchange(ptr, &prev, prev+1)); |
| 89 | return prev; |
| 90 | } |
| 91 | |
| 92 | template <typename T> |
| 93 | T sk_acquire_load(T* ptr) { return sk_atomic_load(ptr, sk_memory_order_acquire); } |
| 94 | |
| 95 | template <typename T> |
mtklein | a669bc7 | 2015-02-02 12:22:07 -0800 | [diff] [blame] | 96 | void sk_release_store(T* ptr, T val) { sk_atomic_store(ptr, val, sk_memory_order_release); } |
| 97 | |
| 98 | inline void sk_membar_acquire__after_atomic_dec() {} |
| 99 | inline void sk_membar_acquire__after_atomic_conditional_inc() {} |
| 100 | |
mtklein | a64c48f | 2015-01-21 13:13:31 -0800 | [diff] [blame] | 101 | #endif//SkAtomics_DEFINED |