mtklein | 50ffd99 | 2015-03-30 08:13:33 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Copyright 2015 Google Inc. |
| 3 | * |
| 4 | * Use of this source code is governed by a BSD-style license that can be |
| 5 | * found in the LICENSE file. |
| 6 | */ |
| 7 | |
mtklein | a64c48f | 2015-01-21 13:13:31 -0800 | [diff] [blame] | 8 | #ifndef SkAtomics_DEFINED |
| 9 | #define SkAtomics_DEFINED |
| 10 | |
| 11 | // This file is not part of the public Skia API. |
| 12 | #include "SkTypes.h" |
mtklein | 23267db | 2015-11-12 11:07:53 -0800 | [diff] [blame] | 13 | #include <atomic> |
| 14 | |
| 15 | // ~~~~~~~~ APIs ~~~~~~~~~ |
mtklein | a64c48f | 2015-01-21 13:13:31 -0800 | [diff] [blame] | 16 | |
mtklein | a669bc7 | 2015-02-02 12:22:07 -0800 | [diff] [blame] | 17 | enum sk_memory_order { |
| 18 | sk_memory_order_relaxed, |
| 19 | sk_memory_order_consume, |
| 20 | sk_memory_order_acquire, |
| 21 | sk_memory_order_release, |
| 22 | sk_memory_order_acq_rel, |
| 23 | sk_memory_order_seq_cst, |
| 24 | }; |
| 25 | |
// Atomically loads and returns *ptr.
template <typename T>
T sk_atomic_load(const T*, sk_memory_order = sk_memory_order_seq_cst);

// Atomically stores the given value into *ptr.
template <typename T>
void sk_atomic_store(T*, T, sk_memory_order = sk_memory_order_seq_cst);

// Atomically adds to *ptr, returning the value *ptr held before the add.
template <typename T>
T sk_atomic_fetch_add(T*, T, sk_memory_order = sk_memory_order_seq_cst);

// Atomically subtracts from *ptr, returning the value *ptr held before the subtract.
template <typename T>
T sk_atomic_fetch_sub(T*, T, sk_memory_order = sk_memory_order_seq_cst);

// Strong compare-and-swap: if *ptr == *expected, stores desired and returns true;
// otherwise copies the observed value into *expected and returns false.
template <typename T>
bool sk_atomic_compare_exchange(T*, T* expected, T desired,
                                sk_memory_order success = sk_memory_order_seq_cst,
                                sk_memory_order failure = sk_memory_order_seq_cst);

// Atomically stores the given value into *ptr, returning the value *ptr held before.
template <typename T>
T sk_atomic_exchange(T*, T, sk_memory_order = sk_memory_order_seq_cst);
| 45 | |
mtklein | 86821b5 | 2015-02-24 14:38:12 -0800 | [diff] [blame] | 46 | // A little wrapper class for small T (think, builtins: int, float, void*) to |
| 47 | // ensure they're always used atomically. This is our stand-in for std::atomic<T>. |
mtklein | bf90520 | 2015-10-07 12:46:43 -0700 | [diff] [blame] | 48 | // !!! Please _really_ know what you're doing if you change default_memory_order. !!! |
| 49 | template <typename T, sk_memory_order default_memory_order = sk_memory_order_seq_cst> |
mtklein | 86821b5 | 2015-02-24 14:38:12 -0800 | [diff] [blame] | 50 | class SkAtomic : SkNoncopyable { |
| 51 | public: |
| 52 | SkAtomic() {} |
mtklein | 942e99b | 2015-06-17 07:53:22 -0700 | [diff] [blame] | 53 | explicit SkAtomic(const T& val) : fVal(val) {} |
mtklein | 86821b5 | 2015-02-24 14:38:12 -0800 | [diff] [blame] | 54 | |
| 55 | // It is essential we return by value rather than by const&. fVal may change at any time. |
mtklein | bf90520 | 2015-10-07 12:46:43 -0700 | [diff] [blame] | 56 | T load(sk_memory_order mo = default_memory_order) const { |
mtklein | 86821b5 | 2015-02-24 14:38:12 -0800 | [diff] [blame] | 57 | return sk_atomic_load(&fVal, mo); |
| 58 | } |
| 59 | |
mtklein | bf90520 | 2015-10-07 12:46:43 -0700 | [diff] [blame] | 60 | void store(const T& val, sk_memory_order mo = default_memory_order) { |
mtklein | 86821b5 | 2015-02-24 14:38:12 -0800 | [diff] [blame] | 61 | sk_atomic_store(&fVal, val, mo); |
| 62 | } |
mtklein | 59c9203 | 2015-02-25 12:51:55 -0800 | [diff] [blame] | 63 | |
mtklein | bf90520 | 2015-10-07 12:46:43 -0700 | [diff] [blame] | 64 | // Alias for .load(default_memory_order). |
herb | 0869267 | 2015-09-28 08:59:18 -0700 | [diff] [blame] | 65 | operator T() const { |
| 66 | return this->load(); |
| 67 | } |
| 68 | |
mtklein | bf90520 | 2015-10-07 12:46:43 -0700 | [diff] [blame] | 69 | // Alias for .store(v, default_memory_order). |
herb | 0869267 | 2015-09-28 08:59:18 -0700 | [diff] [blame] | 70 | T operator=(const T& v) { |
| 71 | this->store(v); |
| 72 | return v; |
| 73 | } |
| 74 | |
mtklein | bf90520 | 2015-10-07 12:46:43 -0700 | [diff] [blame] | 75 | T fetch_add(const T& val, sk_memory_order mo = default_memory_order) { |
mtklein | 942e99b | 2015-06-17 07:53:22 -0700 | [diff] [blame] | 76 | return sk_atomic_fetch_add(&fVal, val, mo); |
| 77 | } |
| 78 | |
mtklein | bf90520 | 2015-10-07 12:46:43 -0700 | [diff] [blame] | 79 | T fetch_sub(const T& val, sk_memory_order mo = default_memory_order) { |
herb | 3667d5b | 2015-09-16 07:46:17 -0700 | [diff] [blame] | 80 | return sk_atomic_fetch_sub(&fVal, val, mo); |
| 81 | } |
| 82 | |
mtklein | 59c9203 | 2015-02-25 12:51:55 -0800 | [diff] [blame] | 83 | bool compare_exchange(T* expected, const T& desired, |
mtklein | bf90520 | 2015-10-07 12:46:43 -0700 | [diff] [blame] | 84 | sk_memory_order success = default_memory_order, |
| 85 | sk_memory_order failure = default_memory_order) { |
mtklein | 59c9203 | 2015-02-25 12:51:55 -0800 | [diff] [blame] | 86 | return sk_atomic_compare_exchange(&fVal, expected, desired, success, failure); |
| 87 | } |
mtklein | 86821b5 | 2015-02-24 14:38:12 -0800 | [diff] [blame] | 88 | private: |
| 89 | T fVal; |
| 90 | }; |
| 91 | |
mtklein | 23267db | 2015-11-12 11:07:53 -0800 | [diff] [blame] | 92 | // ~~~~~~~~ Implementations ~~~~~~~~~ |
| 93 | |
| 94 | template <typename T> |
| 95 | T sk_atomic_load(const T* ptr, sk_memory_order mo) { |
| 96 | SkASSERT(mo == sk_memory_order_relaxed || |
| 97 | mo == sk_memory_order_seq_cst || |
| 98 | mo == sk_memory_order_acquire || |
| 99 | mo == sk_memory_order_consume); |
| 100 | const std::atomic<T>* ap = reinterpret_cast<const std::atomic<T>*>(ptr); |
| 101 | return std::atomic_load_explicit(ap, (std::memory_order)mo); |
| 102 | } |
| 103 | |
| 104 | template <typename T> |
| 105 | void sk_atomic_store(T* ptr, T val, sk_memory_order mo) { |
| 106 | SkASSERT(mo == sk_memory_order_relaxed || |
| 107 | mo == sk_memory_order_seq_cst || |
| 108 | mo == sk_memory_order_release); |
| 109 | std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr); |
| 110 | return std::atomic_store_explicit(ap, val, (std::memory_order)mo); |
| 111 | } |
| 112 | |
| 113 | template <typename T> |
| 114 | T sk_atomic_fetch_add(T* ptr, T val, sk_memory_order mo) { |
| 115 | // All values of mo are valid. |
| 116 | std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr); |
| 117 | return std::atomic_fetch_add_explicit(ap, val, (std::memory_order)mo); |
| 118 | } |
| 119 | |
| 120 | template <typename T> |
| 121 | T sk_atomic_fetch_sub(T* ptr, T val, sk_memory_order mo) { |
| 122 | // All values of mo are valid. |
| 123 | std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr); |
| 124 | return std::atomic_fetch_sub_explicit(ap, val, (std::memory_order)mo); |
| 125 | } |
| 126 | |
| 127 | template <typename T> |
| 128 | bool sk_atomic_compare_exchange(T* ptr, T* expected, T desired, |
| 129 | sk_memory_order success, |
| 130 | sk_memory_order failure) { |
| 131 | // All values of success are valid. |
| 132 | SkASSERT(failure == sk_memory_order_relaxed || |
| 133 | failure == sk_memory_order_seq_cst || |
| 134 | failure == sk_memory_order_acquire || |
| 135 | failure == sk_memory_order_consume); |
| 136 | SkASSERT(failure <= success); |
| 137 | std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr); |
| 138 | return std::atomic_compare_exchange_strong_explicit(ap, expected, desired, |
| 139 | (std::memory_order)success, |
| 140 | (std::memory_order)failure); |
| 141 | } |
| 142 | |
| 143 | template <typename T> |
| 144 | T sk_atomic_exchange(T* ptr, T val, sk_memory_order mo) { |
| 145 | // All values of mo are valid. |
| 146 | std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr); |
| 147 | return std::atomic_exchange_explicit(ap, val, (std::memory_order)mo); |
| 148 | } |
| 149 | |
| 150 | // ~~~~~~~~ Legacy APIs ~~~~~~~~~ |
mtklein | a64c48f | 2015-01-21 13:13:31 -0800 | [diff] [blame] | 151 | |
mtklein | a669bc7 | 2015-02-02 12:22:07 -0800 | [diff] [blame] | 152 | // From here down we have shims for our old atomics API, to be weaned off of. |
| 153 | // We use the default sequentially-consistent memory order to make things simple |
| 154 | // and to match the practical reality of our old _sync and _win implementations. |
| 155 | |
mtklein | 570c868 | 2016-07-27 08:40:45 -0700 | [diff] [blame] | 156 | inline int32_t sk_atomic_inc(int32_t* ptr) { return sk_atomic_fetch_add(ptr, +1); } |
| 157 | inline int32_t sk_atomic_dec(int32_t* ptr) { return sk_atomic_fetch_add(ptr, -1); } |
mtklein | a669bc7 | 2015-02-02 12:22:07 -0800 | [diff] [blame] | 158 | |
mtklein | a64c48f | 2015-01-21 13:13:31 -0800 | [diff] [blame] | 159 | #endif//SkAtomics_DEFINED |