mtklein | 50ffd99 | 2015-03-30 08:13:33 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Copyright 2015 Google Inc. |
| 3 | * |
| 4 | * Use of this source code is governed by a BSD-style license that can be |
| 5 | * found in the LICENSE file. |
| 6 | */ |
| 7 | |
mtklein | a64c48f | 2015-01-21 13:13:31 -0800 | [diff] [blame] | 8 | #ifndef SkAtomics_DEFINED |
| 9 | #define SkAtomics_DEFINED |
| 10 | |
| 11 | // This file is not part of the public Skia API. |
Ben Wagner | d5148e3 | 2018-07-16 17:44:06 -0400 | [diff] [blame] | 12 | #include "../private/SkNoncopyable.h" |
mtklein | a64c48f | 2015-01-21 13:13:31 -0800 | [diff] [blame] | 13 | #include "SkTypes.h" |
mtklein | 23267db | 2015-11-12 11:07:53 -0800 | [diff] [blame] | 14 | #include <atomic> |
| 15 | |
| 16 | // ~~~~~~~~ APIs ~~~~~~~~~ |
mtklein | a64c48f | 2015-01-21 13:13:31 -0800 | [diff] [blame] | 17 | |
// Memory-ordering tokens for the sk_atomic_* APIs.
// The enumerators are declared in the same order as std::memory_order's, which
// is why the implementations below can convert with a plain
// (std::memory_order) cast.
enum sk_memory_order {
    sk_memory_order_relaxed,   // Atomicity only; no ordering constraints.
    sk_memory_order_consume,   // Dependency-ordered load.
    sk_memory_order_acquire,   // Load-acquire.
    sk_memory_order_release,   // Store-release.
    sk_memory_order_acq_rel,   // Acquire + release, for read-modify-write ops.
    sk_memory_order_seq_cst,   // Sequentially consistent; the default throughout this file.
};
| 26 | |
// The four primitive atomic operations; implementations are below under
// "Implementations".  All default to sequentially-consistent ordering.

// Atomically reads and returns *ptr.
template <typename T>
T sk_atomic_load(const T*, sk_memory_order = sk_memory_order_seq_cst);

// Atomically writes the given value to *ptr.
template <typename T>
void sk_atomic_store(T*, T, sk_memory_order = sk_memory_order_seq_cst);

// Atomically adds to *ptr, returning the value it held before the add.
template <typename T>
T sk_atomic_fetch_add(T*, T, sk_memory_order = sk_memory_order_seq_cst);

// Atomically: if *ptr == *expected, stores desired into *ptr and returns true;
// otherwise copies the current *ptr into *expected and returns false.
template <typename T>
bool sk_atomic_compare_exchange(T*, T* expected, T desired,
                                sk_memory_order success = sk_memory_order_seq_cst,
                                sk_memory_order failure = sk_memory_order_seq_cst);
mtklein | 86821b5 | 2015-02-24 14:38:12 -0800 | [diff] [blame] | 40 | |
| 41 | // A little wrapper class for small T (think, builtins: int, float, void*) to |
| 42 | // ensure they're always used atomically. This is our stand-in for std::atomic<T>. |
mtklein | bf90520 | 2015-10-07 12:46:43 -0700 | [diff] [blame] | 43 | // !!! Please _really_ know what you're doing if you change default_memory_order. !!! |
| 44 | template <typename T, sk_memory_order default_memory_order = sk_memory_order_seq_cst> |
mtklein | 86821b5 | 2015-02-24 14:38:12 -0800 | [diff] [blame] | 45 | class SkAtomic : SkNoncopyable { |
| 46 | public: |
| 47 | SkAtomic() {} |
mtklein | 942e99b | 2015-06-17 07:53:22 -0700 | [diff] [blame] | 48 | explicit SkAtomic(const T& val) : fVal(val) {} |
mtklein | 86821b5 | 2015-02-24 14:38:12 -0800 | [diff] [blame] | 49 | |
| 50 | // It is essential we return by value rather than by const&. fVal may change at any time. |
mtklein | bf90520 | 2015-10-07 12:46:43 -0700 | [diff] [blame] | 51 | T load(sk_memory_order mo = default_memory_order) const { |
mtklein | 86821b5 | 2015-02-24 14:38:12 -0800 | [diff] [blame] | 52 | return sk_atomic_load(&fVal, mo); |
| 53 | } |
| 54 | |
mtklein | bf90520 | 2015-10-07 12:46:43 -0700 | [diff] [blame] | 55 | void store(const T& val, sk_memory_order mo = default_memory_order) { |
mtklein | 86821b5 | 2015-02-24 14:38:12 -0800 | [diff] [blame] | 56 | sk_atomic_store(&fVal, val, mo); |
| 57 | } |
mtklein | 59c9203 | 2015-02-25 12:51:55 -0800 | [diff] [blame] | 58 | |
mtklein | bf90520 | 2015-10-07 12:46:43 -0700 | [diff] [blame] | 59 | // Alias for .load(default_memory_order). |
herb | 0869267 | 2015-09-28 08:59:18 -0700 | [diff] [blame] | 60 | operator T() const { |
| 61 | return this->load(); |
| 62 | } |
| 63 | |
mtklein | bf90520 | 2015-10-07 12:46:43 -0700 | [diff] [blame] | 64 | // Alias for .store(v, default_memory_order). |
herb | 0869267 | 2015-09-28 08:59:18 -0700 | [diff] [blame] | 65 | T operator=(const T& v) { |
| 66 | this->store(v); |
| 67 | return v; |
| 68 | } |
mtklein | 86821b5 | 2015-02-24 14:38:12 -0800 | [diff] [blame] | 69 | private: |
| 70 | T fVal; |
| 71 | }; |
| 72 | |
mtklein | 23267db | 2015-11-12 11:07:53 -0800 | [diff] [blame] | 73 | // ~~~~~~~~ Implementations ~~~~~~~~~ |
| 74 | |
| 75 | template <typename T> |
| 76 | T sk_atomic_load(const T* ptr, sk_memory_order mo) { |
| 77 | SkASSERT(mo == sk_memory_order_relaxed || |
| 78 | mo == sk_memory_order_seq_cst || |
| 79 | mo == sk_memory_order_acquire || |
| 80 | mo == sk_memory_order_consume); |
| 81 | const std::atomic<T>* ap = reinterpret_cast<const std::atomic<T>*>(ptr); |
| 82 | return std::atomic_load_explicit(ap, (std::memory_order)mo); |
| 83 | } |
| 84 | |
| 85 | template <typename T> |
| 86 | void sk_atomic_store(T* ptr, T val, sk_memory_order mo) { |
| 87 | SkASSERT(mo == sk_memory_order_relaxed || |
| 88 | mo == sk_memory_order_seq_cst || |
| 89 | mo == sk_memory_order_release); |
| 90 | std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr); |
| 91 | return std::atomic_store_explicit(ap, val, (std::memory_order)mo); |
| 92 | } |
| 93 | |
| 94 | template <typename T> |
| 95 | T sk_atomic_fetch_add(T* ptr, T val, sk_memory_order mo) { |
| 96 | // All values of mo are valid. |
| 97 | std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr); |
| 98 | return std::atomic_fetch_add_explicit(ap, val, (std::memory_order)mo); |
| 99 | } |
| 100 | |
| 101 | template <typename T> |
mtklein | 23267db | 2015-11-12 11:07:53 -0800 | [diff] [blame] | 102 | bool sk_atomic_compare_exchange(T* ptr, T* expected, T desired, |
| 103 | sk_memory_order success, |
| 104 | sk_memory_order failure) { |
| 105 | // All values of success are valid. |
| 106 | SkASSERT(failure == sk_memory_order_relaxed || |
| 107 | failure == sk_memory_order_seq_cst || |
| 108 | failure == sk_memory_order_acquire || |
| 109 | failure == sk_memory_order_consume); |
| 110 | SkASSERT(failure <= success); |
| 111 | std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr); |
| 112 | return std::atomic_compare_exchange_strong_explicit(ap, expected, desired, |
| 113 | (std::memory_order)success, |
| 114 | (std::memory_order)failure); |
| 115 | } |
| 116 | |
mtklein | 23267db | 2015-11-12 11:07:53 -0800 | [diff] [blame] | 117 | // ~~~~~~~~ Legacy APIs ~~~~~~~~~ |
mtklein | a64c48f | 2015-01-21 13:13:31 -0800 | [diff] [blame] | 118 | |
mtklein | a669bc7 | 2015-02-02 12:22:07 -0800 | [diff] [blame] | 119 | // From here down we have shims for our old atomics API, to be weaned off of. |
| 120 | // We use the default sequentially-consistent memory order to make things simple |
| 121 | // and to match the practical reality of our old _sync and _win implementations. |
| 122 | |
mtklein | 570c868 | 2016-07-27 08:40:45 -0700 | [diff] [blame] | 123 | inline int32_t sk_atomic_inc(int32_t* ptr) { return sk_atomic_fetch_add(ptr, +1); } |
| 124 | inline int32_t sk_atomic_dec(int32_t* ptr) { return sk_atomic_fetch_add(ptr, -1); } |
mtklein | a669bc7 | 2015-02-02 12:22:07 -0800 | [diff] [blame] | 125 | |
mtklein | a64c48f | 2015-01-21 13:13:31 -0800 | [diff] [blame] | 126 | #endif//SkAtomics_DEFINED |