blob: 349bdc44779c5474bf0a0da654f7ba4602a06025 [file] [log] [blame]
mtklein50ffd992015-03-30 08:13:33 -07001/*
2 * Copyright 2015 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
mtkleina64c48f2015-01-21 13:13:31 -08008#ifndef SkAtomics_DEFINED
9#define SkAtomics_DEFINED
10
11// This file is not part of the public Skia API.
Ben Wagnerd5148e32018-07-16 17:44:06 -040012#include "../private/SkNoncopyable.h"
mtkleina64c48f2015-01-21 13:13:31 -080013#include "SkTypes.h"
mtklein23267db2015-11-12 11:07:53 -080014#include <atomic>
15
16// ~~~~~~~~ APIs ~~~~~~~~~
mtkleina64c48f2015-01-21 13:13:31 -080017
mtkleina669bc72015-02-02 12:22:07 -080018enum sk_memory_order {
19 sk_memory_order_relaxed,
20 sk_memory_order_consume,
21 sk_memory_order_acquire,
22 sk_memory_order_release,
23 sk_memory_order_acq_rel,
24 sk_memory_order_seq_cst,
25};
26
// Atomically load *ptr.  The order must be one of relaxed, consume, acquire,
// or seq_cst (checked by SkASSERT in the implementation below).
template <typename T>
T sk_atomic_load(const T*, sk_memory_order = sk_memory_order_seq_cst);

// Atomically store a value to *ptr.  The order must be one of relaxed,
// release, or seq_cst.
template <typename T>
void sk_atomic_store(T*, T, sk_memory_order = sk_memory_order_seq_cst);

// Atomically add to *ptr and return the value *ptr held before the add.
// Any memory order is valid.
template <typename T>
T sk_atomic_fetch_add(T*, T, sk_memory_order = sk_memory_order_seq_cst);

// If *ptr equals *expected, atomically replace it with desired and return
// true; otherwise copy the current value of *ptr into *expected and return
// false.  failure must not be release or acq_rel, and must be no stronger
// than success (checked below via the enumerator ordering).
template <typename T>
bool sk_atomic_compare_exchange(T*, T* expected, T desired,
                                sk_memory_order success = sk_memory_order_seq_cst,
                                sk_memory_order failure = sk_memory_order_seq_cst);
mtklein86821b52015-02-24 14:38:12 -080040
41// A little wrapper class for small T (think, builtins: int, float, void*) to
42// ensure they're always used atomically. This is our stand-in for std::atomic<T>.
mtkleinbf905202015-10-07 12:46:43 -070043// !!! Please _really_ know what you're doing if you change default_memory_order. !!!
44template <typename T, sk_memory_order default_memory_order = sk_memory_order_seq_cst>
mtklein86821b52015-02-24 14:38:12 -080045class SkAtomic : SkNoncopyable {
46public:
47 SkAtomic() {}
mtklein942e99b2015-06-17 07:53:22 -070048 explicit SkAtomic(const T& val) : fVal(val) {}
mtklein86821b52015-02-24 14:38:12 -080049
50 // It is essential we return by value rather than by const&. fVal may change at any time.
mtkleinbf905202015-10-07 12:46:43 -070051 T load(sk_memory_order mo = default_memory_order) const {
mtklein86821b52015-02-24 14:38:12 -080052 return sk_atomic_load(&fVal, mo);
53 }
54
mtkleinbf905202015-10-07 12:46:43 -070055 void store(const T& val, sk_memory_order mo = default_memory_order) {
mtklein86821b52015-02-24 14:38:12 -080056 sk_atomic_store(&fVal, val, mo);
57 }
mtklein59c92032015-02-25 12:51:55 -080058
mtkleinbf905202015-10-07 12:46:43 -070059 // Alias for .load(default_memory_order).
herb08692672015-09-28 08:59:18 -070060 operator T() const {
61 return this->load();
62 }
63
mtkleinbf905202015-10-07 12:46:43 -070064 // Alias for .store(v, default_memory_order).
herb08692672015-09-28 08:59:18 -070065 T operator=(const T& v) {
66 this->store(v);
67 return v;
68 }
mtklein86821b52015-02-24 14:38:12 -080069private:
70 T fVal;
71};
72
mtklein23267db2015-11-12 11:07:53 -080073// ~~~~~~~~ Implementations ~~~~~~~~~
74
75template <typename T>
76T sk_atomic_load(const T* ptr, sk_memory_order mo) {
77 SkASSERT(mo == sk_memory_order_relaxed ||
78 mo == sk_memory_order_seq_cst ||
79 mo == sk_memory_order_acquire ||
80 mo == sk_memory_order_consume);
81 const std::atomic<T>* ap = reinterpret_cast<const std::atomic<T>*>(ptr);
82 return std::atomic_load_explicit(ap, (std::memory_order)mo);
83}
84
85template <typename T>
86void sk_atomic_store(T* ptr, T val, sk_memory_order mo) {
87 SkASSERT(mo == sk_memory_order_relaxed ||
88 mo == sk_memory_order_seq_cst ||
89 mo == sk_memory_order_release);
90 std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
91 return std::atomic_store_explicit(ap, val, (std::memory_order)mo);
92}
93
94template <typename T>
95T sk_atomic_fetch_add(T* ptr, T val, sk_memory_order mo) {
96 // All values of mo are valid.
97 std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
98 return std::atomic_fetch_add_explicit(ap, val, (std::memory_order)mo);
99}
100
101template <typename T>
mtklein23267db2015-11-12 11:07:53 -0800102bool sk_atomic_compare_exchange(T* ptr, T* expected, T desired,
103 sk_memory_order success,
104 sk_memory_order failure) {
105 // All values of success are valid.
106 SkASSERT(failure == sk_memory_order_relaxed ||
107 failure == sk_memory_order_seq_cst ||
108 failure == sk_memory_order_acquire ||
109 failure == sk_memory_order_consume);
110 SkASSERT(failure <= success);
111 std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
112 return std::atomic_compare_exchange_strong_explicit(ap, expected, desired,
113 (std::memory_order)success,
114 (std::memory_order)failure);
115}
116
mtklein23267db2015-11-12 11:07:53 -0800117// ~~~~~~~~ Legacy APIs ~~~~~~~~~
mtkleina64c48f2015-01-21 13:13:31 -0800118
mtkleina669bc72015-02-02 12:22:07 -0800119// From here down we have shims for our old atomics API, to be weaned off of.
120// We use the default sequentially-consistent memory order to make things simple
121// and to match the practical reality of our old _sync and _win implementations.
122
mtklein570c8682016-07-27 08:40:45 -0700123inline int32_t sk_atomic_inc(int32_t* ptr) { return sk_atomic_fetch_add(ptr, +1); }
124inline int32_t sk_atomic_dec(int32_t* ptr) { return sk_atomic_fetch_add(ptr, -1); }
mtkleina669bc72015-02-02 12:22:07 -0800125
mtkleina64c48f2015-01-21 13:13:31 -0800126#endif//SkAtomics_DEFINED