#ifndef Py_ATOMIC_H
#define Py_ATOMIC_H
#ifdef Py_BUILD_CORE

#include "dynamic_annotations.h"

#include "pyconfig.h"

#if defined(HAVE_STD_ATOMIC)
#include <stdatomic.h>
#endif

/* This is modeled after the atomics interface from C1x (since
 * standardized as C11), according to the draft at
 * http://www.open-std.org/JTC1/SC22/wg14/www/docs/n1425.pdf.
 * Operations and types are named the same except with a _Py_ prefix
 * and have the same semantics.
 *
 * Beware, the implementations here are deep magic.
 */
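
/* Usage sketch (illustrative only, not part of this header).  Assuming
 * two threads share a flag, a release store in one thread pairs with an
 * acquire load in the other, so that everything written before the
 * store is visible once the load observes it:
 *
 *     static _Py_atomic_int initialized = {0};
 *
 *     (thread A)  setup_state();
 *                 _Py_atomic_store_explicit(&initialized, 1,
 *                                           _Py_memory_order_release);
 *
 *     (thread B)  if (_Py_atomic_load_explicit(&initialized,
 *                                              _Py_memory_order_acquire))
 *                     use_state();
 *
 * `initialized`, `setup_state()` and `use_state()` are hypothetical
 * names, not CPython APIs.
 */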

#if defined(HAVE_STD_ATOMIC)

typedef enum _Py_memory_order {
    _Py_memory_order_relaxed = memory_order_relaxed,
    _Py_memory_order_acquire = memory_order_acquire,
    _Py_memory_order_release = memory_order_release,
    _Py_memory_order_acq_rel = memory_order_acq_rel,
    _Py_memory_order_seq_cst = memory_order_seq_cst
} _Py_memory_order;

typedef struct _Py_atomic_address {
    atomic_uintptr_t _value;
} _Py_atomic_address;

typedef struct _Py_atomic_int {
    atomic_int _value;
} _Py_atomic_int;

#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) \
    atomic_signal_fence(ORDER)

#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) \
    atomic_thread_fence(ORDER)

#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    atomic_store_explicit(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER)

#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
    atomic_load_explicit(&(ATOMIC_VAL)->_value, ORDER)
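
/* For example (illustrative expansion, assuming HAVE_STD_ATOMIC and a
 * _Py_atomic_address named `addr`):
 *
 *     _Py_atomic_store_explicit(&addr, (uintptr_t)ptr,
 *                               _Py_memory_order_release)
 *
 * expands to
 *
 *     atomic_store_explicit(&(&addr)->_value, (uintptr_t)ptr,
 *                           _Py_memory_order_release)
 *
 * which is valid because each _Py_memory_order value is defined above
 * to equal the corresponding C11 memory_order value.
 */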

/* Use builtin atomic operations in GCC >= 4.7 */
#elif defined(HAVE_BUILTIN_ATOMIC)

#include <assert.h>     /* the store/load wrappers below assert valid orders */

typedef enum _Py_memory_order {
    _Py_memory_order_relaxed = __ATOMIC_RELAXED,
    _Py_memory_order_acquire = __ATOMIC_ACQUIRE,
    _Py_memory_order_release = __ATOMIC_RELEASE,
    _Py_memory_order_acq_rel = __ATOMIC_ACQ_REL,
    _Py_memory_order_seq_cst = __ATOMIC_SEQ_CST
} _Py_memory_order;

typedef struct _Py_atomic_address {
    uintptr_t _value;
} _Py_atomic_address;

typedef struct _Py_atomic_int {
    int _value;
} _Py_atomic_int;

#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) \
    __atomic_signal_fence(ORDER)

#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) \
    __atomic_thread_fence(ORDER)

#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    (assert((ORDER) == __ATOMIC_RELAXED \
            || (ORDER) == __ATOMIC_SEQ_CST \
            || (ORDER) == __ATOMIC_RELEASE), \
     __atomic_store_n(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER))

#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
    (assert((ORDER) == __ATOMIC_RELAXED \
            || (ORDER) == __ATOMIC_SEQ_CST \
            || (ORDER) == __ATOMIC_ACQUIRE \
            || (ORDER) == __ATOMIC_CONSUME), \
     __atomic_load_n(&(ATOMIC_VAL)->_value, ORDER))
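
/* The asserts above encode C11's validity rules: a store may not
 * request an acquire (or consume/acq_rel) ordering, and a load may not
 * request a release (or acq_rel) ordering.  For example,
 *
 *     _Py_atomic_store_explicit(&flag, 1, _Py_memory_order_acquire);
 *
 * is invalid, and fails the assert in a debug build instead of
 * silently passing a meaningless ordering to __atomic_store_n().
 * (`flag` is a hypothetical _Py_atomic_int.)
 */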

#else

typedef enum _Py_memory_order {
    _Py_memory_order_relaxed,
    _Py_memory_order_acquire,
    _Py_memory_order_release,
    _Py_memory_order_acq_rel,
    _Py_memory_order_seq_cst
} _Py_memory_order;

typedef struct _Py_atomic_address {
    uintptr_t _value;
} _Py_atomic_address;

typedef struct _Py_atomic_int {
    int _value;
} _Py_atomic_int;

/* Only support GCC (for expression statements) and x86 (for simple
 * atomic semantics) for now */
#if defined(__GNUC__) && (defined(__i386__) || defined(__amd64))

static __inline__ void
_Py_atomic_signal_fence(_Py_memory_order order)
{
    if (order != _Py_memory_order_relaxed)
        __asm__ volatile("":::"memory");
}

static __inline__ void
_Py_atomic_thread_fence(_Py_memory_order order)
{
    if (order != _Py_memory_order_relaxed)
        __asm__ volatile("mfence":::"memory");
}
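
/* Note: mfence is a full barrier, so this is stronger than strictly
 * necessary for acquire- or release-only fences on x86 (where a
 * compiler barrier would suffice); the fallback favors simplicity
 * over speed. */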

/* Tell the race checker about this operation's effects. */
static __inline__ void
_Py_ANNOTATE_MEMORY_ORDER(const volatile void *address, _Py_memory_order order)
{
    (void)address;              /* shut up -Wunused-parameter */
    switch(order) {
    case _Py_memory_order_release:
    case _Py_memory_order_acq_rel:
    case _Py_memory_order_seq_cst:
        _Py_ANNOTATE_HAPPENS_BEFORE(address);
        break;
    case _Py_memory_order_relaxed:
    case _Py_memory_order_acquire:
        break;
    }
    switch(order) {
    case _Py_memory_order_acquire:
    case _Py_memory_order_acq_rel:
    case _Py_memory_order_seq_cst:
        _Py_ANNOTATE_HAPPENS_AFTER(address);
        break;
    case _Py_memory_order_relaxed:
    case _Py_memory_order_release:
        break;
    }
}

#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    __extension__ ({ \
        __typeof__(ATOMIC_VAL) atomic_val = (ATOMIC_VAL); \
        __typeof__(atomic_val->_value) new_val = (NEW_VAL); \
        volatile __typeof__(new_val) *volatile_data = &atomic_val->_value; \
        _Py_memory_order order = (ORDER); \
        _Py_ANNOTATE_MEMORY_ORDER(atomic_val, order); \
        \
        /* Perform the operation. */ \
        _Py_ANNOTATE_IGNORE_WRITES_BEGIN(); \
        switch(order) { \
        case _Py_memory_order_release: \
            _Py_atomic_signal_fence(_Py_memory_order_release); \
            /* fallthrough */ \
        case _Py_memory_order_relaxed: \
            *volatile_data = new_val; \
            break; \
        \
        case _Py_memory_order_acquire: \
        case _Py_memory_order_acq_rel: \
        case _Py_memory_order_seq_cst: \
            /* xchg both reads and writes its memory operand, so list it \
               as a read-write output ("+m") rather than an input. */ \
            __asm__ volatile("xchg %0, %1" \
                         : "+r"(new_val), \
                           "+m"(atomic_val->_value) \
                         : /* no inputs */ \
                         : "memory"); \
            break; \
        } \
        _Py_ANNOTATE_IGNORE_WRITES_END(); \
    })
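
/* On x86, an xchg with a memory operand carries an implicit LOCK
 * prefix and acts as a full barrier, which is why it implements the
 * sequentially consistent store above (rather than, say, a mov
 * followed by mfence). */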

#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
    __extension__ ({ \
        __typeof__(ATOMIC_VAL) atomic_val = (ATOMIC_VAL); \
        __typeof__(atomic_val->_value) result; \
        volatile __typeof__(result) *volatile_data = &atomic_val->_value; \
        _Py_memory_order order = (ORDER); \
        _Py_ANNOTATE_MEMORY_ORDER(atomic_val, order); \
        \
        /* Perform the operation. */ \
        _Py_ANNOTATE_IGNORE_READS_BEGIN(); \
        switch(order) { \
        case _Py_memory_order_release: \
        case _Py_memory_order_acq_rel: \
        case _Py_memory_order_seq_cst: \
            /* Loads on x86 are not releases by default, so need a */ \
            /* thread fence. */ \
            _Py_atomic_thread_fence(_Py_memory_order_release); \
            break; \
        default: \
            /* No fence */ \
            break; \
        } \
        result = *volatile_data; \
        switch(order) { \
        case _Py_memory_order_acquire: \
        case _Py_memory_order_acq_rel: \
        case _Py_memory_order_seq_cst: \
            /* Loads on x86 are automatically acquire operations so */ \
            /* can get by with just a compiler fence. */ \
            _Py_atomic_signal_fence(_Py_memory_order_acquire); \
            break; \
        default: \
            /* No fence */ \
            break; \
        } \
        _Py_ANNOTATE_IGNORE_READS_END(); \
        result; \
    })
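
/* Fence summary for the load above, by requested ordering (x86):
 *
 *     relaxed, consume        plain volatile load, no fences
 *     acquire                 load, then compiler fence
 *     release                 mfence, then load
 *     acq_rel, seq_cst        mfence, load, then compiler fence
 */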

#else /* !gcc x86 */
/* Fall back to other compilers and processors by assuming that simple
   volatile accesses are atomic.  That assumption is not actually true,
   so other platforms need a real port. */
#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) ((void)0)
#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) ((void)0)
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    ((ATOMIC_VAL)->_value = NEW_VAL)
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
    ((ATOMIC_VAL)->_value)

#endif /* !gcc x86 */
#endif /* HAVE_STD_ATOMIC */

/* Standardized shortcuts. */
#define _Py_atomic_store(ATOMIC_VAL, NEW_VAL) \
    _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, _Py_memory_order_seq_cst)
#define _Py_atomic_load(ATOMIC_VAL) \
    _Py_atomic_load_explicit(ATOMIC_VAL, _Py_memory_order_seq_cst)

/* Python-local extensions */

#define _Py_atomic_store_relaxed(ATOMIC_VAL, NEW_VAL) \
    _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, _Py_memory_order_relaxed)
#define _Py_atomic_load_relaxed(ATOMIC_VAL) \
    _Py_atomic_load_explicit(ATOMIC_VAL, _Py_memory_order_relaxed)
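
/* A typical pattern for the relaxed helpers (illustrative sketch; the
 * names are modeled on, but not taken verbatim from, how the
 * interpreter polls a flag on a hot path):
 *
 *     static _Py_atomic_int pending_flag = {0};
 *
 *     (signaling thread)  _Py_atomic_store_relaxed(&pending_flag, 1);
 *
 *     (hot loop)          if (_Py_atomic_load_relaxed(&pending_flag)) {
 *                             ... handle the request ...
 *                         }
 *
 * Relaxed ordering is enough here because only the flag itself is
 * communicated; any payload handed between the threads would also need
 * release/acquire ordering (or a lock).
 */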

#endif /* Py_BUILD_CORE */
#endif /* Py_ATOMIC_H */