/* Issue #23644: <stdatomic.h> is incompatible with C++, see:
   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=60932 */
#if !defined(Py_LIMITED_API) && !defined(__cplusplus)
#ifndef Py_ATOMIC_H
#define Py_ATOMIC_H

#include "dynamic_annotations.h"

#include "pyconfig.h"

#if defined(HAVE_STD_ATOMIC)
#include <stdatomic.h>
#endif

/* This is modeled after the atomics interface from C1x, according to
 * the draft at
 * http://www.open-std.org/JTC1/SC22/wg14/www/docs/n1425.pdf.
 * Operations and types are named the same except with a _Py_ prefix
 * and have the same semantics.
 *
 * Beware, the implementations here are deep magic.
 */

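/* Example usage (an illustrative sketch only: the variable name is made
 * up; the types and macros are the ones defined below in this header):
 *
 *     static _Py_atomic_int request_flag;
 *
 *     _Py_atomic_store(&request_flag, 1);          // seq_cst store
 *     if (_Py_atomic_load(&request_flag)) {        // seq_cst load
 *         _Py_atomic_store_explicit(&request_flag, 0,
 *                                   _Py_memory_order_release);
 *     }
 */
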
#if defined(HAVE_STD_ATOMIC)

typedef enum _Py_memory_order {
    _Py_memory_order_relaxed = memory_order_relaxed,
    _Py_memory_order_acquire = memory_order_acquire,
    _Py_memory_order_release = memory_order_release,
    _Py_memory_order_acq_rel = memory_order_acq_rel,
    _Py_memory_order_seq_cst = memory_order_seq_cst
} _Py_memory_order;

typedef struct _Py_atomic_address {
    _Atomic void *_value;
} _Py_atomic_address;

typedef struct _Py_atomic_int {
    atomic_int _value;
} _Py_atomic_int;

#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) \
    atomic_signal_fence(ORDER)

#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) \
    atomic_thread_fence(ORDER)

#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    atomic_store_explicit(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER)

#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
    atomic_load_explicit(&(ATOMIC_VAL)->_value, ORDER)

/* Use builtin atomic operations in GCC >= 4.7 */
#elif defined(HAVE_BUILTIN_ATOMIC)

typedef enum _Py_memory_order {
    _Py_memory_order_relaxed = __ATOMIC_RELAXED,
    _Py_memory_order_acquire = __ATOMIC_ACQUIRE,
    _Py_memory_order_release = __ATOMIC_RELEASE,
    _Py_memory_order_acq_rel = __ATOMIC_ACQ_REL,
    _Py_memory_order_seq_cst = __ATOMIC_SEQ_CST
} _Py_memory_order;

typedef struct _Py_atomic_address {
    void *_value;
} _Py_atomic_address;

typedef struct _Py_atomic_int {
    int _value;
} _Py_atomic_int;

#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) \
    __atomic_signal_fence(ORDER)

#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) \
    __atomic_thread_fence(ORDER)

#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    (assert((ORDER) == __ATOMIC_RELAXED                       \
            || (ORDER) == __ATOMIC_SEQ_CST                    \
            || (ORDER) == __ATOMIC_RELEASE),                  \
     __atomic_store_n(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER))

#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER)           \
    (assert((ORDER) == __ATOMIC_RELAXED                       \
            || (ORDER) == __ATOMIC_SEQ_CST                    \
            || (ORDER) == __ATOMIC_ACQUIRE                    \
            || (ORDER) == __ATOMIC_CONSUME),                  \
     __atomic_load_n(&(ATOMIC_VAL)->_value, ORDER))

#else

typedef enum _Py_memory_order {
    _Py_memory_order_relaxed,
    _Py_memory_order_acquire,
    _Py_memory_order_release,
    _Py_memory_order_acq_rel,
    _Py_memory_order_seq_cst
} _Py_memory_order;

typedef struct _Py_atomic_address {
    void *_value;
} _Py_atomic_address;

typedef struct _Py_atomic_int {
    int _value;
} _Py_atomic_int;

/* Only support GCC (for expression statements) and x86 (for simple
 * atomic semantics) for now */
#if defined(__GNUC__) && (defined(__i386__) || defined(__amd64))

static __inline__ void
_Py_atomic_signal_fence(_Py_memory_order order)
{
    if (order != _Py_memory_order_relaxed)
        __asm__ volatile("":::"memory");
}

static __inline__ void
_Py_atomic_thread_fence(_Py_memory_order order)
{
    if (order != _Py_memory_order_relaxed)
        __asm__ volatile("mfence":::"memory");
}

/* Tell the race checker about this operation's effects. */
static __inline__ void
_Py_ANNOTATE_MEMORY_ORDER(const volatile void *address, _Py_memory_order order)
{
    (void)address;              /* shut up -Wunused-parameter */
    switch(order) {
    case _Py_memory_order_release:
    case _Py_memory_order_acq_rel:
    case _Py_memory_order_seq_cst:
        _Py_ANNOTATE_HAPPENS_BEFORE(address);
        break;
    case _Py_memory_order_relaxed:
    case _Py_memory_order_acquire:
        break;
    }
    switch(order) {
    case _Py_memory_order_acquire:
    case _Py_memory_order_acq_rel:
    case _Py_memory_order_seq_cst:
        _Py_ANNOTATE_HAPPENS_AFTER(address);
        break;
    case _Py_memory_order_relaxed:
    case _Py_memory_order_release:
        break;
    }
}

#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    __extension__ ({ \
        __typeof__(ATOMIC_VAL) atomic_val = ATOMIC_VAL; \
        __typeof__(atomic_val->_value) new_val = NEW_VAL; \
        volatile __typeof__(new_val) *volatile_data = &atomic_val->_value; \
        _Py_memory_order order = ORDER; \
        _Py_ANNOTATE_MEMORY_ORDER(atomic_val, order); \
        \
        /* Perform the operation. */ \
        _Py_ANNOTATE_IGNORE_WRITES_BEGIN(); \
        switch(order) { \
        case _Py_memory_order_release: \
            _Py_atomic_signal_fence(_Py_memory_order_release); \
            /* fallthrough */ \
        case _Py_memory_order_relaxed: \
            *volatile_data = new_val; \
            break; \
        \
        case _Py_memory_order_acquire: \
        case _Py_memory_order_acq_rel: \
        case _Py_memory_order_seq_cst: \
            __asm__ volatile("xchg %0, %1" \
                             : "+r"(new_val) \
                             : "m"(atomic_val->_value) \
                             : "memory"); \
            break; \
        } \
        _Py_ANNOTATE_IGNORE_WRITES_END(); \
    })

#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
    __extension__ ({ \
        __typeof__(ATOMIC_VAL) atomic_val = ATOMIC_VAL; \
        __typeof__(atomic_val->_value) result; \
        volatile __typeof__(result) *volatile_data = &atomic_val->_value; \
        _Py_memory_order order = ORDER; \
        _Py_ANNOTATE_MEMORY_ORDER(atomic_val, order); \
        \
        /* Perform the operation. */ \
        _Py_ANNOTATE_IGNORE_READS_BEGIN(); \
        switch(order) { \
        case _Py_memory_order_release: \
        case _Py_memory_order_acq_rel: \
        case _Py_memory_order_seq_cst: \
            /* Loads on x86 are not releases by default, so need a */ \
            /* thread fence. */ \
            _Py_atomic_thread_fence(_Py_memory_order_release); \
            break; \
        default: \
            /* No fence */ \
            break; \
        } \
        result = *volatile_data; \
        switch(order) { \
        case _Py_memory_order_acquire: \
        case _Py_memory_order_acq_rel: \
        case _Py_memory_order_seq_cst: \
            /* Loads on x86 are automatically acquire operations so */ \
            /* can get by with just a compiler fence. */ \
            _Py_atomic_signal_fence(_Py_memory_order_acquire); \
            break; \
        default: \
            /* No fence */ \
            break; \
        } \
        _Py_ANNOTATE_IGNORE_READS_END(); \
        result; \
    })

#else  /* !gcc x86 */
/* Fall back to other compilers and processors by assuming that simple
   volatile accesses are atomic.  This is false, so people should port
   this. */
#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) ((void)0)
#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) ((void)0)
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    ((ATOMIC_VAL)->_value = NEW_VAL)
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
    ((ATOMIC_VAL)->_value)

#endif  /* !gcc x86 */
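
/* Porting sketch (an illustration only, not code this header uses): a
 * port to another compiler would replace the fallback macros above with
 * real barriers and atomic accesses.  Assuming MSVC on x86/x64, with the
 * barrier names taken from <windows.h>/<intrin.h>, it might look roughly
 * like:
 *
 *     #define _Py_atomic_signal_fence(ORDER)  _ReadWriteBarrier()
 *     #define _Py_atomic_thread_fence(ORDER)  MemoryBarrier()
 *     // seq_cst store of an int-sized value:
 *     _InterlockedExchange((volatile long *)&(ATOMIC_VAL)->_value, NEW_VAL);
 *
 * This mapping is untested here and is only meant to show the shape such
 * a port would take. */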
#endif

/* Standardized shortcuts. */
#define _Py_atomic_store(ATOMIC_VAL, NEW_VAL) \
    _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, _Py_memory_order_seq_cst)
#define _Py_atomic_load(ATOMIC_VAL) \
    _Py_atomic_load_explicit(ATOMIC_VAL, _Py_memory_order_seq_cst)

/* Python-local extensions */

#define _Py_atomic_store_relaxed(ATOMIC_VAL, NEW_VAL) \
    _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, _Py_memory_order_relaxed)
#define _Py_atomic_load_relaxed(ATOMIC_VAL) \
    _Py_atomic_load_explicit(ATOMIC_VAL, _Py_memory_order_relaxed)
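
/* The relaxed variants are meant for flags that are polled in a hot loop
 * and set from another thread, where a full fence on every iteration
 * would be too expensive (the eval loop's "eval breaker" / GIL-drop
 * request handling in Python/ceval.c uses them this way).  A minimal
 * illustrative sketch:
 *
 *     static _Py_atomic_int pending;
 *
 *     // writer thread
 *     _Py_atomic_store_relaxed(&pending, 1);
 *
 *     // reader loop
 *     if (_Py_atomic_load_relaxed(&pending)) {
 *         ... handle the request ...
 *     }
 */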

#endif  /* Py_ATOMIC_H */
#endif  /* Py_LIMITED_API */