#ifndef Py_LIMITED_API
#ifndef Py_ATOMIC_H
#define Py_ATOMIC_H

#include "dynamic_annotations.h"

#include "pyconfig.h"

#if defined(HAVE_STD_ATOMIC)
#include <stdatomic.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

/* This is modeled after the atomics interface from C1x, according to
 * the draft at
 * http://www.open-std.org/JTC1/SC22/wg14/www/docs/n1425.pdf.
 * Operations and types are named the same except with a _Py_ prefix
 * and have the same semantics.
 *
 * Beware, the implementations here are deep magic.
 */
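
/* Illustrative usage (a minimal sketch, not part of the original header;
 * the variable name "flag" is hypothetical):
 *
 *     static _Py_atomic_int flag;
 *
 *     _Py_atomic_store_explicit(&flag, 1, _Py_memory_order_release);
 *     if (_Py_atomic_load_explicit(&flag, _Py_memory_order_acquire)) {
 *         ...
 *     }
 *
 * The _explicit variants take one of the _Py_memory_order values defined
 * below; the _Py_atomic_store()/_Py_atomic_load() shortcuts at the end of
 * this header default to sequential consistency.
 */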

#if defined(HAVE_STD_ATOMIC)

typedef enum _Py_memory_order {
    _Py_memory_order_relaxed = memory_order_relaxed,
    _Py_memory_order_acquire = memory_order_acquire,
    _Py_memory_order_release = memory_order_release,
    _Py_memory_order_acq_rel = memory_order_acq_rel,
    _Py_memory_order_seq_cst = memory_order_seq_cst
} _Py_memory_order;

typedef struct _Py_atomic_address {
    _Atomic void *_value;
} _Py_atomic_address;

typedef struct _Py_atomic_int {
    atomic_int _value;
} _Py_atomic_int;

#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) \
    atomic_signal_fence(ORDER)

#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) \
    atomic_thread_fence(ORDER)

#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    atomic_store_explicit(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER)

#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
    atomic_load_explicit(&(ATOMIC_VAL)->_value, ORDER)

/* Use builtin atomic operations in GCC >= 4.7 */
#elif defined(HAVE_BUILTIN_ATOMIC)

typedef enum _Py_memory_order {
    _Py_memory_order_relaxed = __ATOMIC_RELAXED,
    _Py_memory_order_acquire = __ATOMIC_ACQUIRE,
    _Py_memory_order_release = __ATOMIC_RELEASE,
    _Py_memory_order_acq_rel = __ATOMIC_ACQ_REL,
    _Py_memory_order_seq_cst = __ATOMIC_SEQ_CST
} _Py_memory_order;

typedef struct _Py_atomic_address {
    void *_value;
} _Py_atomic_address;

typedef struct _Py_atomic_int {
    int _value;
} _Py_atomic_int;

#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) \
    __atomic_signal_fence(ORDER)

#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) \
    __atomic_thread_fence(ORDER)

#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    (assert((ORDER) == __ATOMIC_RELAXED                       \
            || (ORDER) == __ATOMIC_SEQ_CST                    \
            || (ORDER) == __ATOMIC_RELEASE),                  \
     __atomic_store_n(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER))

#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER)           \
    (assert((ORDER) == __ATOMIC_RELAXED                       \
            || (ORDER) == __ATOMIC_SEQ_CST                    \
            || (ORDER) == __ATOMIC_ACQUIRE                    \
            || (ORDER) == __ATOMIC_CONSUME),                  \
     __atomic_load_n(&(ATOMIC_VAL)->_value, ORDER))

#else

typedef enum _Py_memory_order {
    _Py_memory_order_relaxed,
    _Py_memory_order_acquire,
    _Py_memory_order_release,
    _Py_memory_order_acq_rel,
    _Py_memory_order_seq_cst
} _Py_memory_order;

typedef struct _Py_atomic_address {
    void *_value;
} _Py_atomic_address;

typedef struct _Py_atomic_int {
    int _value;
} _Py_atomic_int;

/* Only support GCC (for expression statements) and x86 (for simple
 * atomic semantics) for now */
#if defined(__GNUC__) && (defined(__i386__) || defined(__amd64))

static __inline__ void
_Py_atomic_signal_fence(_Py_memory_order order)
{
    if (order != _Py_memory_order_relaxed)
        __asm__ volatile("":::"memory");
}

static __inline__ void
_Py_atomic_thread_fence(_Py_memory_order order)
{
    if (order != _Py_memory_order_relaxed)
        __asm__ volatile("mfence":::"memory");
}

/* Tell the race checker about this operation's effects. */
static __inline__ void
_Py_ANNOTATE_MEMORY_ORDER(const volatile void *address, _Py_memory_order order)
{
    (void)address;  /* shut up -Wunused-parameter */
    switch(order) {
    case _Py_memory_order_release:
    case _Py_memory_order_acq_rel:
    case _Py_memory_order_seq_cst:
        _Py_ANNOTATE_HAPPENS_BEFORE(address);
        break;
    case _Py_memory_order_relaxed:
    case _Py_memory_order_acquire:
        break;
    }
    switch(order) {
    case _Py_memory_order_acquire:
    case _Py_memory_order_acq_rel:
    case _Py_memory_order_seq_cst:
        _Py_ANNOTATE_HAPPENS_AFTER(address);
        break;
    case _Py_memory_order_relaxed:
    case _Py_memory_order_release:
        break;
    }
}

#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    __extension__ ({ \
        __typeof__(ATOMIC_VAL) atomic_val = ATOMIC_VAL; \
        __typeof__(atomic_val->_value) new_val = NEW_VAL;\
        volatile __typeof__(new_val) *volatile_data = &atomic_val->_value; \
        _Py_memory_order order = ORDER; \
        _Py_ANNOTATE_MEMORY_ORDER(atomic_val, order); \
        \
        /* Perform the operation. */ \
        _Py_ANNOTATE_IGNORE_WRITES_BEGIN(); \
        switch(order) { \
        case _Py_memory_order_release: \
            _Py_atomic_signal_fence(_Py_memory_order_release); \
            /* fallthrough */ \
        case _Py_memory_order_relaxed: \
            *volatile_data = new_val; \
            break; \
        \
        case _Py_memory_order_acquire: \
        case _Py_memory_order_acq_rel: \
        case _Py_memory_order_seq_cst: \
            __asm__ volatile("xchg %0, %1" \
                             : "+r"(new_val) \
                             : "m"(atomic_val->_value) \
                             : "memory"); \
            break; \
        } \
        _Py_ANNOTATE_IGNORE_WRITES_END(); \
    })

#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
    __extension__ ({ \
        __typeof__(ATOMIC_VAL) atomic_val = ATOMIC_VAL; \
        __typeof__(atomic_val->_value) result; \
        volatile __typeof__(result) *volatile_data = &atomic_val->_value; \
        _Py_memory_order order = ORDER; \
        _Py_ANNOTATE_MEMORY_ORDER(atomic_val, order); \
        \
        /* Perform the operation. */ \
        _Py_ANNOTATE_IGNORE_READS_BEGIN(); \
        switch(order) { \
        case _Py_memory_order_release: \
        case _Py_memory_order_acq_rel: \
        case _Py_memory_order_seq_cst: \
            /* Loads on x86 are not releases by default, so need a */ \
            /* thread fence. */ \
            _Py_atomic_thread_fence(_Py_memory_order_release); \
            break; \
        default: \
            /* No fence */ \
            break; \
        } \
        result = *volatile_data; \
        switch(order) { \
        case _Py_memory_order_acquire: \
        case _Py_memory_order_acq_rel: \
        case _Py_memory_order_seq_cst: \
            /* Loads on x86 are automatically acquire operations so */ \
            /* can get by with just a compiler fence. */ \
            _Py_atomic_signal_fence(_Py_memory_order_acquire); \
            break; \
        default: \
            /* No fence */ \
            break; \
        } \
        _Py_ANNOTATE_IGNORE_READS_END(); \
        result; \
    })

#else /* !gcc x86 */
/* Fall back to other compilers and processors by assuming that simple
   volatile accesses are atomic. This is false, so people should port
   this. */
#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) ((void)0)
#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) ((void)0)
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    ((ATOMIC_VAL)->_value = NEW_VAL)
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
    ((ATOMIC_VAL)->_value)

#endif /* !gcc x86 */
#endif

/* Standardized shortcuts. */
#define _Py_atomic_store(ATOMIC_VAL, NEW_VAL) \
    _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, _Py_memory_order_seq_cst)
#define _Py_atomic_load(ATOMIC_VAL) \
    _Py_atomic_load_explicit(ATOMIC_VAL, _Py_memory_order_seq_cst)

/* Python-local extensions */

#define _Py_atomic_store_relaxed(ATOMIC_VAL, NEW_VAL) \
    _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, _Py_memory_order_relaxed)
#define _Py_atomic_load_relaxed(ATOMIC_VAL) \
    _Py_atomic_load_explicit(ATOMIC_VAL, _Py_memory_order_relaxed)
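
/* Illustrative usage of the relaxed variants (a minimal sketch, not part of
 * the original header; the variable name "pending_flag" is hypothetical):
 *
 *     static _Py_atomic_int pending_flag;
 *
 *     _Py_atomic_store_relaxed(&pending_flag, 1);
 *     if (_Py_atomic_load_relaxed(&pending_flag)) {
 *         ...
 *     }
 *
 * Relaxed accesses guarantee atomicity of the individual read or write but
 * impose no ordering on surrounding memory operations; use _Py_atomic_store()
 * and _Py_atomic_load() when sequential consistency is required.
 */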

#ifdef __cplusplus
}
#endif

#endif /* Py_ATOMIC_H */
#endif /* Py_LIMITED_API */