#ifndef Py_ATOMIC_H
#define Py_ATOMIC_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "Py_BUILD_CORE must be defined to include this header"
#endif

#include "dynamic_annotations.h"

#include "pyconfig.h"

#if defined(HAVE_STD_ATOMIC)
#include <stdatomic.h>
#endif


#if defined(_MSC_VER)
#include <intrin.h>
#include <immintrin.h>
#endif

/* This is modeled after the atomics interface from C1x, according to
 * the draft at
 * http://www.open-std.org/JTC1/SC22/wg14/www/docs/n1425.pdf.
 * Operations and types are named the same except with a _Py_ prefix
 * and have the same semantics.
 *
 * Beware, the implementations here are deep magic.
 */
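
/* Illustrative usage (not part of this header; "request" is a hypothetical
 * variable): the macros are used with a pointer to one of the _Py_atomic_*
 * structs, e.g.
 *
 *     static _Py_atomic_int request;
 *     _Py_atomic_store(&request, 1);                 -- seq_cst store
 *     int seen = _Py_atomic_load_relaxed(&request);  -- relaxed load
 *
 * The _Py_atomic_store/_Py_atomic_load shortcuts and the *_relaxed variants
 * are defined at the bottom of this file. */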

#if defined(HAVE_STD_ATOMIC)

typedef enum _Py_memory_order {
    _Py_memory_order_relaxed = memory_order_relaxed,
    _Py_memory_order_acquire = memory_order_acquire,
    _Py_memory_order_release = memory_order_release,
    _Py_memory_order_acq_rel = memory_order_acq_rel,
    _Py_memory_order_seq_cst = memory_order_seq_cst
} _Py_memory_order;

typedef struct _Py_atomic_address {
    atomic_uintptr_t _value;
} _Py_atomic_address;

typedef struct _Py_atomic_int {
    atomic_int _value;
} _Py_atomic_int;

#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) \
    atomic_signal_fence(ORDER)

#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) \
    atomic_thread_fence(ORDER)

#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    atomic_store_explicit(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER)

#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
    atomic_load_explicit(&(ATOMIC_VAL)->_value, ORDER)
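
/* When <stdatomic.h> is available the wrappers above are thin: apart from
 * wrapping the value in a one-field struct, each macro forwards directly to
 * the corresponding C11 operation with the same memory order. */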

/* Use builtin atomic operations in GCC >= 4.7 */
#elif defined(HAVE_BUILTIN_ATOMIC)

typedef enum _Py_memory_order {
    _Py_memory_order_relaxed = __ATOMIC_RELAXED,
    _Py_memory_order_acquire = __ATOMIC_ACQUIRE,
    _Py_memory_order_release = __ATOMIC_RELEASE,
    _Py_memory_order_acq_rel = __ATOMIC_ACQ_REL,
    _Py_memory_order_seq_cst = __ATOMIC_SEQ_CST
} _Py_memory_order;

typedef struct _Py_atomic_address {
    uintptr_t _value;
} _Py_atomic_address;

typedef struct _Py_atomic_int {
    int _value;
} _Py_atomic_int;

#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) \
    __atomic_signal_fence(ORDER)

#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) \
    __atomic_thread_fence(ORDER)

#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    (assert((ORDER) == __ATOMIC_RELAXED                       \
            || (ORDER) == __ATOMIC_SEQ_CST                    \
            || (ORDER) == __ATOMIC_RELEASE),                  \
     __atomic_store_n(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER))

#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER)           \
    (assert((ORDER) == __ATOMIC_RELAXED                       \
            || (ORDER) == __ATOMIC_SEQ_CST                    \
            || (ORDER) == __ATOMIC_ACQUIRE                    \
            || (ORDER) == __ATOMIC_CONSUME),                  \
     __atomic_load_n(&(ATOMIC_VAL)->_value, ORDER))
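
/* The asserts above encode the C11 rules: a plain atomic store may only be
 * relaxed, release or seq_cst (acquire/consume ordering is meaningless for a
 * store), and a plain atomic load may only be relaxed, acquire, consume or
 * seq_cst (release is meaningless for a load).  acq_rel is reserved for
 * read-modify-write operations, which this header does not provide. */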

/* Only support GCC (for expression statements) and x86 (for simple
 * atomic semantics) and MSVC x86/x64/ARM */
#elif defined(__GNUC__) && (defined(__i386__) || defined(__amd64))
typedef enum _Py_memory_order {
    _Py_memory_order_relaxed,
    _Py_memory_order_acquire,
    _Py_memory_order_release,
    _Py_memory_order_acq_rel,
    _Py_memory_order_seq_cst
} _Py_memory_order;

typedef struct _Py_atomic_address {
    uintptr_t _value;
} _Py_atomic_address;

typedef struct _Py_atomic_int {
    int _value;
} _Py_atomic_int;


static __inline__ void
_Py_atomic_signal_fence(_Py_memory_order order)
{
    if (order != _Py_memory_order_relaxed)
        __asm__ volatile("":::"memory");
}

static __inline__ void
_Py_atomic_thread_fence(_Py_memory_order order)
{
    if (order != _Py_memory_order_relaxed)
        __asm__ volatile("mfence":::"memory");
}
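
/* On x86/x86-64 ordinary loads and stores already have acquire/release
 * semantics, so the signal fence only has to stop compiler reordering (an
 * empty asm with a "memory" clobber), while a full thread fence needs a real
 * MFENCE to order earlier stores against later loads. */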

/* Tell the race checker about this operation's effects. */
static __inline__ void
_Py_ANNOTATE_MEMORY_ORDER(const volatile void *address, _Py_memory_order order)
{
    (void)address;              /* shut up -Wunused-parameter */
    switch(order) {
    case _Py_memory_order_release:
    case _Py_memory_order_acq_rel:
    case _Py_memory_order_seq_cst:
        _Py_ANNOTATE_HAPPENS_BEFORE(address);
        break;
    case _Py_memory_order_relaxed:
    case _Py_memory_order_acquire:
        break;
    }
    switch(order) {
    case _Py_memory_order_acquire:
    case _Py_memory_order_acq_rel:
    case _Py_memory_order_seq_cst:
        _Py_ANNOTATE_HAPPENS_AFTER(address);
        break;
    case _Py_memory_order_relaxed:
    case _Py_memory_order_release:
        break;
    }
}

#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    __extension__ ({ \
        __typeof__(ATOMIC_VAL) atomic_val = ATOMIC_VAL; \
        __typeof__(atomic_val->_value) new_val = NEW_VAL; \
        volatile __typeof__(new_val) *volatile_data = &atomic_val->_value; \
        _Py_memory_order order = ORDER; \
        _Py_ANNOTATE_MEMORY_ORDER(atomic_val, order); \
        \
        /* Perform the operation. */ \
        _Py_ANNOTATE_IGNORE_WRITES_BEGIN(); \
        switch(order) { \
        case _Py_memory_order_release: \
            _Py_atomic_signal_fence(_Py_memory_order_release); \
            /* fallthrough */ \
        case _Py_memory_order_relaxed: \
            *volatile_data = new_val; \
            break; \
        \
        case _Py_memory_order_acquire: \
        case _Py_memory_order_acq_rel: \
        case _Py_memory_order_seq_cst: \
            __asm__ volatile("xchg %0, %1" \
                         : "+r"(new_val) \
                         : "m"(atomic_val->_value) \
                         : "memory"); \
            break; \
        } \
        _Py_ANNOTATE_IGNORE_WRITES_END(); \
    })
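
/* For acquire, acq_rel and seq_cst stores the macro above uses XCHG with a
 * memory operand, which on x86 carries an implicit LOCK prefix and therefore
 * acts as a full barrier; that is why no separate MFENCE follows the store. */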

#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
    __extension__ ({ \
        __typeof__(ATOMIC_VAL) atomic_val = ATOMIC_VAL; \
        __typeof__(atomic_val->_value) result; \
        volatile __typeof__(result) *volatile_data = &atomic_val->_value; \
        _Py_memory_order order = ORDER; \
        _Py_ANNOTATE_MEMORY_ORDER(atomic_val, order); \
        \
        /* Perform the operation. */ \
        _Py_ANNOTATE_IGNORE_READS_BEGIN(); \
        switch(order) { \
        case _Py_memory_order_release: \
        case _Py_memory_order_acq_rel: \
        case _Py_memory_order_seq_cst: \
            /* Loads on x86 are not releases by default, so need a */ \
            /* thread fence. */ \
            _Py_atomic_thread_fence(_Py_memory_order_release); \
            break; \
        default: \
            /* No fence */ \
            break; \
        } \
        result = *volatile_data; \
        switch(order) { \
        case _Py_memory_order_acquire: \
        case _Py_memory_order_acq_rel: \
        case _Py_memory_order_seq_cst: \
            /* Loads on x86 are automatically acquire operations so */ \
            /* can get by with just a compiler fence. */ \
            _Py_atomic_signal_fence(_Py_memory_order_acquire); \
            break; \
        default: \
            /* No fence */ \
            break; \
        } \
        _Py_ANNOTATE_IGNORE_READS_END(); \
        result; \
    })

#elif defined(_MSC_VER)
/* _Interlocked* functions provide a full memory barrier and are therefore
   enough for acq_rel and seq_cst. If the HLE variants aren't available
   in hardware they will fall back to a full memory barrier as well.

   This might affect performance, but likely only in some very specific and
   hard to measure scenarios.
*/
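/* Note: in the store macros below even the default case (used for relaxed
   and seq_cst orders) goes through _InterlockedExchange, so every store on
   this branch pays for a full barrier regardless of the requested order. */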
#if defined(_M_IX86) || defined(_M_X64)
typedef enum _Py_memory_order {
    _Py_memory_order_relaxed,
    _Py_memory_order_acquire,
    _Py_memory_order_release,
    _Py_memory_order_acq_rel,
    _Py_memory_order_seq_cst
} _Py_memory_order;

typedef struct _Py_atomic_address {
    volatile uintptr_t _value;
} _Py_atomic_address;

typedef struct _Py_atomic_int {
    volatile int _value;
} _Py_atomic_int;


#if defined(_M_X64)
#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) \
    switch (ORDER) { \
    case _Py_memory_order_acquire: \
      _InterlockedExchange64_HLEAcquire((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
      break; \
    case _Py_memory_order_release: \
      _InterlockedExchange64_HLERelease((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
      break; \
    default: \
      _InterlockedExchange64((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
      break; \
    }
#else
#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) ((void)0);
#endif

#define _Py_atomic_store_32bit(ATOMIC_VAL, NEW_VAL, ORDER) \
    switch (ORDER) { \
    case _Py_memory_order_acquire: \
      _InterlockedExchange_HLEAcquire((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
      break; \
    case _Py_memory_order_release: \
      _InterlockedExchange_HLERelease((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
      break; \
    default: \
      _InterlockedExchange((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
      break; \
    }

#if defined(_M_X64)
/* This has to be an intptr_t for now.
   gil_created() uses -1 as a sentinel value; if this returned a uintptr_t,
   the comparison would be unsigned and it would crash.
*/
inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) {
    __int64 old;
    switch (order) {
    case _Py_memory_order_acquire:
    {
      do {
        old = *value;
      } while(_InterlockedCompareExchange64_HLEAcquire((volatile __int64*)value, old, old) != old);
      break;
    }
    case _Py_memory_order_release:
    {
      do {
        old = *value;
      } while(_InterlockedCompareExchange64_HLERelease((volatile __int64*)value, old, old) != old);
      break;
    }
    case _Py_memory_order_relaxed:
      old = *value;
      break;
    default:
    {
      do {
        old = *value;
      } while(_InterlockedCompareExchange64((volatile __int64*)value, old, old) != old);
      break;
    }
    }
    return old;
}

#else
#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *ATOMIC_VAL
#endif

inline int _Py_atomic_load_32bit(volatile int* value, int order) {
    long old;
    switch (order) {
    case _Py_memory_order_acquire:
    {
      do {
        old = *value;
      } while(_InterlockedCompareExchange_HLEAcquire((volatile long*)value, old, old) != old);
      break;
    }
    case _Py_memory_order_release:
    {
      do {
        old = *value;
      } while(_InterlockedCompareExchange_HLERelease((volatile long*)value, old, old) != old);
      break;
    }
    case _Py_memory_order_relaxed:
      old = *value;
      break;
    default:
    {
      do {
        old = *value;
      } while(_InterlockedCompareExchange((volatile long*)value, old, old) != old);
      break;
    }
    }
    return old;
}

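/* The _explicit macros below dispatch on the operand size at compile time.
   They appear to rely on the caller spelling the argument literally as
   "&obj": after textual substitution, ATOMIC_VAL._value becomes
   &obj._value (member access binds tighter than unary &), which is exactly
   the address the helpers above expect. */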
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
  if (sizeof(*ATOMIC_VAL._value) == 8) { \
    _Py_atomic_store_64bit((volatile long long*)ATOMIC_VAL._value, NEW_VAL, ORDER) } else { \
    _Py_atomic_store_32bit((volatile long*)ATOMIC_VAL._value, NEW_VAL, ORDER) }

#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
  ( \
    sizeof(*(ATOMIC_VAL._value)) == 8 ? \
    _Py_atomic_load_64bit((volatile long long*)ATOMIC_VAL._value, ORDER) : \
    _Py_atomic_load_32bit((volatile long*)ATOMIC_VAL._value, ORDER) \
  )
#elif defined(_M_ARM) || defined(_M_ARM64)
typedef enum _Py_memory_order {
    _Py_memory_order_relaxed,
    _Py_memory_order_acquire,
    _Py_memory_order_release,
    _Py_memory_order_acq_rel,
    _Py_memory_order_seq_cst
} _Py_memory_order;

typedef struct _Py_atomic_address {
    volatile uintptr_t _value;
} _Py_atomic_address;

typedef struct _Py_atomic_int {
    volatile int _value;
} _Py_atomic_int;


#if defined(_M_ARM64)
#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) \
    switch (ORDER) { \
    case _Py_memory_order_acquire: \
      _InterlockedExchange64_acq((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
      break; \
    case _Py_memory_order_release: \
      _InterlockedExchange64_rel((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
      break; \
    default: \
      _InterlockedExchange64((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
      break; \
    }
#else
#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) ((void)0);
#endif

#define _Py_atomic_store_32bit(ATOMIC_VAL, NEW_VAL, ORDER) \
    switch (ORDER) { \
    case _Py_memory_order_acquire: \
      _InterlockedExchange_acq((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
      break; \
    case _Py_memory_order_release: \
      _InterlockedExchange_rel((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
      break; \
    default: \
      _InterlockedExchange((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
      break; \
    }

#if defined(_M_ARM64)
/* This has to be an intptr_t for now.
   gil_created() uses -1 as a sentinel value; if this returned a uintptr_t,
   the comparison would be unsigned and it would crash.
*/
inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) {
    uintptr_t old;
    switch (order) {
    case _Py_memory_order_acquire:
    {
      do {
        old = *value;
      } while(_InterlockedCompareExchange64_acq(value, old, old) != old);
      break;
    }
    case _Py_memory_order_release:
    {
      do {
        old = *value;
      } while(_InterlockedCompareExchange64_rel(value, old, old) != old);
      break;
    }
    case _Py_memory_order_relaxed:
      old = *value;
      break;
    default:
    {
      do {
        old = *value;
      } while(_InterlockedCompareExchange64(value, old, old) != old);
      break;
    }
    }
    return old;
}

#else
#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *ATOMIC_VAL
#endif

inline int _Py_atomic_load_32bit(volatile int* value, int order) {
    int old;
    switch (order) {
    case _Py_memory_order_acquire:
    {
      do {
        old = *value;
      } while(_InterlockedCompareExchange_acq(value, old, old) != old);
      break;
    }
    case _Py_memory_order_release:
    {
      do {
        old = *value;
      } while(_InterlockedCompareExchange_rel(value, old, old) != old);
      break;
    }
    case _Py_memory_order_relaxed:
      old = *value;
      break;
    default:
    {
      do {
        old = *value;
      } while(_InterlockedCompareExchange(value, old, old) != old);
      break;
    }
    }
    return old;
}

#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
  if (sizeof(*ATOMIC_VAL._value) == 8) { \
    _Py_atomic_store_64bit(ATOMIC_VAL._value, NEW_VAL, ORDER) } else { \
    _Py_atomic_store_32bit(ATOMIC_VAL._value, NEW_VAL, ORDER) }

#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
  ( \
    sizeof(*(ATOMIC_VAL._value)) == 8 ? \
    _Py_atomic_load_64bit(ATOMIC_VAL._value, ORDER) : \
    _Py_atomic_load_32bit(ATOMIC_VAL._value, ORDER) \
  )
#endif
#else  /* !gcc x86  !_msc_ver */
typedef enum _Py_memory_order {
    _Py_memory_order_relaxed,
    _Py_memory_order_acquire,
    _Py_memory_order_release,
    _Py_memory_order_acq_rel,
    _Py_memory_order_seq_cst
} _Py_memory_order;

typedef struct _Py_atomic_address {
    uintptr_t _value;
} _Py_atomic_address;

typedef struct _Py_atomic_int {
    int _value;
} _Py_atomic_int;
/* Fall back to other compilers and processors by assuming that simple
   volatile accesses are atomic.  This is false, so people should port
   this. */
#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) ((void)0)
#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) ((void)0)
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    ((ATOMIC_VAL)->_value = NEW_VAL)
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
    ((ATOMIC_VAL)->_value)
#endif

/* Standardized shortcuts. */
#define _Py_atomic_store(ATOMIC_VAL, NEW_VAL) \
    _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, _Py_memory_order_seq_cst)
#define _Py_atomic_load(ATOMIC_VAL) \
    _Py_atomic_load_explicit(ATOMIC_VAL, _Py_memory_order_seq_cst)

/* Python-local extensions */

#define _Py_atomic_store_relaxed(ATOMIC_VAL, NEW_VAL) \
    _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, _Py_memory_order_relaxed)
#define _Py_atomic_load_relaxed(ATOMIC_VAL) \
    _Py_atomic_load_explicit(ATOMIC_VAL, _Py_memory_order_relaxed)
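
/* Illustrative use of the relaxed helpers ("pending_flag" is a hypothetical
 * variable, not part of this header): a flag written by one thread and
 * polled in a hot loop by another needs atomicity but no extra ordering,
 * e.g.
 *
 *     static _Py_atomic_int pending_flag;
 *     _Py_atomic_store_relaxed(&pending_flag, 1);
 *     if (_Py_atomic_load_relaxed(&pending_flag)) { ... }
 */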

#ifdef __cplusplus
}
#endif
#endif  /* Py_ATOMIC_H */