#ifndef Py_LIMITED_API
#ifndef Py_ATOMIC_H
#define Py_ATOMIC_H
/* XXX: When compilers start offering a stdatomic.h with lock-free
   atomic_int and atomic_address types, include that here and rewrite
   the atomic operations in terms of it. */

#include "dynamic_annotations.h"

#ifdef __cplusplus
extern "C" {
#endif

/* This is modeled after the atomics interface from C1x, according to
 * the draft at
 * http://www.open-std.org/JTC1/SC22/wg14/www/docs/n1425.pdf.
 * Operations and types are named the same except with a _Py_ prefix
 * and have the same semantics.
 *
 * Beware, the implementations here are deep magic.
 */

typedef enum _Py_memory_order {
    _Py_memory_order_relaxed,
    _Py_memory_order_acquire,
    _Py_memory_order_release,
    _Py_memory_order_acq_rel,
    _Py_memory_order_seq_cst
} _Py_memory_order;

typedef struct _Py_atomic_address {
    void *_value;
} _Py_atomic_address;

typedef struct _Py_atomic_int {
    int _value;
} _Py_atomic_int;
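
/* Illustrative usage (hypothetical caller code, not part of this header;
 * the _Py_atomic_store/_Py_atomic_load shortcuts are defined further below):
 *
 *     static _Py_atomic_int request;
 *
 *     void set_request(void) { _Py_atomic_store(&request, 1); }
 *     int  get_request(void) { return _Py_atomic_load(&request); }
 */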

/* Only support GCC (for expression statements) and x86 (for simple
 * atomic semantics) for now */
#if defined(__GNUC__) && (defined(__i386__) || defined(__amd64))

static __inline__ void
_Py_atomic_signal_fence(_Py_memory_order order)
{
    if (order != _Py_memory_order_relaxed)
        __asm__ volatile("":::"memory");
}

static __inline__ void
_Py_atomic_thread_fence(_Py_memory_order order)
{
    if (order != _Py_memory_order_relaxed)
        __asm__ volatile("mfence":::"memory");
}
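
/* Note: the signal fence only restrains the compiler (an empty asm with a
 * "memory" clobber), while the thread fence emits mfence, a full hardware
 * barrier.  Under x86's TSO model the only hardware reordering is a store
 * being delayed past a later load, which is what mfence exists to prevent. */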

/* Tell the race checker about this operation's effects. */
static __inline__ void
_Py_ANNOTATE_MEMORY_ORDER(const volatile void *address, _Py_memory_order order)
{
    (void)address;              /* shut up -Wunused-parameter */
    switch(order) {
    case _Py_memory_order_release:
    case _Py_memory_order_acq_rel:
    case _Py_memory_order_seq_cst:
        _Py_ANNOTATE_HAPPENS_BEFORE(address);
        break;
    case _Py_memory_order_relaxed:
    case _Py_memory_order_acquire:
        break;
    }
    switch(order) {
    case _Py_memory_order_acquire:
    case _Py_memory_order_acq_rel:
    case _Py_memory_order_seq_cst:
        _Py_ANNOTATE_HAPPENS_AFTER(address);
        break;
    case _Py_memory_order_relaxed:
    case _Py_memory_order_release:
        break;
    }
}

#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    __extension__ ({ \
        __typeof__(ATOMIC_VAL) atomic_val = ATOMIC_VAL; \
        __typeof__(atomic_val->_value) new_val = NEW_VAL; \
        volatile __typeof__(new_val) *volatile_data = &atomic_val->_value; \
        _Py_memory_order order = ORDER; \
        _Py_ANNOTATE_MEMORY_ORDER(atomic_val, order); \
        \
        /* Perform the operation. */ \
        _Py_ANNOTATE_IGNORE_WRITES_BEGIN(); \
        switch(order) { \
        case _Py_memory_order_release: \
            _Py_atomic_signal_fence(_Py_memory_order_release); \
            /* fallthrough */ \
        case _Py_memory_order_relaxed: \
            *volatile_data = new_val; \
            break; \
        \
        case _Py_memory_order_acquire: \
        case _Py_memory_order_acq_rel: \
        case _Py_memory_order_seq_cst: \
            __asm__ volatile("xchg %0, %1" \
                             : "+r"(new_val) \
                             : "m"(atomic_val->_value) \
                             : "memory"); \
            break; \
        } \
        _Py_ANNOTATE_IGNORE_WRITES_END(); \
    })
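
/* Why xchg above: on x86 an xchg with a memory operand carries an implicit
 * LOCK prefix and therefore acts as a full memory barrier, so it yields a
 * sequentially consistent store in a single instruction.  A plain store
 * followed by mfence would be an equivalent alternative. */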

#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
    __extension__ ({ \
        __typeof__(ATOMIC_VAL) atomic_val = ATOMIC_VAL; \
        __typeof__(atomic_val->_value) result; \
        volatile __typeof__(result) *volatile_data = &atomic_val->_value; \
        _Py_memory_order order = ORDER; \
        _Py_ANNOTATE_MEMORY_ORDER(atomic_val, order); \
        \
        /* Perform the operation. */ \
        _Py_ANNOTATE_IGNORE_READS_BEGIN(); \
        switch(order) { \
        case _Py_memory_order_release: \
        case _Py_memory_order_acq_rel: \
        case _Py_memory_order_seq_cst: \
            /* Loads on x86 are not releases by default, so need a */ \
            /* thread fence. */ \
            _Py_atomic_thread_fence(_Py_memory_order_release); \
            break; \
        default: \
            /* No fence */ \
            break; \
        } \
        result = *volatile_data; \
        switch(order) { \
        case _Py_memory_order_acquire: \
        case _Py_memory_order_acq_rel: \
        case _Py_memory_order_seq_cst: \
            /* Loads on x86 are automatically acquire operations so */ \
            /* can get by with just a compiler fence. */ \
            _Py_atomic_signal_fence(_Py_memory_order_acquire); \
            break; \
        default: \
            /* No fence */ \
            break; \
        } \
        _Py_ANNOTATE_IGNORE_READS_END(); \
        result; \
    })
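
/* Why the fences above: x86 can delay a store past a later load, so a
 * release/acq_rel/seq_cst load issues mfence first to keep it ordered after
 * earlier stores; the load itself already has acquire semantics on x86, so
 * only a compiler fence is needed after it. */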

#else  /* !gcc x86 */
/* Fall back to other compilers and processors by assuming that simple
   volatile accesses are atomic.  This is false, so people should port
   this. */
#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) ((void)0)
#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) ((void)0)
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    ((ATOMIC_VAL)->_value = NEW_VAL)
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
    ((ATOMIC_VAL)->_value)

#endif  /* !gcc x86 */

/* Standardized shortcuts. */
#define _Py_atomic_store(ATOMIC_VAL, NEW_VAL) \
    _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, _Py_memory_order_seq_cst)
#define _Py_atomic_load(ATOMIC_VAL) \
    _Py_atomic_load_explicit(ATOMIC_VAL, _Py_memory_order_seq_cst)

/* Python-local extensions */

#define _Py_atomic_store_relaxed(ATOMIC_VAL, NEW_VAL) \
    _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, _Py_memory_order_relaxed)
#define _Py_atomic_load_relaxed(ATOMIC_VAL) \
    _Py_atomic_load_explicit(ATOMIC_VAL, _Py_memory_order_relaxed)
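
/* Illustrative use of the relaxed variants (hypothetical, not part of this
 * header): a flag polled in a hot loop can be read with
 * _Py_atomic_load_relaxed and set by the signalling thread with the seq_cst
 * _Py_atomic_store:
 *
 *     while (!_Py_atomic_load_relaxed(&request)) { do_work(); }
 */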

#ifdef __cplusplus
}
#endif

#endif  /* Py_ATOMIC_H */
#endif  /* Py_LIMITED_API */