#ifndef Py_LIMITED_API
#ifndef Py_ATOMIC_H
#define Py_ATOMIC_H
/* XXX: When compilers start offering a stdatomic.h with lock-free
   atomic_int and atomic_address types, include that here and rewrite
   the atomic operations in terms of it. */

#include "dynamic_annotations.h"

#ifdef __cplusplus
extern "C" {
#endif

/* This is modeled after the atomics interface from C1x (since
 * standardized as C11), according to the draft at
 * http://www.open-std.org/JTC1/SC22/wg14/www/docs/n1425.pdf.
 * Operations and types are named the same as in the draft, except with
 * a _Py_ prefix, and have the same semantics.
 *
 * Beware, the implementations here are deep magic.
 */

typedef enum _Py_memory_order {
    _Py_memory_order_relaxed,
    _Py_memory_order_acquire,
    _Py_memory_order_release,
    _Py_memory_order_acq_rel,
    _Py_memory_order_seq_cst
} _Py_memory_order;

typedef struct _Py_atomic_address {
    void *_value;
} _Py_atomic_address;

typedef struct _Py_atomic_int {
    int _value;
} _Py_atomic_int;

/* Only support GCC (for expression statements) and x86 (for simple
 * atomic semantics) for now */
#if defined(__GNUC__) && (defined(__i386__) || defined(__amd64))

static __inline__ void
_Py_atomic_signal_fence(_Py_memory_order order)
{
    if (order != _Py_memory_order_relaxed)
        __asm__ volatile("":::"memory");
}

static __inline__ void
_Py_atomic_thread_fence(_Py_memory_order order)
{
    if (order != _Py_memory_order_relaxed)
        __asm__ volatile("mfence":::"memory");
}
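
/* Example (illustrative only; `x`, `y`, and `r` are hypothetical
 * variables, not part of this header): the signal fence is only a
 * compiler barrier, while the thread fence emits an actual mfence
 * instruction. The hardware fence is what prevents x86's store-load
 * reordering:
 *
 *     x = 1;
 *     _Py_atomic_thread_fence(_Py_memory_order_seq_cst);
 *     r = y;
 *
 * Without the fence, the store to x may become visible to other
 * processors after the load of y has already happened. */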

/* Tell the race checker about this operation's effects. */
static __inline__ void
_Py_ANNOTATE_MEMORY_ORDER(const volatile void *address, _Py_memory_order order)
{
    switch(order) {
    case _Py_memory_order_release:
    case _Py_memory_order_acq_rel:
    case _Py_memory_order_seq_cst:
        _Py_ANNOTATE_HAPPENS_BEFORE(address);
        break;
    case _Py_memory_order_relaxed:
    case _Py_memory_order_acquire:
        break;
    }
    switch(order) {
    case _Py_memory_order_acquire:
    case _Py_memory_order_acq_rel:
    case _Py_memory_order_seq_cst:
        _Py_ANNOTATE_HAPPENS_AFTER(address);
        break;
    case _Py_memory_order_relaxed:
    case _Py_memory_order_release:
        break;
    }
}

#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    __extension__ ({ \
        __typeof__(ATOMIC_VAL) atomic_val = ATOMIC_VAL; \
        __typeof__(atomic_val->_value) new_val = NEW_VAL; \
        volatile __typeof__(new_val) *volatile_data = &atomic_val->_value; \
        _Py_memory_order order = ORDER; \
        _Py_ANNOTATE_MEMORY_ORDER(atomic_val, order); \
        \
        /* Perform the operation. */ \
        _Py_ANNOTATE_IGNORE_WRITES_BEGIN(); \
        switch(order) { \
        case _Py_memory_order_release: \
            _Py_atomic_signal_fence(_Py_memory_order_release); \
            /* fallthrough */ \
        case _Py_memory_order_relaxed: \
            *volatile_data = new_val; \
            break; \
        \
        case _Py_memory_order_acquire: \
        case _Py_memory_order_acq_rel: \
        case _Py_memory_order_seq_cst: \
            __asm__ volatile("xchg %0, %1" \
                             : "+r"(new_val) \
                             : "m"(atomic_val->_value) \
                             : "memory"); \
            break; \
        } \
        _Py_ANNOTATE_IGNORE_WRITES_END(); \
    })
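
/* Example (illustrative only; `ready` is a hypothetical variable, not
 * part of this header): a release store, which the macro above compiles
 * to a compiler barrier followed by a plain store on x86:
 *
 *     static _Py_atomic_int ready;
 *     _Py_atomic_store_explicit(&ready, 1, _Py_memory_order_release);
 *
 * A seq_cst store instead takes the `xchg` branch, since a plain store
 * is not sequentially consistent on x86. */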

#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
    __extension__ ({ \
        __typeof__(ATOMIC_VAL) atomic_val = ATOMIC_VAL; \
        __typeof__(atomic_val->_value) result; \
        volatile __typeof__(result) *volatile_data = &atomic_val->_value; \
        _Py_memory_order order = ORDER; \
        _Py_ANNOTATE_MEMORY_ORDER(atomic_val, order); \
        \
        /* Perform the operation. */ \
        _Py_ANNOTATE_IGNORE_READS_BEGIN(); \
        switch(order) { \
        case _Py_memory_order_release: \
        case _Py_memory_order_acq_rel: \
        case _Py_memory_order_seq_cst: \
            /* Loads on x86 are not release operations by default, */ \
            /* so we need a full thread fence. */ \
            _Py_atomic_thread_fence(_Py_memory_order_release); \
            break; \
        default: \
            /* No fence */ \
            break; \
        } \
        result = *volatile_data; \
        switch(order) { \
        case _Py_memory_order_acquire: \
        case _Py_memory_order_acq_rel: \
        case _Py_memory_order_seq_cst: \
            /* Loads on x86 are automatically acquire operations, so */ \
            /* we can get by with just a compiler fence. */ \
            _Py_atomic_signal_fence(_Py_memory_order_acquire); \
            break; \
        default: \
            /* No fence */ \
            break; \
        } \
        _Py_ANNOTATE_IGNORE_READS_END(); \
        result; \
    })
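
/* Example (illustrative only; `ready` is the hypothetical variable from
 * the store example above): an acquire load pairs with a release store,
 * so anything written before the store is visible once the load
 * observes the flag:
 *
 *     if (_Py_atomic_load_explicit(&ready, _Py_memory_order_acquire)) {
 *         ... the data published before the release store is safe to read ...
 *     }
 */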

#else /* !gcc x86 */
/* Fall back to other compilers and processors by assuming that simple
   volatile accesses are atomic. This is false in general, so this
   fallback should be properly ported to each new platform. */
#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) ((void)0)
#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) ((void)0)
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    ((ATOMIC_VAL)->_value = (NEW_VAL))
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
    ((ATOMIC_VAL)->_value)

#endif /* !gcc x86 */

/* Standardized shortcuts. */
#define _Py_atomic_store(ATOMIC_VAL, NEW_VAL) \
    _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, _Py_memory_order_seq_cst)
#define _Py_atomic_load(ATOMIC_VAL) \
    _Py_atomic_load_explicit(ATOMIC_VAL, _Py_memory_order_seq_cst)

/* Python-local extensions */

#define _Py_atomic_store_relaxed(ATOMIC_VAL, NEW_VAL) \
    _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, _Py_memory_order_relaxed)
#define _Py_atomic_load_relaxed(ATOMIC_VAL) \
    _Py_atomic_load_explicit(ATOMIC_VAL, _Py_memory_order_relaxed)
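
/* Example (illustrative only; `pending` and the two functions below are
 * hypothetical, not part of this header): a flag set from one thread
 * and polled cheaply from another:
 *
 *     static _Py_atomic_int pending;
 *
 *     void signal_work(void)
 *     {
 *         _Py_atomic_store(&pending, 1);
 *     }
 *
 *     int work_pending(void)
 *     {
 *         return _Py_atomic_load_relaxed(&pending);
 *     }
 *
 * The relaxed load keeps the hot polling path to a plain memory read,
 * while the seq_cst store keeps the flag update ordered after any data
 * written before it. */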

#ifdef __cplusplus
}
#endif

#endif /* Py_ATOMIC_H */
#endif /* Py_LIMITED_API */