/*===-- atomic.c - Implement support functions for atomic operations.------===
 *
 *                     The LLVM Compiler Infrastructure
 *
 * This file is dual licensed under the MIT and the University of Illinois Open
 * Source Licenses. See LICENSE.TXT for details.
 *
 *===----------------------------------------------------------------------===
 *
 * atomic.c defines a set of functions for performing atomic accesses on
 * arbitrary-sized memory locations.  This design uses locks that should
 * be fast in the uncontended case, for two reasons:
 *
 * 1) This code must work with C programs that do not link to anything
 *    (including pthreads) and so it should not depend on any pthread
 *    functions.
 * 2) Atomic operations, rather than explicit mutexes, are most commonly used
 *    on code where contended operations are rare.
 *
 * To avoid needing a per-object lock, this code allocates an array of
 * locks and hashes the object pointers to find the one that it should use.
 * For operations that must be atomic on two locations, the lower lock is
 * always acquired first, to avoid deadlock.
 *
 *===----------------------------------------------------------------------===
 */
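
/* For illustration only: a sketch of how a compiler might lower an atomic
 * access to a type too large to be lock free into a call to the generic
 * functions defined below (the exact lowering is target- and
 * compiler-dependent):
 *
 *   typedef struct { char bytes[32]; } Big;
 *   _Atomic(Big) shared;
 *   Big local;
 *   // local = shared; (an atomic load) becomes, roughly:
 *   //   __atomic_load(sizeof(Big), &shared, &local, __ATOMIC_SEQ_CST);
 */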

#include <stdint.h>
#include <string.h>

// Clang objects if you redefine a builtin.  This little hack allows us to
// define a function with the same name as an intrinsic.
#pragma redefine_extname __atomic_load_n __atomic_load
#pragma redefine_extname __atomic_store_n __atomic_store
#pragma redefine_extname __atomic_exchange_n __atomic_exchange
#pragma redefine_extname __atomic_compare_exchange_n __atomic_compare_exchange
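
// With the pragmas above, each __atomic_*_n function defined in this file is
// emitted under the corresponding builtin's external symbol; for example, the
// body of __atomic_load_n below provides the __atomic_load symbol that the
// lowering sketched at the top of this file calls.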

/// Number of locks.  This allocates one page on 32-bit platforms, two on
/// 64-bit.  This can be specified externally if a different trade-off between
/// memory usage and contention probability is required for a given platform.
#ifndef SPINLOCK_COUNT
#define SPINLOCK_COUNT (1<<10)
#endif
static const long SPINLOCK_MASK = SPINLOCK_COUNT - 1;

////////////////////////////////////////////////////////////////////////////////
// Platform-specific lock implementation.  Falls back to spinlocks if none is
// defined.  Each platform should define the Lock type, and corresponding
// lock() and unlock() functions.
////////////////////////////////////////////////////////////////////////////////
#ifdef __FreeBSD__
#include <errno.h>
#include <sys/types.h>
#include <machine/atomic.h>
#include <sys/umtx.h>
typedef struct _usem Lock;
inline static void unlock(Lock *l) {
  __atomic_store((_Atomic(uint32_t)*)&l->_count, 1, __ATOMIC_RELEASE);
  __atomic_thread_fence(__ATOMIC_SEQ_CST);
  if (l->_has_waiters)
    _umtx_op(l, UMTX_OP_SEM_WAKE, 1, 0, 0);
}
inline static void lock(Lock *l) {
  uint32_t old = 1;
  while (!__atomic_compare_exchange_weak((_Atomic(uint32_t)*)&l->_count, &old,
         0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
    _umtx_op(l, UMTX_OP_SEM_WAIT, 0, 0, 0);
    old = 1;
  }
}
/// locks for atomic operations
static Lock locks[SPINLOCK_COUNT] = { [0 ... SPINLOCK_COUNT-1] = {0,1,0} };
#else
typedef _Atomic(uintptr_t) Lock;
/// Unlock a lock.  This is a release operation.
inline static void unlock(Lock *l) {
  __atomic_store(l, 0, __ATOMIC_RELEASE);
}
/// Locks a lock.  In the current implementation, this is potentially
/// unbounded in the contended case.
inline static void lock(Lock *l) {
  uintptr_t old = 0;
  while (!__atomic_compare_exchange_weak(l, &old, 1, __ATOMIC_ACQUIRE,
         __ATOMIC_RELAXED))
    old = 0;
}
/// locks for atomic operations
static Lock locks[SPINLOCK_COUNT];
#endif


/// Returns a lock to use for a given pointer.
static inline Lock *lock_for_pointer(void *ptr) {
  intptr_t hash = (intptr_t)ptr;
  // Disregard the lowest 4 bits.  We want all values that may be part of the
  // same memory operation to hash to the same value and therefore use the same
  // lock.
  hash >>= 4;
  // Use the next bits as the basis for the hash
  intptr_t low = hash & SPINLOCK_MASK;
  // Now use the high(er) set of bits to perturb the hash, so that we don't
  // get collisions from atomic fields in a single object
  hash >>= 16;
  hash ^= low;
  // Return a pointer to the word to use
  return locks + (hash & SPINLOCK_MASK);
}
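
// A worked example of the hash above, assuming SPINLOCK_COUNT == (1<<10):
//   ptr == 0x12340: hash = 0x1234 after the >>4; low = 0x234; the higher bits
//                   (hash >> 16) are 0, so the lock index is 0x234.
//   ptr == 0x12348: differs only in the low 4 bits, so it maps to the same
//                   lock, as intended for bytes touched by one operation.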

/// Macros for determining whether a size is lock free.  Clang cannot yet
/// codegen __atomic_is_lock_free(16), so for now we assume 16-byte values are
/// not lock free.
#define IS_LOCK_FREE_1 __atomic_is_lock_free(1)
#define IS_LOCK_FREE_2 __atomic_is_lock_free(2)
#define IS_LOCK_FREE_4 __atomic_is_lock_free(4)
#define IS_LOCK_FREE_8 __atomic_is_lock_free(8)
#define IS_LOCK_FREE_16 0

/// Macro that calls the compiler-generated lock-free versions of functions
/// when they exist.  Each case ends with a break: if the lock-free action
/// does not fire, falling through would perform a wider access on a narrower
/// object.
#define LOCK_FREE_CASES() \
  do {\
  switch (size) {\
    case 2:\
      if (IS_LOCK_FREE_2) {\
        LOCK_FREE_ACTION(uint16_t);\
      }\
      break;\
    case 4:\
      if (IS_LOCK_FREE_4) {\
        LOCK_FREE_ACTION(uint32_t);\
      }\
      break;\
    case 8:\
      if (IS_LOCK_FREE_8) {\
        LOCK_FREE_ACTION(uint64_t);\
      }\
      break;\
    case 16:\
      if (IS_LOCK_FREE_16) {\
        LOCK_FREE_ACTION(__uint128_t);\
      }\
      break;\
  }\
  } while (0)
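
// For reference, a sketch of what LOCK_FREE_CASES() expands to inside
// __atomic_load_n for the 4-byte case, after substituting LOCK_FREE_ACTION:
//
//   case 4:
//     if (__atomic_is_lock_free(4)) {
//       *((uint32_t*)dest) = __atomic_load((_Atomic(uint32_t)*)src, model);
//       return;
//     }
//     break;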


/// An atomic load operation.  This is atomic with respect to the source
/// pointer only.
void __atomic_load_n(int size, void *src, void *dest, int model) {
#define LOCK_FREE_ACTION(type) \
    *((type*)dest) = __atomic_load((_Atomic(type)*)src, model);\
    return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(src);
  lock(l);
  memcpy(dest, src, size);
  unlock(l);
}

/// An atomic store operation.  This is atomic with respect to the destination
/// pointer only.
void __atomic_store_n(int size, void *dest, void *src, int model) {
#define LOCK_FREE_ACTION(type) \
    __atomic_store((_Atomic(type)*)dest, *(type*)src, model);\
    return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(dest);
  lock(l);
  memcpy(dest, src, size);
  unlock(l);
}

/// Atomic compare and exchange operation.  If the value at *ptr is identical
/// to the value at *expected, then this copies the value at *desired to *ptr.
/// If they are not, then this stores the current value from *ptr in
/// *expected.
///
/// This function returns 1 if the exchange takes place or 0 if it fails.
int __atomic_compare_exchange_n(int size, void *ptr, void *expected,
    void *desired, int success, int failure) {
#define LOCK_FREE_ACTION(type) \
  return __atomic_compare_exchange_strong((_Atomic(type)*)ptr, (type*)expected,\
      *(type*)desired, success, failure)
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  if (memcmp(ptr, expected, size) == 0) {
    memcpy(ptr, desired, size);
    unlock(l);
    return 1;
  }
  memcpy(expected, ptr, size);
  unlock(l);
  return 0;
}
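
/* A minimal usage sketch (hypothetical caller) of the locked path above for a
 * type with no lock-free support; note the external symbol is
 * __atomic_compare_exchange because of the pragma at the top of this file:
 *
 *   struct Pair { void *a, *b; } obj, expected, desired;
 *   int ok = __atomic_compare_exchange(sizeof(obj), &obj, &expected,
 *       &desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
 *   // ok == 1: obj matched expected and now holds desired.
 *   // ok == 0: expected has been updated with the value observed in obj.
 */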

/// Performs an atomic exchange operation between two pointers.  This is atomic
/// with respect to the target address.
void __atomic_exchange_n(int size, void *ptr, void *val, void *old, int model) {
#define LOCK_FREE_ACTION(type) \
    *(type*)old = __atomic_exchange((_Atomic(type)*)ptr, *(type*)val,\
        model);\
    return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  memcpy(old, ptr, size);
  memcpy(ptr, val, size);
  unlock(l);
}

////////////////////////////////////////////////////////////////////////////////
// Where the size is known at compile time, the compiler may emit calls to
// specialised versions of the above functions.
////////////////////////////////////////////////////////////////////////////////
#define OPTIMISED_CASES\
  OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)\
  OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)\
  OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)\
  OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)\
  OPTIMISED_CASE(16, IS_LOCK_FREE_16, __uint128_t)\

#define OPTIMISED_CASE(n, lockfree, type)\
type __atomic_load_##n(type *src, int model) {\
  if (lockfree)\
    return __atomic_load((_Atomic(type)*)src, model);\
  Lock *l = lock_for_pointer(src);\
  lock(l);\
  type val = *src;\
  unlock(l);\
  return val;\
}
OPTIMISED_CASES
#undef OPTIMISED_CASE
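
// For example, the OPTIMISED_CASES expansion above defines, among others:
//
//   uint32_t __atomic_load_4(uint32_t *src, int model);
//
// which the compiler may call directly when it sees an atomic load whose
// 4-byte size is known at compile time.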

#define OPTIMISED_CASE(n, lockfree, type)\
void __atomic_store_##n(type *dest, type val, int model) {\
  if (lockfree) {\
    __atomic_store((_Atomic(type)*)dest, val, model);\
    return;\
  }\
  Lock *l = lock_for_pointer(dest);\
  lock(l);\
  *dest = val;\
  unlock(l);\
  return;\
}
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type)\
type __atomic_exchange_##n(type *dest, type val, int model) {\
  if (lockfree)\
    return __atomic_exchange((_Atomic(type)*)dest, val, model);\
  Lock *l = lock_for_pointer(dest);\
  lock(l);\
  type tmp = *dest;\
  *dest = val;\
  unlock(l);\
  return tmp;\
}
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type)\
int __atomic_compare_exchange_##n(type *ptr, type *expected, type desired,\
    int success, int failure) {\
  if (lockfree)\
    return __atomic_compare_exchange_strong((_Atomic(type)*)ptr, expected, desired,\
        success, failure);\
  Lock *l = lock_for_pointer(ptr);\
  lock(l);\
  if (*ptr == *expected) {\
    *ptr = desired;\
    unlock(l);\
    return 1;\
  }\
  *expected = *ptr;\
  unlock(l);\
  return 0;\
}
OPTIMISED_CASES
#undef OPTIMISED_CASE

////////////////////////////////////////////////////////////////////////////////
// Atomic read-modify-write operations for integers of various sizes.
////////////////////////////////////////////////////////////////////////////////
#define ATOMIC_RMW(n, lockfree, type, opname, op) \
type __atomic_fetch_##opname##_##n(type *ptr, type val, int model) {\
  if (lockfree) \
    return __atomic_fetch_##opname((_Atomic(type)*)ptr, val, model);\
  Lock *l = lock_for_pointer(ptr);\
  lock(l);\
  type tmp = *ptr;\
  *ptr = tmp op val;\
  unlock(l);\
  return tmp;\
}
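
// For example, ATOMIC_RMW(4, IS_LOCK_FREE_4, uint32_t, add, +) below defines:
//
//   uint32_t __atomic_fetch_add_4(uint32_t *ptr, uint32_t val, int model) {
//     if (IS_LOCK_FREE_4)
//       return __atomic_fetch_add((_Atomic(uint32_t)*)ptr, val, model);
//     Lock *l = lock_for_pointer(ptr);
//     lock(l);
//     uint32_t tmp = *ptr;
//     *ptr = tmp + val;
//     unlock(l);
//     return tmp;
//   }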

#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, add, +)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, sub, -)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, and, &)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, or, |)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, xor, ^)
OPTIMISED_CASES
#undef OPTIMISED_CASE