/*-
 * Copyright (c) 2009-2010 Brad Penoff
 * Copyright (c) 2009-2010 Humaira Kamal
 * Copyright (c) 2011-2012 Irene Ruengeler
 * Copyright (c) 2011-2012 Michael Tuexen
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _USER_ATOMIC_H_
#define _USER_ATOMIC_H_

/* __Userspace__ version of sys/i386/include/atomic.h goes here */

/* TODO In the future, we might want to stop using i386-specific assembly.
 * The options include:
 * - implementing the operations generically in userspace (though perhaps
 *   not truly atomically?); a C11 sketch follows the includes below
 * - adding #ifdefs for something like __Userspace_arch_ (the OS alone
 *   is not enough...)
 */

#include <stdio.h>
#include <sys/types.h>

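/* A minimal sketch of the "implement them generically" option from the TODO
 * above, assuming a C11 toolchain with <stdatomic.h> (not available on all
 * platforms this file targets). Kept compiled out; shown for illustration
 * only, not as part of this file's supported configurations.
 */
#if 0 /* sketch: generic C11 implementation */
#include <stdatomic.h>

/* Atomically add/subtract V to/from *P, discarding the returned value. */
#define atomic_add_int(P, V)      (void)atomic_fetch_add((_Atomic int *)(P), (V))
#define atomic_subtract_int(P, V) (void)atomic_fetch_sub((_Atomic int *)(P), (V))

/* Atomically add v to *p and return the previous value of *p. */
#define atomic_fetchadd_int(p, v) atomic_fetch_add((_Atomic int *)(p), (v))

/* if (*dst == exp) *dst = src; returns 0 on failure, non-zero on success. */
static inline int
atomic_cmpset_int(volatile int *dst, int exp, int src)
{
	return (atomic_compare_exchange_strong((_Atomic int *)dst, &exp, src));
}
#endif /* sketch: generic C11 implementation */
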
#if defined(__Userspace_os_Darwin) || defined (__Userspace_os_Windows)
#if defined (__Userspace_os_Windows)
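/* Note: InterlockedExchangeAdd() returns the value *addr held before the
 * addition, which is why the refcount check below compares against 1.
 */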
#define atomic_add_int(addr, val) InterlockedExchangeAdd((LPLONG)addr, (LONG)val)
#define atomic_fetchadd_int(addr, val) InterlockedExchangeAdd((LPLONG)addr, (LONG)val)
#define atomic_subtract_int(addr, val) InterlockedExchangeAdd((LPLONG)addr,-((LONG)val))
/* InterlockedCompareExchange() returns the initial value of *dst; compare it
 * with exp so the result is non-zero exactly when the swap took place,
 * matching the atomic_cmpset_int() contract documented below. */
#define atomic_cmpset_int(dst, exp, src) (InterlockedCompareExchange((LPLONG)dst, src, exp) == (LONG)(exp))
#define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) (InterlockedExchangeAdd((LPLONG)addr, (-1L)) == 1)
#else
#include <libkern/OSAtomic.h>
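/* Note: OSAtomicAdd32Barrier() returns the value *addr holds after the
 * addition, so atomic_fetchadd_int() has "new value" semantics on Darwin
 * (unlike the "previous value" semantics of the other branches); the
 * refcount check below therefore compares against 0.
 */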
#define atomic_add_int(addr, val) OSAtomicAdd32Barrier(val, (int32_t *)addr)
#define atomic_fetchadd_int(addr, val) OSAtomicAdd32Barrier(val, (int32_t *)addr)
#define atomic_subtract_int(addr, val) OSAtomicAdd32Barrier(-val, (int32_t *)addr)
#define atomic_cmpset_int(dst, exp, src) OSAtomicCompareAndSwapIntBarrier(exp, src, (int *)dst)
#define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) (atomic_fetchadd_int(addr, -1) == 0)
#endif

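/* SCTP_SAVE_ATOMIC_DECREMENT() subtracts val from *addr; with INVARIANTS it
 * panics if the counter went negative, otherwise it clamps *addr to 0.
 * Because atomic_fetchadd_int() returns the updated value on Darwin but the
 * previous value on Windows, the "newval < 0" guard is exact only on Darwin.
 */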
#if defined(INVARIANTS)
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
	int32_t newval; \
	newval = atomic_fetchadd_int(addr, -val); \
	if (newval < 0) { \
		panic("Counter goes negative"); \
	} \
}
#else
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
	int32_t newval; \
	newval = atomic_fetchadd_int(addr, -val); \
	if (newval < 0) { \
		*addr = 0; \
	} \
}
#if defined(__Userspace_os_Windows)
static void atomic_init() {} /* empty when we are not using atomic_mtx */
#else
static inline void atomic_init() {} /* empty when we are not using atomic_mtx */
#endif
#endif

#else
/* Using gcc built-in functions for atomic memory operations.
 * Reference: http://gcc.gnu.org/onlinedocs/gcc-4.1.0/gcc/Atomic-Builtins.html
 * Requires gcc 4.1.0 or later; compile with -march=i486.
 */

/* Atomically add V to *P. */
#define atomic_add_int(P, V) (void) __sync_fetch_and_add(P, V)

/* Atomically subtract V from *P. */
#define atomic_subtract_int(P, V) (void) __sync_fetch_and_sub(P, V)

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
#define atomic_fetchadd_int(p, v) __sync_fetch_and_add(p, v)

/* Following the explanation in src/sys/i386/include/atomic.h,
 * for atomic compare and set:
 *
 * if (*dst == exp) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */

#define atomic_cmpset_int(dst, exp, src) __sync_bool_compare_and_swap(dst, exp, src)

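/* A typical use of the contract above is a compare-and-swap retry loop. The
 * compiled-out sketch below (the function name is illustrative and not part
 * of this file) atomically sets a flag bit in a word:
 */
#if 0 /* sketch: CAS retry loop */
static inline void
set_flag_bit(volatile u_int *word, u_int bit)
{
	u_int old;

	do {
		old = *word;
		/* retry if another thread modified *word in the meantime */
	} while (atomic_cmpset_int(word, old, old | bit) == 0);
}
#endif /* sketch: CAS retry loop */
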
#define SCTP_DECREMENT_AND_CHECK_REFCOUNT(addr) (atomic_fetchadd_int(addr, -1) == 1)
#if defined(INVARIANTS)
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
	int32_t oldval; \
	oldval = atomic_fetchadd_int(addr, -val); \
	if (oldval < val) { \
		panic("Counter goes negative"); \
	} \
}
#else
#define SCTP_SAVE_ATOMIC_DECREMENT(addr, val) \
{ \
	int32_t oldval; \
	oldval = atomic_fetchadd_int(addr, -val); \
	if (oldval < val) { \
		*addr = 0; \
	} \
}
#endif
static inline void atomic_init() {} /* empty when we are not using atomic_mtx */
#endif

#if 0 /* using libatomic_ops */
#include "user_include/atomic_ops.h"

/* Atomically add V to *P, returning the original value of *P. */
#define atomic_add_int(P, V) AO_fetch_and_add((AO_t*)P, V)

#define atomic_subtract_int(P, V) AO_fetch_and_add((AO_t*)P, -(V))

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
#define atomic_fetchadd_int(p, v) AO_fetch_and_add((AO_t*)p, v)

/* Atomic compare and set: compare *dst to exp and, if they match, replace
 * *dst by src. This matches the explanation in src/sys/i386/include/atomic.h
 * as well as the contract of AO_compare_and_swap():
 *
 * if (*dst == exp) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */

#define atomic_cmpset_int(dst, exp, src) AO_compare_and_swap((AO_t*)dst, exp, src)

static inline void atomic_init() {} /* empty when we are not using atomic_mtx */
#endif /* closing #if for libatomic */

#if 0 /* using atomic_mtx */

#if !defined (__Userspace_os_Windows)
#include <pthread.h>
#endif

extern userland_mutex_t atomic_mtx;

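/* This variant funnels every "atomic" operation through the single global
 * atomic_mtx, trading performance for portability: correctness then depends
 * on every caller using these wrappers, and atomic_init() must run before
 * any of them is used.
 */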
#if defined (__Userspace_os_Windows)
static inline void atomic_init() {
	InitializeCriticalSection(&atomic_mtx);
}
static inline void atomic_destroy() {
	DeleteCriticalSection(&atomic_mtx);
}
static inline void atomic_lock() {
	EnterCriticalSection(&atomic_mtx);
}
static inline void atomic_unlock() {
	LeaveCriticalSection(&atomic_mtx);
}
#else
static inline void atomic_init() {
	(void)pthread_mutex_init(&atomic_mtx, NULL);
}
static inline void atomic_destroy() {
	(void)pthread_mutex_destroy(&atomic_mtx);
}
static inline void atomic_lock() {
	(void)pthread_mutex_lock(&atomic_mtx);
}
static inline void atomic_unlock() {
	(void)pthread_mutex_unlock(&atomic_mtx);
}
#endif
/*
 * For userland, always use lock prefixes so that the binaries will run
 * on both SMP and !SMP systems.
 */

#define MPLOCKED "lock ; "

/*
 * Atomically add the value of v to the integer pointed to by p and return
 * the previous value of *p.
 */
static __inline u_int
atomic_fetchadd_int(volatile void *n, u_int v)
{
	int *p = (int *) n;

	atomic_lock();
	__asm __volatile(
	"	" MPLOCKED "		"
	"	xaddl	%0, %1 ;	"
	"# atomic_fetchadd_int"
	: "+r" (v),			/* 0 (result) */
	  "=m" (*p)			/* 1 */
	: "m" (*p));			/* 2 */
	atomic_unlock();

	return (v);
}


#ifdef CPU_DISABLE_CMPXCHG

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	u_char res;

	atomic_lock();
	__asm __volatile(
	"	pushfl ;		"
	"	cli ;			"
	"	cmpl	%3,%4 ;		"
	"	jne	1f ;		"
	"	movl	%2,%1 ;		"
	"1:				"
	"	sete	%0 ;		"
	"	popfl ;			"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "=m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "r" (exp),			/* 3 */
	  "m" (*dst)			/* 4 */
	: "memory");
	atomic_unlock();

	return (res);
}

#else /* !CPU_DISABLE_CMPXCHG */

static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
	u_char res;

	atomic_lock();
	__asm __volatile(
	"	" MPLOCKED "		"
	"	cmpxchgl %2,%1 ;	"
	"	sete	%0 ;		"
	"# atomic_cmpset_int"
	: "=a" (res),			/* 0 */
	  "=m" (*dst)			/* 1 */
	: "r" (src),			/* 2 */
	  "a" (exp),			/* 3 */
	  "m" (*dst)			/* 4 */
	: "memory");
	atomic_unlock();

	return (res);
}

#endif /* CPU_DISABLE_CMPXCHG */

#define atomic_add_int(P, V) do { \
	atomic_lock(); \
	(*(u_int *)(P) += (V)); \
	atomic_unlock(); \
} while (0)
#define atomic_subtract_int(P, V) do { \
	atomic_lock(); \
	(*(u_int *)(P) -= (V)); \
	atomic_unlock(); \
} while (0)

#endif /* closing #if for atomic_mtx */
#endif /* _USER_ATOMIC_H_ */