#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */


#define ATOMIC_INIT(i)		( (atomic_t) { (i) } )
#define ATOMIC64_INIT(i)	( (atomic64_t) { (i) } )

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic64_read(v)	(*(volatile long *)&(v)->counter)

#define atomic_set(v,i)		((v)->counter = (i))
#define atomic64_set(v,i)	((v)->counter = (i))
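
/*
 * Minimal usage sketch (illustrative only, not part of this header):
 * a counter is declared with ATOMIC_INIT and then touched only through
 * the accessors above, e.g.
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *	...
 *	atomic_set(&nr_users, 0);
 *	if (atomic_read(&nr_users) != 0)
 *		do_something();
 *
 * "nr_users" and "do_something" are hypothetical names used only for
 * this example.
 */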

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */
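
/*
 * Each operation below is built on Alpha's load-locked/store-conditional
 * pair (ldl_l/stl_c for 32-bit values, ldq_l/stq_c for 64-bit). The
 * store-conditional leaves 0 in its source register when the reservation
 * taken by the locked load has been lost (e.g. another CPU wrote the
 * location in between), so the "beq %0,2f" branches out of line and the
 * whole sequence is retried.
 */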

static __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_add(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic_sub(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_sub(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}


/*
 * Same as above, but return the result value
 */
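
/*
 * Note: unlike the plain atomic_add()/atomic_sub() above, the *_return
 * variants below issue smp_mb() both before and after the ll/sc
 * sequence, so on SMP they also act as full memory barriers.
 */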
static inline int atomic_add_return(int i, atomic_t *v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%3,%2\n"
	"	addl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic64_add_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%3,%2\n"
	"	addq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic_sub_return(int i, atomic_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%3,%2\n"
	"	subl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%3,%2\n"
	"	subq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

/*
 * Atomic exchange routines.
 */
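/*
 * asm/xchg.h is included twice: first with ____xchg()/____cmpxchg()
 * expanding to the "_local" names and __ASM__MB empty, which generates
 * the unordered *_local variants, and then again below with __ASM__MB
 * set to "mb" on SMP to generate the fully ordered xchg()/cmpxchg()
 * family.
 */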

#define __ASM__MB
#define ____xchg(type, args...)		__xchg ## type ## _local(args)
#define ____cmpxchg(type, args...)	__cmpxchg ## type ## _local(args)
#include <asm/xchg.h>

#define xchg_local(ptr,x) \
({ \
	__typeof__(*(ptr)) _x_ = (x); \
	(__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_, \
					  sizeof(*(ptr))); \
})

#define cmpxchg_local(ptr, o, n) \
({ \
	__typeof__(*(ptr)) _o_ = (o); \
	__typeof__(*(ptr)) _n_ = (n); \
	(__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
					     (unsigned long)_n_, \
					     sizeof(*(ptr))); \
})

#define cmpxchg64_local(ptr, o, n) \
({ \
	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
	cmpxchg_local((ptr), (o), (n)); \
})

#ifdef CONFIG_SMP
#undef __ASM__MB
#define __ASM__MB	"\tmb\n"
#endif
#undef ____xchg
#undef ____cmpxchg
#define ____xchg(type, args...)		__xchg ##type(args)
#define ____cmpxchg(type, args...)	__cmpxchg ##type(args)
#include <asm/xchg.h>

#define xchg(ptr,x) \
({ \
	__typeof__(*(ptr)) _x_ = (x); \
	(__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, \
				    sizeof(*(ptr))); \
})

#define cmpxchg(ptr, o, n) \
({ \
	__typeof__(*(ptr)) _o_ = (o); \
	__typeof__(*(ptr)) _n_ = (n); \
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
				       (unsigned long)_n_, sizeof(*(ptr))); \
})

#define cmpxchg64(ptr, o, n) \
({ \
	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
	cmpxchg((ptr), (o), (n)); \
})
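
/*
 * Illustrative sketch of the usual cmpxchg() retry loop ("ctr" is a
 * hypothetical variable, not something defined in this header):
 *
 *	long old, new;
 *
 *	do {
 *		old = ctr;
 *		new = old + 1;
 *	} while (cmpxchg(&ctr, old, new) != old);
 *
 * cmpxchg() returns the value actually found at the location, so the
 * loop repeats until no other CPU modified it between the read and the
 * compare-and-exchange.
 */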

#undef __ASM__MB
#undef ____cmpxchg

#define __HAVE_ARCH_CMPXCHG 1

#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
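
/*
 * Usage sketch (hypothetical caller): the generic atomic_add_unless()
 * and atomic_inc_not_zero() helpers in <linux/atomic.h> are built on
 * __atomic_add_unless() and succeed only while the counter does not
 * hold the forbidden value, e.g. taking a reference only while an
 * object's refcount is still non-zero:
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;
 *
 * "obj" and "refcnt" are illustrative names only.
 */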

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the add was performed (@v was not @u), zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))

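/*
 * The plain atomic_inc()/atomic_dec()/atomic_add()/atomic_sub() above
 * imply no memory barrier, so callers that need ordering around them
 * use these hooks; on Alpha they all expand to a full smp_mb().
 */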
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#endif /* _ALPHA_ATOMIC_H */