#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */

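/*
 * Illustrative sketch (not part of the original header): the resource
 * counting mentioned above, using a hypothetical nr_users counter and
 * a hypothetical free_the_resource() helper.
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_inc(&nr_users);			(e.g. on open)
 *	if (atomic_dec_and_test(&nr_users))	(e.g. on last close)
 *		free_the_resource();
 */
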
/*
 * Counter is volatile to make sure gcc doesn't try to be clever
 * and move things around on us. We need to use _exactly_ the address
 * the user gave us, not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile long counter; } atomic64_t;

#define ATOMIC_INIT(i)		( (atomic_t) { (i) } )
#define ATOMIC64_INIT(i)	( (atomic64_t) { (i) } )

#define atomic_read(v)		((v)->counter + 0)
#define atomic64_read(v)	((v)->counter + 0)

#define atomic_set(v,i)		((v)->counter = (i))
#define atomic64_set(v,i)	((v)->counter = (i))

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

static __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"		/* load-locked v->counter */
	"	addl %0,%2,%0\n"	/* add i */
	"	stl_c %0,%1\n"		/* store-conditional; clears %0 on failure */
	"	beq %0,2f\n"		/* lost the reservation: retry out of line */
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_add(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic_sub(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_sub(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

/*
 * Same as above, but return the result value.  Unlike the void
 * versions, these are full memory barriers: note the smp_mb() on
 * both sides of the load-locked/store-conditional sequence.
 */
static __inline__ long atomic_add_return(int i, atomic_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%3,%2\n"
	"	addl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic64_add_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%3,%2\n"
	"	addq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic_sub_return(int i, atomic_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%3,%2\n"
	"	subl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%3,%2\n"
	"	subq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

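/*
 * A typical atomic_cmpxchg() user is a read/modify/retry loop like
 * atomic_add_unless() just below: reread the counter whenever the
 * compare-and-exchange observes that someone else changed it.
 */
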
/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
#define atomic_add_unless(v, a, u)			\
({							\
	__typeof__((v)->counter) c, old;		\
	c = atomic_read(v);				\
	for (;;) {					\
		if (unlikely(c == (u)))			\
			break;				\
		old = atomic_cmpxchg((v), c, c + (a));	\
		if (likely(old == c))			\
			break;				\
		c = old;				\
	}						\
	c != (u);					\
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

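/*
 * Illustrative sketch (not part of the original header), assuming a
 * hypothetical refcounted object: atomic_inc_not_zero() lets a reader
 * take a reference only while the object is still live, i.e.
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;		(refcnt already hit zero)
 */
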
/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
#define atomic64_add_unless(v, a, u)			\
({							\
	__typeof__((v)->counter) c, old;		\
	c = atomic64_read(v);				\
	for (;;) {					\
		if (unlikely(c == (u)))			\
			break;				\
		old = atomic64_cmpxchg((v), c, c + (a));	\
		if (likely(old == c))			\
			break;				\
		c = old;				\
	}						\
	c != (u);					\
})
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))

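/*
 * The void atomic_add()/atomic_sub() family above contains no memory
 * barrier of its own, so on Alpha these hooks must be full smp_mb()
 * barriers.  Illustrative use around a hypothetical counter:
 *
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->count);
 *	smp_mb__after_atomic_dec();
 */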
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()

#include <asm-generic/atomic.h>
#endif /* _ALPHA_ATOMIC_H */