#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

typedef struct { volatile int counter; } atomic_t;

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <asm/synch.h>
#include <asm/asm-compat.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)		{ (i) }

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		(((v)->counter) = (i))

static __inline__ void atomic_add(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_add\n\
	add	%0,%2,%0\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ int atomic_add_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2		# atomic_add_return\n\
	add	%0,%1,%0\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

static __inline__ void atomic_sub(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_sub\n\
	subf	%0,%2,%0\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ int atomic_sub_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2		# atomic_sub_return\n\
	subf	%0,%1,%0\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc");
}

static __inline__ int atomic_inc_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%1		# atomic_inc_return\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
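/*
 * Illustrative usage sketch (not part of this header; the variable and
 * helper below are hypothetical): starting a counter at -1 makes the
 * first increment land on zero, so atomic_inc_and_test() fires exactly
 * once.
 *
 *	static atomic_t pending = ATOMIC_INIT(-1);
 *
 *	if (atomic_inc_and_test(&pending))
 *		do_once();	// only the increment that reached 0 gets here
 */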

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%2)\
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc");
}

static __inline__ int atomic_dec_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%1		# atomic_dec_return\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%1		# atomic_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq-	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	ISYNC_ON_SMP
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
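/*
 * Illustrative usage sketch (hypothetical refcounted object, not
 * defined in this header): atomic_inc_not_zero() takes a new reference
 * only while at least one reference is still held, which is the usual
 * pattern for a lookup path racing against the final put.
 *
 *	struct obj { atomic_t refcnt; };
 *
 *	static struct obj *obj_tryget(struct obj *o)
 *	{
 *		return atomic_inc_not_zero(&o->refcnt) ? o : NULL;
 *	}
 */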

#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
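/*
 * Illustrative usage sketch (hypothetical credit counter, not part of
 * this header): atomic_dec_if_positive() consumes one unit only when
 * one is available; a negative return value means the counter was not
 * decremented.
 *
 *	static atomic_t credits = ATOMIC_INIT(4);
 *
 *	if (atomic_dec_if_positive(&credits) < 0)
 *		return -EBUSY;	// nothing was consumed
 */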

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#ifdef __powerpc64__

typedef struct { volatile long counter; } atomic64_t;

#define ATOMIC64_INIT(i)	{ (i) }

#define atomic64_read(v)	((v)->counter)
#define atomic64_set(v,i)	(((v)->counter) = (i))

static __inline__ void atomic64_add(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%3		# atomic64_add\n\
	add	%0,%2,%0\n\
	stdcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ long atomic64_add_return(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2		# atomic64_add_return\n\
	add	%0,%1,%0\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

static __inline__ void atomic64_sub(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%3		# atomic64_sub\n\
	subf	%0,%2,%0\n\
	stdcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2		# atomic64_sub_return\n\
	subf	%0,%1,%0\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc");
}

static __inline__ long atomic64_inc_return(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%1		# atomic64_inc_return\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%1 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
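/*
 * Illustrative usage sketch (hypothetical 64-bit counter, not part of
 * this header): same pattern as atomic_inc_and_test(), for a counter
 * wide enough that 32-bit wraparound is not a concern.
 *
 *	static atomic64_t seq = ATOMIC64_INIT(-1);
 *
 *	if (atomic64_inc_and_test(&seq))
 *		handle_wrap();	// hypothetical callback
 */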

static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc");
}

static __inline__ long atomic64_dec_return(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%1		# atomic64_dec_return\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
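/*
 * Illustrative usage sketch (hypothetical 64-bit byte budget, not part
 * of this header): same contract as atomic_dec_if_positive(), so a
 * negative return value means nothing was decremented.
 *
 *	static atomic64_t bytes_left = ATOMIC64_INIT(1024 * 1024);
 *
 *	if (atomic64_dec_if_positive(&bytes_left) < 0)
 *		throttle();	// hypothetical back-off path
 */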

#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long t;

	__asm__ __volatile__ (
	LWSYNC_ON_SMP
435"1: ldarx %0,0,%1 # atomic_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq-	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	ISYNC_ON_SMP
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
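/*
 * Illustrative usage sketch (hypothetical 64-bit refcount, not part of
 * this header): as with the 32-bit variant, the increment only succeeds
 * while the count is non-zero, which keeps a lookup safe against a
 * concurrent final decrement.
 *
 *	struct big_obj { atomic64_t refcnt; };
 *
 *	static struct big_obj *big_obj_tryget(struct big_obj *o)
 *	{
 *		return atomic64_inc_not_zero(&o->refcnt) ? o : NULL;
 *	}
 */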

#endif /* __powerpc64__ */

#include <asm-generic/atomic.h>
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */