#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#include <linux/types.h>

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <asm/synch.h>
#include <asm/asm-compat.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)		{ (i) }
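
/*
 * Example (illustrative only; "pending_work" is a made-up name):
 * static initialization of an atomic counter with ATOMIC_INIT:
 *
 *	static atomic_t pending_work = ATOMIC_INIT(0);
 */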

static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
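
/*
 * Note on the templates above: "%U1" and "%X1" are GCC operand
 * modifiers for PowerPC memory operands.  %U emits the update-form
 * suffix ("u") and %X the indexed-form suffix ("x") when the compiler
 * chooses such an addressing mode, so the single lwz/stw always
 * matches the address the compiler picked for v->counter.
 */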

static __inline__ void atomic_add(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_add\n\
	add	%0,%2,%0\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}
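
/*
 * The loop above is the canonical PowerPC atomic-update sequence:
 * lwarx loads the word and places a reservation on it, stwcx. stores
 * the new value only if the reservation is still held (setting CR0.EQ
 * on success), and bne- retries when another store intervened.
 * PPC405_ERR77() expands to the PPC405 erratum #77 workaround on
 * affected parts and to nothing elsewhere.
 */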

static __inline__ int atomic_add_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	lwarx	%0,0,%2		# atomic_add_return\n\
	add	%0,%1,%0\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
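
/*
 * atomic_add() above carries no barriers: atomics that return no value
 * may be freely reordered.  The *_return variants instead bracket the
 * update with PPC_RELEASE_BARRIER and PPC_ACQUIRE_BARRIER (typically
 * lwsync and isync), giving them the full-barrier semantics the kernel
 * requires of value-returning atomic operations.
 */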

static __inline__ void atomic_sub(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_sub\n\
	subf	%0,%2,%0\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ int atomic_sub_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	lwarx	%0,0,%2		# atomic_sub_return\n\
	subf	%0,%1,%0\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
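
/*
 * atomic_inc() uses addic (add immediate carrying), which updates the
 * carry bit in XER; hence the extra "xer" clobber, which atomic_add()
 * above does not need for its plain add.
 */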

static __inline__ int atomic_inc_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	lwarx	%0,0,%1		# atomic_inc_return\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1 \n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ int atomic_dec_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_return\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
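
/*
 * atomic_cmpxchg() returns the value v->counter held before the
 * attempt; the exchange took place iff that equals the expected value.
 * Derived operations can be built from it with the usual retry loop.
 * Illustrative sketch only (atomic_max() is not part of this header):
 *
 *	static inline void atomic_max(atomic_t *v, int new)
 *	{
 *		int old = atomic_read(v);
 *
 *		while (old < new) {
 *			int seen = atomic_cmpxchg(v, old, new);
 *			if (seen == old)
 *				break;
 *			old = seen;
 *		}
 *	}
 */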

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_RELEASE_BARRIER
"1:	lwarx	%0,0,%1		# atomic_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq-	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ACQUIRE_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}
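
/*
 * The usual consumer of atomic_add_unless() is the
 * atomic_inc_not_zero() wrapper defined below: take a reference only
 * while the object is still live.  Illustrative sketch only ("obj",
 * "refs" and free_obj() are made up):
 *
 *	if (atomic_inc_not_zero(&obj->refs)) {
 *		use(obj);
 *		if (atomic_dec_and_test(&obj->refs))
 *			free_obj(obj);
 *	}
 */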

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement it if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
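
/*
 * Note the "=&b" constraint: "b" restricts the temporary to a base
 * register (anything but r0), because "addi %0,%0,-1" would read the
 * literal value 0 rather than the old counter if r0 were chosen.
 * A caller typically treats a negative return as "was already zero";
 * illustrative sketch only (token_count and wait_for_token() are made
 * up):
 *
 *	if (atomic_dec_if_positive(&token_count) < 0)
 *		wait_for_token();
 */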

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

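/*
 * The 64-bit variants below mirror the 32-bit operations one for one,
 * using ldarx/stdcx. on the doubleword counter.  No PPC405_ERR77()
 * workaround is needed in this half: the 405 is a 32-bit core.
 */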
static __inline__ long atomic64_read(const atomic64_t *v)
{
	long t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic64_set(atomic64_t *v, long i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

static __inline__ void atomic64_add(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%3		# atomic64_add\n\
	add	%0,%2,%0\n\
	stdcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ long atomic64_add_return(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_add_return\n\
	add	%0,%1,%0\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

static __inline__ void atomic64_sub(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%3		# atomic64_sub\n\
	subf	%0,%2,%0\n\
	stdcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_sub_return\n\
	subf	%0,%1,%0\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ long atomic64_inc_return(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_inc_return\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%1 \n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}

/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)

static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ long atomic64_dec_return(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_return\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}

#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement it if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}

#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long t;

	__asm__ __volatile__ (
	PPC_RELEASE_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq-	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ACQUIRE_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#else  /* __powerpc64__ */
#include <asm-generic/atomic64.h>

#endif /* __powerpc64__ */

#include <asm-generic/atomic-long.h>
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */