#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/system.h>

/*
 * Atomic operations that the C language cannot guarantee for us.
 * Useful for resource counting etc...
 *
 * But use these as seldom as possible, since they are much slower
 * than regular operations.
 */
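
/*
 * Typical resource-counting use (illustrative sketch only; nr_users,
 * user_attach and user_detach are hypothetical names, not part of
 * this header):
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	void user_attach(void)
 *	{
 *		atomic_inc(&nr_users);
 *	}
 *
 *	void user_detach(void)
 *	{
 *		if (atomic_dec_and_test(&nr_users))
 *			printk("last user gone\n");
 *	}
 */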


#define ATOMIC_INIT(i)		( (atomic_t) { (i) } )
#define ATOMIC64_INIT(i)	( (atomic64_t) { (i) } )

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic64_read(v)	(*(volatile long *)&(v)->counter)

#define atomic_set(v,i)		((v)->counter = (i))
#define atomic64_set(v,i)	((v)->counter = (i))

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */
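
/*
 * In C-equivalent pseudocode, each op below is a retry loop (a sketch
 * of what the ll/sc sequences do, not real code):
 *
 *	do {
 *		tmp = load_locked(&v->counter);
 *		tmp += i;
 *	} while (!store_conditional(&v->counter, tmp));
 *
 * load_locked/store_conditional are hypothetical helpers standing in
 * for the ldl_l/ldq_l and stl_c/stq_c instructions; stx_c writes 0 to
 * its source register when the store fails, which is what the
 * "beq %0,2f" tests before branching back to retry.
 */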

static __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}
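
/*
 * Constraint notes for the asm above (and its siblings below):
 * "=&r" (temp) is an early-clobber scratch register, "=m"/"m"
 * (v->counter) expose the memory operand to the compiler, and "Ir"
 * lets @i be either an immediate (when it fits) or a register.
 */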

static __inline__ void atomic64_add(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic_sub(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_sub(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}


/*
 * Same as above, but return the result value. Note that these
 * value-returning variants also act as full memory barriers:
 * smp_mb() is issued on both sides of the operation.
 */
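
/*
 * For example (sketch only; shared, pending, done and wake_up_waiter
 * are hypothetical names), the implied barriers order the store to
 * done before the decremented count becomes observable:
 *
 *	shared->done = 1;
 *	if (atomic_dec_return(&shared->pending) == 0)
 *		wake_up_waiter(shared);
 */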
static inline int atomic_add_return(int i, atomic_t *v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%3,%2\n"
	"	addl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic64_add_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%3,%2\n"
	"	addq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ int atomic_sub_return(int i, atomic_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%3,%2\n"
	"	subl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%3,%2\n"
	"	subq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

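/*
 * The returned value lets a caller act on the post-operation count
 * without a separate racy read. Illustrative sketch (conn, refs and
 * first_user_init are hypothetical names):
 *
 *	if (atomic_inc_return(&conn->refs) == 1)
 *		first_user_init(conn);
 */
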
#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

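/*
 * These forward to the generic Alpha xchg()/cmpxchg() helpers (pulled
 * in via <asm/system.h>), which use the same ll/sc loops. The usual
 * compare-and-swap retry idiom built on top of them looks like this
 * (sketch only; doubling the counter is an arbitrary example):
 *
 *	int old;
 *	do {
 *		old = atomic_read(v);
 *	} while (atomic_cmpxchg(v, old, old * 2) != old);
 *
 * atomic_add_unless() below is a real instance of this pattern.
 */
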
/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

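/*
 * Illustrative use of atomic_inc_not_zero() (sketch only; obj and
 * refcount are hypothetical names): take a reference only if the
 * count has not already dropped to zero, i.e. the object is still
 * live:
 *
 *	if (!atomic_inc_not_zero(&obj->refcount))
 *		return NULL;
 */
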
/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))

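/*
 * Plain atomic_inc()/atomic_dec() above carry no barrier semantics,
 * so callers that need ordering bracket them with the macros below.
 * Sketch (obj, state, events and READY are hypothetical names):
 *
 *	obj->state = READY;
 *	smp_mb__before_atomic_inc();
 *	atomic_inc(&obj->events);
 */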
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#include <asm-generic/atomic-long.h>
#endif /* _ALPHA_ATOMIC_H */