#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */


/*
 * Counter is volatile to make sure gcc doesn't try to be clever
 * and move things around on us.  We need to use _exactly_ the address
 * the user gave us, not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile long counter; } atomic64_t;

#define ATOMIC_INIT(i)		( (atomic_t) { (i) } )
#define ATOMIC64_INIT(i)	( (atomic64_t) { (i) } )
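
/*
 * Illustrative usage (my_refcount is an assumption for documentation,
 * not part of this header): a statically initialised counter.
 *
 *	static atomic_t my_refcount = ATOMIC_INIT(1);
 */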

#define atomic_read(v)		((v)->counter + 0)
#define atomic64_read(v)	((v)->counter + 0)
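/*
 * The "+ 0" above makes the expression an rvalue, so atomic_read()
 * cannot be misused as an lvalue; the load itself still goes through
 * the volatile-qualified counter.
 */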

#define atomic_set(v,i)		((v)->counter = (i))
#define atomic64_set(v,i)	((v)->counter = (i))

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

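/*
 * ldl_l loads the word and sets the CPU's lock flag; stl_c stores only
 * if that flag is still set, writing 1 to its source register on
 * success and 0 on failure.  On failure we branch to the out-of-line
 * stub and retry from the load.
 */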
static __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}
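
/*
 * The remaining void operations follow exactly the same ll/sc retry
 * loop; the atomic64_* forms use the quadword instructions
 * (ldq_l/stq_c, addq/subq) instead of the longword ones.
 */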

static __inline__ void atomic64_add(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic_sub(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_sub(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}


/*
 * Same as above, but return the result value.
 */
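/*
 * The smp_mb() before and after the ll/sc sequence give these
 * value-returning operations full memory-barrier semantics, as the
 * kernel requires of atomics that return a value; the void operations
 * above imply no ordering at all.
 */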
static __inline__ long atomic_add_return(int i, atomic_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%3,%2\n"
	"	addl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

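/*
 * True if v is negative after adding a.  Built on atomic_add_return(),
 * so it is fully ordered like the other value-returning operations.
 */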
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)

static __inline__ long atomic64_add_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%3,%2\n"
	"	addq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic_sub_return(int i, atomic_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%3,%2\n"
	"	subl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%3,%2\n"
	"	subq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))

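/*
 * atomic_cmpxchg(): if v holds o, replace it with n; the old value is
 * returned either way, so the caller checks success with (ret == o).
 *
 * atomic_add_unless() below builds on it: add a to v unless v was u,
 * returning non-zero if the add happened.  The loop simply retries
 * with the fresh value whenever another CPU changes the counter
 * between the read and the cmpxchg.
 */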
#define atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
		c = old;					\
	c != (u);						\
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
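/*
 * Illustrative use of atomic_inc_not_zero() (obj is an assumption, not
 * part of this header): during a lookup, take a reference only if the
 * object is not already being freed.
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		obj = NULL;
 */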

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
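/*
 * atomic_dec_and_test() is the usual drop-last-reference idiom.
 * Illustrative only (obj and its free routine are assumptions, not
 * part of this header):
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		kfree(obj);
 */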

#define atomic_inc(v) atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))

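/*
 * The plain atomic_inc()/atomic_dec() above imply no ordering on
 * Alpha, so these hooks must expand to a real smp_mb().
 */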
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#endif /* _ALPHA_ATOMIC_H */