/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
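
/*
 * Worked example (assuming L1_CACHE_BYTES == 64, purely for
 * illustration): addresses 0x1000..0x103f hash to __atomic_hash[0],
 * 0x1040..0x107f to __atomic_hash[1], and so on, wrapping around
 * every ATOMIC_HASH_SIZE cachelines.  Atomics on the same cacheline
 * therefore share one lock.
 */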

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)
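
/*
 * Note: interrupts must stay disabled for the whole critical section.
 * If an interrupt handler performed an atomic op on a variable that
 * hashes to the same bucket as one the interrupted code already
 * holds, it would spin on that lock forever.
 */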

#else
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
	return ACCESS_ONCE((v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
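
/*
 * Usage sketch (hypothetical obj->refs field, for illustration) - the
 * classic "increment unless already zero" refcount idiom that the
 * generic layer builds on top of this primitive:
 *
 *	if (__atomic_add_unless(&obj->refs, 1, 0) != 0)
 *		// old value was non-zero, so we got a reference
 */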

#define ATOMIC_OP(op, c_op)						\
static __inline__ void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static __inline__ int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
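
/*
 * For illustration, ATOMIC_OPS(add, +=) expands to (roughly):
 *
 *	static __inline__ void atomic_add(int i, atomic_t *v)
 *	{ ... v->counter += i; ... }
 *
 *	static __inline__ int atomic_add_return(int i, atomic_t *v)
 *	{ ... ret = (v->counter += i); ... return ret; }
 *
 * i.e. every op is the plain C operator executed under the hashed
 * spinlock with interrupts disabled.
 */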

#define CONFIG_ARCH_HAS_ATOMIC_OR

ATOMIC_OP(and, &=)
ATOMIC_OP(or, |=)
ATOMIC_OP(xor, ^=)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_inc(v)	(atomic_add(   1,(v)))
#define atomic_dec(v)	(atomic_add(  -1,(v)))

#define atomic_inc_return(v)	(atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)

#define ATOMIC_INIT(i)	{ (i) }
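
/*
 * Usage sketch - static initialization of an atomic counter
 * (nr_widgets is a hypothetical name, for illustration only):
 *
 *	static atomic_t nr_widgets = ATOMIC_INIT(0);
 *
 *	atomic_inc(&nr_widgets);
 *	if (atomic_dec_and_test(&nr_widgets))
 *		// the last widget just went away
 */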

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

#define ATOMIC64_OP(op, c_op)						\
static __inline__ void atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC64_OP_RETURN(op, c_op)					\
static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_OPS(op, c_op) ATOMIC64_OP(op, c_op) ATOMIC64_OP_RETURN(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)
ATOMIC64_OP(and, &=)
ATOMIC64_OP(or, |=)
ATOMIC64_OP(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return ACCESS_ONCE((v)->counter);
}

#define atomic64_inc(v)		(atomic64_add(   1,(v)))
#define atomic64_dec(v)		(atomic64_add(  -1,(v)))

#define atomic64_inc_return(v)		(atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u (i.e. the add happened),
 * and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * Returns the old value of @v minus 1, even if @v was not
 * decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;
	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
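
/*
 * Usage sketch (hypothetical sem->count field, for illustration):
 * drop a count but never let it go negative:
 *
 *	if (atomic64_dec_if_positive(&sem->count) < 0)
 *		// count was already 0; nothing was decremented
 */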

#endif /* CONFIG_64BIT */


#endif /* _ASM_PARISC_ATOMIC_H_ */