/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc.
 *
 * And probably incredibly slow on parisc. OTOH, we don't
 * have to write any serious assembly. prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
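
/*
 * For illustration: with ATOMIC_HASH_SIZE == 4, an atomic_t at address
 * "a" is guarded by __atomic_hash[(a / L1_CACHE_BYTES) & 3], so all
 * atomics sharing one L1 cacheline also share one lock, while atomics
 * in different cachelines tend to spread across the four locks.
 */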

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
	return ACCESS_ONCE((v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
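
/*
 * Usage sketch (illustrative, not part of this header): generic kernel
 * code of this generation typically builds atomic_add_unless() and
 * atomic_inc_not_zero() on top of this primitive, e.g. a "take a
 * reference unless the count already dropped to zero" check on a
 * hypothetical atomic_t refcnt boils down to
 * __atomic_add_unless(&refcnt, 1, 0) != 0.
 */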

#define ATOMIC_OP(op, c_op)						\
static __inline__ void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}									\

#define ATOMIC_OP_RETURN(op, c_op)					\
static __inline__ int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)

#define CONFIG_ARCH_HAS_ATOMIC_OR

ATOMIC_OP(and, &=)
ATOMIC_OP(or, |=)
ATOMIC_OP(xor, ^=)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
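
/*
 * For reference, ATOMIC_OPS(add, +=) above expands (roughly) to the two
 * inlines below; the other instantiations follow the same pattern:
 *
 *	static __inline__ void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long flags;
 *
 *		_atomic_spin_lock_irqsave(v, flags);
 *		v->counter += i;
 *		_atomic_spin_unlock_irqrestore(v, flags);
 *	}
 *
 *	static __inline__ int atomic_add_return(int i, atomic_t *v)
 *	{
 *		unsigned long flags;
 *		int ret;
 *
 *		_atomic_spin_lock_irqsave(v, flags);
 *		ret = (v->counter += i);
 *		_atomic_spin_unlock_irqrestore(v, flags);
 *
 *		return ret;
 *	}
 */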

#define atomic_inc(v)	(atomic_add(   1,(v)))
#define atomic_dec(v)	(atomic_add(  -1,(v)))

#define atomic_inc_return(v)	(atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(atomic_add_return(  -1,(v)))

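/*
 * atomic_add_negative - add and test if the result is negative
 * @a: the amount to add to v
 * @v: pointer of type atomic_t
 *
 * Atomically adds @a to @v and returns true if the result is negative,
 * or false when the result is greater than or equal to zero.
 */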
#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)

#define ATOMIC_INIT(i)	{ (i) }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700162
Helge Deller513e7ec2007-01-28 15:09:20 +0100163#ifdef CONFIG_64BIT
Kyle McMartin2e13b312006-01-17 08:33:01 -0700164
Mel Gormanbba3d8c2012-07-23 12:16:19 +0100165#define ATOMIC64_INIT(i) { (i) }
Kyle McMartin2e13b312006-01-17 08:33:01 -0700166
Peter Zijlstra15e3f6d2014-03-26 18:04:44 +0100167#define ATOMIC64_OP(op, c_op) \
168static __inline__ void atomic64_##op(s64 i, atomic64_t *v) \
169{ \
170 unsigned long flags; \
171 \
172 _atomic_spin_lock_irqsave(v, flags); \
173 v->counter c_op i; \
174 _atomic_spin_unlock_irqrestore(v, flags); \
175} \
Kyle McMartin2e13b312006-01-17 08:33:01 -0700176
Peter Zijlstra15e3f6d2014-03-26 18:04:44 +0100177#define ATOMIC64_OP_RETURN(op, c_op) \
178static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \
179{ \
180 unsigned long flags; \
181 s64 ret; \
182 \
183 _atomic_spin_lock_irqsave(v, flags); \
184 ret = (v->counter c_op i); \
185 _atomic_spin_unlock_irqrestore(v, flags); \
186 \
187 return ret; \
Kyle McMartin2e13b312006-01-17 08:33:01 -0700188}
189
Peter Zijlstra15e3f6d2014-03-26 18:04:44 +0100190#define ATOMIC64_OPS(op, c_op) ATOMIC64_OP(op, c_op) ATOMIC64_OP_RETURN(op, c_op)
191
192ATOMIC64_OPS(add, +=)
193ATOMIC64_OPS(sub, -=)
Peter Zijlstraaebea932014-04-23 19:47:25 +0200194ATOMIC64_OP(and, &=)
195ATOMIC64_OP(or, |=)
196ATOMIC64_OP(xor, ^=)
Peter Zijlstra15e3f6d2014-03-26 18:04:44 +0100197
198#undef ATOMIC64_OPS
199#undef ATOMIC64_OP_RETURN
200#undef ATOMIC64_OP

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return ACCESS_ONCE((v)->counter);
}

#define atomic64_inc(v)		(atomic64_add(   1,(v)))
#define atomic64_dec(v)		(atomic64_add(  -1,(v)))

#define atomic64_inc_return(v)		(atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not @u.
 * Returns non-zero if the addition was performed (@v was not @u),
 * zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;
	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}

#endif /* CONFIG_64BIT */


#endif /* _ASM_PARISC_ATOMIC_H_ */