/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
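
/*
 * Worked example (illustrative only): with L1_CACHE_BYTES == 64 and
 * ATOMIC_HASH_SIZE == 4, an atomic_t at address 0x10080 hashes to
 * index (0x10080 / 64) & 3 == 2, i.e. it shares __atomic_hash[2] with
 * every other atomic_t whose cacheline number has the same low two bits.
 */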

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

#define atomic_set_release(v, i)	atomic_set((v), (i))

static __inline__ int atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
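
/*
 * Illustrative (hypothetical) use of __atomic_add_unless: the classic
 * "get a reference unless the count already hit zero" pattern.  The
 * helper below is not part of this header; it is only a usage sketch.
 *
 *	static inline int my_ref_get_unless_zero(atomic_t *ref)
 *	{
 *		return __atomic_add_unless(ref, 1, 0) != 0;
 *	}
 */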

#define ATOMIC_OP(op, c_op)						\
static __inline__ void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static __inline__ int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_OP_RETURN(op, c_op)					\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
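
/*
 * The two ATOMIC_OPS() invocations above generate the full 32-bit set:
 * atomic_add()/atomic_sub(), atomic_add_return()/atomic_sub_return()
 * and atomic_fetch_add()/atomic_fetch_sub(), each serialized through
 * the hashed spinlocks above (or plain IRQ disabling on UP).
 */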

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_inc(v)	(atomic_add(   1,(v)))
#define atomic_dec(v)	(atomic_add(  -1,(v)))

#define atomic_inc_return(v)	(atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)

#define ATOMIC_INIT(i)	{ (i) }

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

#define ATOMIC64_OP(op, c_op)						\
static __inline__ void atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC64_OP_RETURN(op, c_op)					\
static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ s64 atomic64_fetch_##op(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return READ_ONCE((v)->counter);
}

#define atomic64_inc(v)		(atomic64_add(   1,(v)))
#define atomic64_dec(v)		(atomic64_add(  -1,(v)))

#define atomic64_inc_return(v)		(atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the addition was performed, zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;
	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
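
/*
 * Illustrative (hypothetical) use of atomic64_dec_if_positive: consume
 * one unit of a 64-bit credit counter without ever driving it negative.
 * The helper below is not part of this header; it is only a sketch.
 *
 *	static inline int my_take_credit(atomic64_t *credits)
 *	{
 *		return atomic64_dec_if_positive(credits) >= 0;
 *	}
 */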

#endif /* CONFIG_64BIT */


#endif /* _ASM_PARISC_ATOMIC_H_ */