/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc. OTOH, we don't
 * have to write any serious assembly. prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

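/*
 * Editor's illustration (not part of the original header), assuming for
 * the sake of the example that L1_CACHE_BYTES == 16: atomics that share a
 * cacheline always hash to the same lock, while atomics on different
 * cachelines are spread over the ATOMIC_HASH_SIZE locks and can therefore
 * be updated concurrently.
 *
 *	atomic_t *a = (atomic_t *) 0x1000;
 *	atomic_t *b = (atomic_t *) 0x1008;	// same cacheline as a
 *	atomic_t *c = (atomic_t *) 0x1010;	// next cacheline
 *
 *	ATOMIC_HASH(a) == &__atomic_hash[0];	// 0x1000/16 == 256, 256 & 3 == 0
 *	ATOMIC_HASH(b) == &__atomic_hash[0];	// 0x1008/16 == 256, same lock as a
 *	ATOMIC_HASH(c) == &__atomic_hash[1];	// 0x1010/16 == 257, 257 & 3 == 1
 */
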
/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
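
/*
 * Editor's note (illustrative, not from the original source): every
 * read-modify-write below is bracketed by the pair above, so on SMP it
 * expands to roughly
 *
 *	local_irq_save(flags);
 *	arch_spin_lock(ATOMIC_HASH(v));
 *	... update v->counter ...
 *	arch_spin_unlock(ATOMIC_HASH(v));
 *	local_irq_restore(flags);
 *
 * while on UP only the irq disable/restore remains, which already suffices
 * to make the update atomic on a single processor.
 */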

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

#define atomic_set_release(v, i)	atomic_set((v), (i))

static __inline__ int atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
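
/*
 * Illustrative use (editor's example, not part of the original header):
 * take a reference only while the count is still non-zero, in the style of
 * a hypothetical "get-if-live" helper:
 *
 *	static inline int example_get_if_live(atomic_t *refcount)
 *	{
 *		return __atomic_add_unless(refcount, 1, 0) != 0;
 *	}
 *
 * The cmpxchg loop above simply retries until either the add succeeds or
 * the counter is observed to equal @u.
 */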

#define ATOMIC_OP(op, c_op)						\
static __inline__ void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}									\

#define ATOMIC_OP_RETURN(op, c_op)					\
static __inline__ int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_OP_RETURN(op, c_op)					\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
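
/*
 * Editor's note (illustrative, not from the original source): the two
 * expansions above generate, as static inlines built on the hashed
 * spinlocks,
 *
 *	void atomic_add(int i, atomic_t *v);
 *	int  atomic_add_return(int i, atomic_t *v);
 *	int  atomic_fetch_add(int i, atomic_t *v);
 *	void atomic_sub(int i, atomic_t *v);
 *	int  atomic_sub_return(int i, atomic_t *v);
 *	int  atomic_fetch_sub(int i, atomic_t *v);
 *
 * where the *_return forms return the new value and the fetch_* forms
 * return the value the counter held before the operation.
 */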

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_inc(v)	(atomic_add(   1,(v)))
#define atomic_dec(v)	(atomic_add(  -1,(v)))

#define atomic_inc_return(v)	(atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)
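
/*
 * Illustrative use (editor's example, not part of the original header):
 * the classic reference-count release idiom built from these helpers:
 *
 *	static inline void example_put(atomic_t *refcount,
 *				       void (*release)(void))
 *	{
 *		if (atomic_dec_and_test(refcount))
 *			release();	// we dropped the last reference
 *	}
 */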

#define ATOMIC_INIT(i)	{ (i) }

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

#define ATOMIC64_OP(op, c_op)						\
static __inline__ void atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}									\

#define ATOMIC64_OP_RETURN(op, c_op)					\
static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ s64 atomic64_fetch_##op(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return READ_ONCE((v)->counter);
}

#define atomic64_inc(v)		(atomic64_add(   1,(v)))
#define atomic64_dec(v)		(atomic64_add(  -1,(v)))

#define atomic64_inc_return(v)		(atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
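
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * atomic64_cmpxchg() is the building block for arbitrary read-modify-write
 * loops, e.g. recording a running maximum:
 *
 *	static inline void example_atomic64_max(atomic64_t *v, s64 new)
 *	{
 *		s64 old = atomic64_read(v);
 *
 *		while (old < new) {
 *			s64 prev = atomic64_cmpxchg(v, old, new);
 *			if (prev == old)
 *				break;		// we installed new
 *			old = prev;		// lost the race; re-check
 *		}
 *	}
 */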

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the add was performed (i.e. @v was not @u),
 * otherwise zero.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
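
/*
 * Illustrative use (editor's example, not part of the original header):
 * atomic64_inc_not_zero() is the 64-bit "get-if-live" pattern -- it takes
 * a reference only if the count has not already dropped to zero:
 *
 *	if (atomic64_inc_not_zero(&example_refcnt))
 *		example_use_object();
 */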

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;
	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
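
/*
 * Illustrative use (editor's example, not part of the original header):
 * consume one credit from a budget without ever driving it negative:
 *
 *	static inline int example_take_credit(atomic64_t *budget)
 *	{
 *		return atomic64_dec_if_positive(budget) >= 0;
 *	}
 *
 * A negative return value means the budget was already exhausted and has
 * been left untouched.
 */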

#endif /* CONFIG_64BIT */


#endif /* _ASM_PARISC_ATOMIC_H_ */