/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#ifndef CONFIG_ARC_PLAT_EZNPS

#define atomic_read(v)  READ_ONCE((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#define ATOMIC_OP(op, c_op, asm_op)                                     \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        unsigned int val;                                               \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     llock   %[val], [%[ctr]]                \n"             \
        "       " #asm_op " %[val], %[val], %[i]        \n"             \
        "       scond   %[val], [%[ctr]]                \n"             \
        "       bnz     1b                              \n"             \
        : [val] "=&r" (val) /* Early clobber to prevent reg reuse */    \
        : [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
          [i]   "ir" (i)                                                \
        : "cc");                                                        \
}                                                                       \

#define ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
static inline int atomic_##op##_return(int i, atomic_t *v)             \
{                                                                       \
        unsigned int val;                                               \
                                                                        \
        /*                                                              \
         * Explicit full memory barrier needed before/after as         \
         * LLOCK/SCOND themselves don't provide any such semantics     \
         */                                                             \
        smp_mb();                                                       \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     llock   %[val], [%[ctr]]                \n"             \
        "       " #asm_op " %[val], %[val], %[i]        \n"             \
        "       scond   %[val], [%[ctr]]                \n"             \
        "       bnz     1b                              \n"             \
        : [val] "=&r" (val)                                             \
        : [ctr] "r" (&v->counter),                                      \
          [i]   "ir" (i)                                                \
        : "cc");                                                        \
                                                                        \
        smp_mb();                                                       \
                                                                        \
        return val;                                                     \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)                               \
static inline int atomic_fetch_##op(int i, atomic_t *v)                \
{                                                                       \
        unsigned int val, orig;                                         \
                                                                        \
        /*                                                              \
         * Explicit full memory barrier needed before/after as         \
         * LLOCK/SCOND themselves don't provide any such semantics     \
         */                                                             \
        smp_mb();                                                       \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     llock   %[orig], [%[ctr]]               \n"             \
        "       " #asm_op " %[val], %[orig], %[i]       \n"             \
        "       scond   %[val], [%[ctr]]                \n"             \
        "       bnz     1b                              \n"             \
        : [val]  "=&r" (val),                                           \
          [orig] "=&r" (orig)                                           \
        : [ctr]  "r" (&v->counter),                                     \
          [i]    "ir" (i)                                               \
        : "cc");                                                        \
                                                                        \
        smp_mb();                                                       \
                                                                        \
        return orig;                                                    \
}
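
/*
 * Illustration only (not generated code): with the ATOMIC_OPS()
 * expansions further down, ATOMIC_FETCH_OP(add, +=, add) behaves
 * roughly like
 *
 *      int atomic_fetch_add(int i, atomic_t *v)
 *      {
 *      retry:  orig = LLOCK(&v->counter);
 *              if (!SCOND(&v->counter, orig + i))
 *                      goto retry;
 *              return orig;            // value *before* the add
 *      }
 *
 * (the full barriers before/after are elided in the sketch), i.e. the
 * fetch_ variants return the old value while the _return variants
 * above hand back the new value left in %[val].
 */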

#else   /* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating atomic_xxx API locking protocol in UP for optimization's sake */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
        /*
         * Independent of hardware support, all of the atomic_xxx() APIs need
         * to follow the same locking rules to make sure that a "hardware"
         * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
         * sequence.
         *
         * Thus atomic_set(), despite being a single insn (and seemingly
         * atomic), requires the locking.
         */
        unsigned long flags;

        atomic_ops_lock(flags);
        WRITE_ONCE(v->counter, i);
        atomic_ops_unlock(flags);
}
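
/*
 * Illustration of the hazard the comment above guards against (not
 * generated code): without the lock, a plain store could land in the
 * middle of an emulated read-modify-write on another CPU:
 *
 *      CPU0: atomic_add(1, v)                  CPU1: atomic_set(v, i)
 *        lock; tmp = v->counter;
 *                                                v->counter = i;   <- lost
 *        v->counter = tmp + 1; unlock;
 *
 * hence even the single-insn atomic_set() takes atomic_ops_lock().
 */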

#endif

/*
 * Non-hardware-assisted Atomic-R-M-W
 * Falls back to locking: irq-disabling on UP, spinlocks on SMP
 */

#define ATOMIC_OP(op, c_op, asm_op)                                     \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        unsigned long flags;                                            \
                                                                        \
        atomic_ops_lock(flags);                                         \
        v->counter c_op i;                                              \
        atomic_ops_unlock(flags);                                       \
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
static inline int atomic_##op##_return(int i, atomic_t *v)             \
{                                                                       \
        unsigned long flags;                                            \
        unsigned long temp;                                             \
                                                                        \
        /*                                                              \
         * spin lock/unlock provides the needed smp_mb() before/after  \
         */                                                             \
        atomic_ops_lock(flags);                                         \
        temp = v->counter;                                              \
        temp c_op i;                                                    \
        v->counter = temp;                                              \
        atomic_ops_unlock(flags);                                       \
                                                                        \
        return temp;                                                    \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)                               \
static inline int atomic_fetch_##op(int i, atomic_t *v)                \
{                                                                       \
        unsigned long flags;                                            \
        unsigned long orig;                                             \
                                                                        \
        /*                                                              \
         * spin lock/unlock provides the needed smp_mb() before/after  \
         */                                                             \
        atomic_ops_lock(flags);                                         \
        orig = v->counter;                                              \
        v->counter c_op i;                                              \
        atomic_ops_unlock(flags);                                       \
                                                                        \
        return orig;                                                    \
}

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)                                    \
        ATOMIC_OP(op, c_op, asm_op)                                     \
        ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
        ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
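
/*
 * Each ATOMIC_OPS() above expands to three entry points, e.g.
 * ATOMIC_OPS(add, +=, add) provides atomic_add(), atomic_add_return()
 * and atomic_fetch_add(); likewise for sub.
 */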
Peter Zijlstraf7d11e92014-03-23 16:29:31 +0100185
Peter Zijlstracda7e412014-04-23 20:06:20 +0200186#define atomic_andnot atomic_andnot
187
Peter Zijlstrafbffe892016-04-18 01:16:09 +0200188#undef ATOMIC_OPS
189#define ATOMIC_OPS(op, c_op, asm_op) \
190 ATOMIC_OP(op, c_op, asm_op) \
191 ATOMIC_FETCH_OP(op, c_op, asm_op)
192
193ATOMIC_OPS(and, &=, and)
194ATOMIC_OPS(andnot, &= ~, bic)
195ATOMIC_OPS(or, |=, or)
196ATOMIC_OPS(xor, ^=, xor)
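
/*
 * The redefined ATOMIC_OPS() drops ATOMIC_OP_RETURN(), so the bitwise
 * ops only come in the void and fetch_ flavours (atomic_and() and
 * atomic_fetch_and(), but no atomic_and_return()).  Note that andnot
 * maps the C "&= ~" onto the single ARC "bic" instruction.
 */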

#else   /* CONFIG_ARC_PLAT_EZNPS */

static inline int atomic_read(const atomic_t *v)
{
        int temp;

        __asm__ __volatile__(
        "       ld.di %0, [%1]"
        : "=r"(temp)
        : "r"(&v->counter)
        : "memory");
        return temp;
}

static inline void atomic_set(atomic_t *v, int i)
{
        __asm__ __volatile__(
        "       st.di %0,[%1]"
        :
        : "r"(i), "r"(&v->counter)
        : "memory");
}

#define ATOMIC_OP(op, c_op, asm_op)                                     \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        __asm__ __volatile__(                                           \
        "       mov r2, %0\n"                                           \
        "       mov r3, %1\n"                                           \
        "       .word %2\n"                                             \
        :                                                               \
        : "r"(i), "r"(&v->counter), "i"(asm_op)                         \
        : "r2", "r3", "memory");                                        \
}                                                                       \

#define ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
static inline int atomic_##op##_return(int i, atomic_t *v)             \
{                                                                       \
        unsigned int temp = i;                                          \
                                                                        \
        /* Explicit full memory barrier needed before/after */          \
        smp_mb();                                                       \
                                                                        \
        __asm__ __volatile__(                                           \
        "       mov r2, %0\n"                                           \
        "       mov r3, %1\n"                                           \
        "       .word %2\n"                                             \
        "       mov %0, r2"                                             \
        : "+r"(temp)                                                    \
        : "r"(&v->counter), "i"(asm_op)                                 \
        : "r2", "r3", "memory");                                        \
                                                                        \
        smp_mb();                                                       \
                                                                        \
        temp c_op i;                                                    \
                                                                        \
        return temp;                                                    \
}
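
/*
 * Reading of the code above/below (the CTOP_INST_* opcodes are provided
 * by the EZNPS platform code): the .word-encoded instruction takes its
 * operands in r2/r3 and leaves the *previous* counter value in r2.
 * atomic_##op##_return() therefore recomputes the new value in C via
 * "temp c_op i", while atomic_fetch_##op() below returns r2 as is.
 */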

#define ATOMIC_FETCH_OP(op, c_op, asm_op)                               \
static inline int atomic_fetch_##op(int i, atomic_t *v)                \
{                                                                       \
        unsigned int temp = i;                                          \
                                                                        \
        /* Explicit full memory barrier needed before/after */          \
        smp_mb();                                                       \
                                                                        \
        __asm__ __volatile__(                                           \
        "       mov r2, %0\n"                                           \
        "       mov r3, %1\n"                                           \
        "       .word %2\n"                                             \
        "       mov %0, r2"                                             \
        : "+r"(temp)                                                    \
        : "r"(&v->counter), "i"(asm_op)                                 \
        : "r2", "r3", "memory");                                        \
                                                                        \
        smp_mb();                                                       \
                                                                        \
        return temp;                                                    \
}

#define ATOMIC_OPS(op, c_op, asm_op)                                    \
        ATOMIC_OP(op, c_op, asm_op)                                     \
        ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
        ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
#define atomic_sub(i, v) atomic_add(-(i), (v))
#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))
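
/*
 * No dedicated subtract opcode is instantiated here: sub()/sub_return()
 * simply add the negated operand.
 */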

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)                                    \
        ATOMIC_OP(op, c_op, asm_op)                                     \
        ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)

#endif  /* CONFIG_ARC_PLAT_EZNPS */

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)                                    \
({                                                                      \
        int c, old;                                                     \
                                                                        \
        /*                                                              \
         * Explicit full memory barrier needed before/after as         \
         * LLOCK/SCOND themselves don't provide any such semantics     \
         */                                                             \
        smp_mb();                                                       \
                                                                        \
        c = atomic_read(v);                                             \
        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
                c = old;                                                \
                                                                        \
        smp_mb();                                                       \
                                                                        \
        c;                                                              \
})
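
/*
 * Usage sketch (via the generic atomic_add_unless() wrapper, which
 * returns true iff the add was performed; "obj"/"refcnt" below are
 * hypothetical):
 *
 *      if (!atomic_inc_not_zero(&obj->refcnt))
 *              return NULL;    // object already on its way out
 *
 * atomic_inc_not_zero() itself is defined just below in terms of
 * atomic_add_unless(v, 1, 0).
 */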

#define atomic_inc_not_zero(v)          atomic_add_unless((v), 1, 0)

#define atomic_inc(v)                   atomic_add(1, v)
#define atomic_dec(v)                   atomic_sub(1, v)

#define atomic_inc_and_test(v)          (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)          (atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)            atomic_add_return(1, (v))
#define atomic_dec_return(v)            atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)       (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)       (atomic_add_return(i, v) < 0)
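
/*
 * Example (sketch; "foo" is hypothetical): the *_and_test() helpers
 * fold the RMW and the zero check into one atomic step, so only the
 * thread dropping the last reference sees true:
 *
 *      if (atomic_dec_and_test(&foo->refcnt))
 *              foo_free(foo);
 */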
345
346#define ATOMIC_INIT(i) { (i) }
347
348#include <asm-generic/atomic64.h>
349
350#endif
351
352#endif