/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#ifndef CONFIG_ARC_PLAT_EZNPS

#define atomic_read(v)	READ_ONCE((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#ifdef CONFIG_ARC_STAR_9000923308

#define SCOND_FAIL_RETRY_VAR_DEF						\
	unsigned int delay = 1, tmp;						\

#define SCOND_FAIL_RETRY_ASM							\
	"	bz	4f			\n"				\
	"	; --- scond fail delay ---	\n"				\
	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */	\
	"2:	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */	\
	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */		\
	"	rol	%[delay], %[delay]	\n"	/* delay *= 2 */	\
	"	b	1b			\n"	/* start over */	\
	"4: ; --- success ---			\n"				\

#define SCOND_FAIL_RETRY_VARS							\
	,[delay] "+&r" (delay), [tmp] "=&r" (tmp)				\

#else	/* !CONFIG_ARC_STAR_9000923308 */

#define SCOND_FAIL_RETRY_VAR_DEF

#define SCOND_FAIL_RETRY_ASM							\
	"	bnz	1b			\n"				\

#define SCOND_FAIL_RETRY_VARS

#endif

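/*
 * Illustrative sketch (not compiled): with the erratum workaround
 * enabled, the llock/scond retry loop used below effectively becomes
 *
 *	1:	llock	val, [ctr]
 *		<op>	val, val, i
 *		scond	val, [ctr]
 *		bz	4f			; scond succeeded
 *		mov	tmp, delay		; scond failed: spin for
 *	2:	brne.d	tmp, 0, 2b		; 'delay' iterations,
 *		sub	tmp, tmp, 1
 *		rol	delay, delay		; double the backoff,
 *		b	1b			; then retry from llock
 *	4:	; success
 *
 * i.e. an exponential backoff between scond retries; without the
 * erratum only the plain "bnz 1b" retry remains.
 */
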
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
	SCOND_FAIL_RETRY_VAR_DEF					\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"						\n"		\
	SCOND_FAIL_RETRY_ASM						\
									\
	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
	  SCOND_FAIL_RETRY_VARS						\
	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
	  [i]	"ir"	(i)						\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int val;						\
	SCOND_FAIL_RETRY_VAR_DEF					\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"						\n"		\
	SCOND_FAIL_RETRY_ASM						\
									\
	: [val]	"=&r"	(val)						\
	  SCOND_FAIL_RETRY_VARS						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return val;							\
}

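/*
 * For reference, a sketch (not compiled) of what ATOMIC_OP(add, +=, add),
 * instantiated via ATOMIC_OPS(add, +=, add) further down, expands to in
 * the non-erratum LLSC case:
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned int val;
 *
 *		__asm__ __volatile__(
 *		"1:	llock   %[val], [%[ctr]]	\n"
 *		"	add     %[val], %[val], %[i]	\n"
 *		"	scond   %[val], [%[ctr]]	\n"
 *		"	bnz     1b			\n"
 *		: [val] "=&r" (val)
 *		: [ctr] "r" (&v->counter), [i] "ir" (i)
 *		: "cc");
 *	}
 */
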
#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating atomic_xxx API locking protocol in UP for optimization's sake */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence.
	 *
	 * Thus atomic_set(), despite being a single insn (and seemingly
	 * atomic), requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}
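
/*
 * Sketch of the race being closed (hypothetical interleaving, with
 * atomic_add() emulated as lock/read/modify/write/unlock):
 *
 *	CPU A: atomic_add(1, v)		CPU B: atomic_set(v, 0)
 *	  lock; t = v->counter;
 *					  v->counter = 0;	// raw store
 *	  v->counter = t + 1; unlock;	// B's set is silently lost
 *
 * Taking atomic_ops_lock() in atomic_set() serializes the store against
 * the whole emulated RMW sequence, so the set cannot be overwritten.
 */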

#endif

/*
 * Non-hardware-assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

ATOMIC_OP(and, &=, and)
ATOMIC_OP(andnot, &= ~, bic)
ATOMIC_OP(or, |=, or)
ATOMIC_OP(xor, ^=, xor)

#undef SCOND_FAIL_RETRY_VAR_DEF
#undef SCOND_FAIL_RETRY_ASM
#undef SCOND_FAIL_RETRY_VARS

#else	/* CONFIG_ARC_PLAT_EZNPS */

static inline int atomic_read(const atomic_t *v)
{
	int temp;

	__asm__ __volatile__(
	"	ld.di %0, [%1]"
	: "=r"(temp)
	: "r"(&v->counter)
	: "memory");
	return temp;
}

static inline void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__(
	"	st.di %0,[%1]"
	:
	: "r"(i), "r"(&v->counter)
	: "memory");
}
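
/*
 * (Assumption, not stated here: the .di suffix on the ld/st above is
 * the ARC cache-bypass flag, so these accesses go straight to memory
 * rather than through the data cache, matching where the EZNPS atomic
 * instructions operate.)
 */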

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	:								\
	: "r"(i), "r"(&v->counter), "i"(asm_op)				\
	: "r2", "r3", "memory");					\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	temp c_op i;							\
									\
	return temp;							\
}
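
/*
 * Note on the trailing "temp c_op i" above (an inference from the code,
 * not from EZNPS documentation): r2 goes in holding @i and comes back
 * holding the counter's pre-op value, so the C fixup re-applies the
 * operation locally to produce the post-op value that *_return() must
 * return. E.g. for atomic_add_return():
 *
 *	temp = i;
 *	// custom insn: { old = v->counter; v->counter += r2; r2 = old; }
 *	temp = r2;	// old value
 *	temp += i;	// new value, returned to the caller
 */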

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
#define atomic_sub(i, v) atomic_add(-(i), (v))
#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))

ATOMIC_OP(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
ATOMIC_OP(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OP(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)

#endif /* CONFIG_ARC_PLAT_EZNPS */

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
		c = old;						\
									\
	smp_mb();							\
									\
	c;								\
})

#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
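
/*
 * Typical use of atomic_inc_not_zero() (hypothetical object with an
 * atomic_t refcount): take a reference only if teardown hasn't already
 * dropped the count to zero:
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;	// object already on its way out
 */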

#define atomic_inc(v)	atomic_add(1, v)
#define atomic_dec(v)	atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	atomic_add_return(1, (v))
#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)

#define ATOMIC_INIT(i)	{ (i) }
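
/*
 * Minimal usage sketch of the API above (hypothetical counter):
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_inc(&nr_users);			// user arrives
 *	...
 *	if (atomic_dec_and_test(&nr_users))	// user leaves
 *		pr_info("last user gone\n");	// true only on 1 -> 0
 */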

#include <asm-generic/atomic64.h>

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_ARC_ATOMIC_H */