/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif

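/*
 * Each operation below is emitted as an ARM64_LSE_ATOMIC_INSN() alternative:
 * a branch-and-link to the out-of-line LL/SC implementation (hence the
 * __LL_SC_CLOBBERS list), patched at boot into the inline LSE instruction(s)
 * when the CPU advertises ARM64_HAS_LSE_ATOMICS.
 *
 * As an illustration, ATOMIC_OP(add, stadd) expands (roughly) to:
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		...
 *		asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_CALL(atomic_add),
 *	"	stadd	%w[i], %[v]\n")
 *		...
 *	}
 *
 * i.e. a single STADD that adds i to v->counter, with no return value and
 * no ordering guarantees.
 */
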
#define __LL_SC_ATOMIC(op)	__LL_SC_CALL(atomic_##op)
#define ATOMIC_OP(op, asm_op)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op),		\
"	" #asm_op "	%w[i], %[v]\n")					\
	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS);						\
}

ATOMIC_OP(andnot, stclr)
ATOMIC_OP(or, stset)
ATOMIC_OP(xor, steor)
ATOMIC_OP(add, stadd)

#undef ATOMIC_OP

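/*
 * The fetch_<op> variants return the value the atomic held before the
 * update (LD<op> writes the old value to its second register operand).
 * The "mb" parameter selects the ordering suffix: none for _relaxed,
 * "a" (acquire) for _acquire, "l" (release) for _release and "al" for
 * the fully ordered form; every ordered variant also clobbers "memory"
 * so the compiler cannot cache values across the barrier.
 */
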
#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...)			\
static inline int atomic_fetch_##op##name(int i, atomic_t *v)		\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC(fetch_##op##name),				\
	/* LSE atomics */						\
"	" #asm_op #mb "	%w[i], %w[i], %[v]")				\
	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return w0;							\
}

#define ATOMIC_FETCH_OPS(op, asm_op)					\
	ATOMIC_FETCH_OP(_relaxed,   , op, asm_op)			\
	ATOMIC_FETCH_OP(_acquire,  a, op, asm_op, "memory")		\
	ATOMIC_FETCH_OP(_release,  l, op, asm_op, "memory")		\
	ATOMIC_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC_FETCH_OPS(andnot, ldclr)
ATOMIC_FETCH_OPS(or, ldset)
ATOMIC_FETCH_OPS(xor, ldeor)
ATOMIC_FETCH_OPS(add, ldadd)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_FETCH_OPS

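/*
 * add_return needs a scratch register for the old value; w30 (the link
 * register) is used because the LL/SC alternative is a call and clobbers
 * it anyway. __nops(1) pads the out-of-line call so that both alternative
 * sequences occupy the same number of instruction slots.
 */
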
#define ATOMIC_OP_ADD_RETURN(name, mb, cl...)				\
static inline int atomic_add_return##name(int i, atomic_t *v)		\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC(add_return##name)				\
	__nops(1),							\
	/* LSE atomics */						\
	"	ldadd" #mb "	%w[i], w30, %[v]\n"			\
	"	add	%w[i], %w[i], w30")				\
	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return w0;							\
}

ATOMIC_OP_ADD_RETURN(_relaxed,   )
ATOMIC_OP_ADD_RETURN(_acquire,  a, "memory")
ATOMIC_OP_ADD_RETURN(_release,  l, "memory")
ATOMIC_OP_ADD_RETURN(        , al, "memory")

#undef ATOMIC_OP_ADD_RETURN

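/*
 * The LSE ISA has no atomic AND, only an atomic bit-clear (STCLR/LDCLR),
 * so AND is implemented by complementing the operand first:
 * v & i == v & ~(~i), i.e. MVN followed by STCLR. Because the MVN writes
 * %w[i] before the final use of the address register, the operand needs
 * the "+&r" early-clobber constraint.
 */
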
static inline void atomic_and(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	__LL_SC_ATOMIC(and)
	__nops(1),
	/* LSE atomics */
	"	mvn	%w[i], %w[i]\n"
	"	stclr	%w[i], %[v]")
	: [i] "+&r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

#define ATOMIC_FETCH_OP_AND(name, mb, cl...)				\
static inline int atomic_fetch_and##name(int i, atomic_t *v)		\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC(fetch_and##name)					\
	__nops(1),							\
	/* LSE atomics */						\
	"	mvn	%w[i], %w[i]\n"					\
	"	ldclr" #mb "	%w[i], %w[i], %[v]")			\
	: [i] "+&r" (w0), [v] "+Q" (v->counter)			\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return w0;							\
}

ATOMIC_FETCH_OP_AND(_relaxed,   )
ATOMIC_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC_FETCH_OP_AND(_release,  l, "memory")
ATOMIC_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC_FETCH_OP_AND

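/*
 * Similarly, there is no atomic subtract: SUB is built from a NEG of the
 * operand followed by STADD (or LDADD for the value-returning forms).
 * sub_return needs __nops(2) since its LSE sequence is three instructions
 * long versus the single call of the LL/SC alternative.
 */
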
static inline void atomic_sub(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	__LL_SC_ATOMIC(sub)
	__nops(1),
	/* LSE atomics */
	"	neg	%w[i], %w[i]\n"
	"	stadd	%w[i], %[v]")
	: [i] "+&r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

#define ATOMIC_OP_SUB_RETURN(name, mb, cl...)				\
static inline int atomic_sub_return##name(int i, atomic_t *v)		\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC(sub_return##name)				\
	__nops(2),							\
	/* LSE atomics */						\
	"	neg	%w[i], %w[i]\n"					\
	"	ldadd" #mb "	%w[i], w30, %[v]\n"			\
	"	add	%w[i], %w[i], w30")				\
	: [i] "+&r" (w0), [v] "+Q" (v->counter)			\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return w0;							\
}

ATOMIC_OP_SUB_RETURN(_relaxed,   )
ATOMIC_OP_SUB_RETURN(_acquire,  a, "memory")
ATOMIC_OP_SUB_RETURN(_release,  l, "memory")
ATOMIC_OP_SUB_RETURN(        , al, "memory")

#undef ATOMIC_OP_SUB_RETURN

#define ATOMIC_FETCH_OP_SUB(name, mb, cl...)				\
static inline int atomic_fetch_sub##name(int i, atomic_t *v)		\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC(fetch_sub##name)					\
	__nops(1),							\
	/* LSE atomics */						\
	"	neg	%w[i], %w[i]\n"					\
	"	ldadd" #mb "	%w[i], %w[i], %[v]")			\
	: [i] "+&r" (w0), [v] "+Q" (v->counter)			\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return w0;							\
}

ATOMIC_FETCH_OP_SUB(_relaxed,   )
ATOMIC_FETCH_OP_SUB(_acquire,  a, "memory")
ATOMIC_FETCH_OP_SUB(_release,  l, "memory")
ATOMIC_FETCH_OP_SUB(        , al, "memory")

#undef ATOMIC_FETCH_OP_SUB
#undef __LL_SC_ATOMIC

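/*
 * The atomic64_t operations mirror the 32-bit ones exactly, using the
 * full 64-bit X registers (%[i] rather than %w[i]) and x30 as the
 * scratch register where a temporary is needed.
 */
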
#define __LL_SC_ATOMIC64(op)	__LL_SC_CALL(atomic64_##op)
#define ATOMIC64_OP(op, asm_op)						\
static inline void atomic64_##op(long i, atomic64_t *v)		\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op),	\
"	" #asm_op "	%[i], %[v]\n")					\
	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS);						\
}

ATOMIC64_OP(andnot, stclr)
ATOMIC64_OP(or, stset)
ATOMIC64_OP(xor, steor)
ATOMIC64_OP(add, stadd)

#undef ATOMIC64_OP

#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...)			\
static inline long atomic64_fetch_##op##name(long i, atomic64_t *v)	\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC64(fetch_##op##name),				\
	/* LSE atomics */						\
"	" #asm_op #mb "	%[i], %[i], %[v]")				\
	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

#define ATOMIC64_FETCH_OPS(op, asm_op)					\
	ATOMIC64_FETCH_OP(_relaxed,   , op, asm_op)			\
	ATOMIC64_FETCH_OP(_acquire,  a, op, asm_op, "memory")		\
	ATOMIC64_FETCH_OP(_release,  l, op, asm_op, "memory")		\
	ATOMIC64_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC64_FETCH_OPS(andnot, ldclr)
ATOMIC64_FETCH_OPS(or, ldset)
ATOMIC64_FETCH_OPS(xor, ldeor)
ATOMIC64_FETCH_OPS(add, ldadd)

#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS

#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...)				\
static inline long atomic64_add_return##name(long i, atomic64_t *v)	\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC64(add_return##name)				\
	__nops(1),							\
	/* LSE atomics */						\
	"	ldadd" #mb "	%[i], x30, %[v]\n"			\
	"	add	%[i], %[i], x30")				\
	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

ATOMIC64_OP_ADD_RETURN(_relaxed,   )
ATOMIC64_OP_ADD_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_ADD_RETURN(_release,  l, "memory")
ATOMIC64_OP_ADD_RETURN(        , al, "memory")

#undef ATOMIC64_OP_ADD_RETURN

static inline void atomic64_and(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	__LL_SC_ATOMIC64(and)
	__nops(1),
	/* LSE atomics */
	"	mvn	%[i], %[i]\n"
	"	stclr	%[i], %[v]")
	: [i] "+&r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

#define ATOMIC64_FETCH_OP_AND(name, mb, cl...)				\
static inline long atomic64_fetch_and##name(long i, atomic64_t *v)	\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC64(fetch_and##name)				\
	__nops(1),							\
	/* LSE atomics */						\
	"	mvn	%[i], %[i]\n"					\
	"	ldclr" #mb "	%[i], %[i], %[v]")			\
	: [i] "+&r" (x0), [v] "+Q" (v->counter)			\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

ATOMIC64_FETCH_OP_AND(_relaxed,   )
ATOMIC64_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_AND(_release,  l, "memory")
ATOMIC64_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC64_FETCH_OP_AND

static inline void atomic64_sub(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	__LL_SC_ATOMIC64(sub)
	__nops(1),
	/* LSE atomics */
	"	neg	%[i], %[i]\n"
	"	stadd	%[i], %[v]")
	: [i] "+&r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...)				\
static inline long atomic64_sub_return##name(long i, atomic64_t *v)	\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC64(sub_return##name)				\
	__nops(2),							\
	/* LSE atomics */						\
	"	neg	%[i], %[i]\n"					\
	"	ldadd" #mb "	%[i], x30, %[v]\n"			\
	"	add	%[i], %[i], x30")				\
	: [i] "+&r" (x0), [v] "+Q" (v->counter)			\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

ATOMIC64_OP_SUB_RETURN(_relaxed,   )
ATOMIC64_OP_SUB_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_SUB_RETURN(_release,  l, "memory")
ATOMIC64_OP_SUB_RETURN(        , al, "memory")

#undef ATOMIC64_OP_SUB_RETURN

#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...)				\
static inline long atomic64_fetch_sub##name(long i, atomic64_t *v)	\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_ATOMIC64(fetch_sub##name)				\
	__nops(1),							\
	/* LSE atomics */						\
	"	neg	%[i], %[i]\n"					\
	"	ldadd" #mb "	%[i], %[i], %[v]")			\
	: [i] "+&r" (x0), [v] "+Q" (v->counter)			\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

ATOMIC64_FETCH_OP_SUB(_relaxed,   )
ATOMIC64_FETCH_OP_SUB(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_SUB(_release,  l, "memory")
ATOMIC64_FETCH_OP_SUB(        , al, "memory")

#undef ATOMIC64_FETCH_OP_SUB

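/*
 * dec_if_positive has no single LSE instruction, so the LSE path is a
 * CASAL loop: load the counter, compute counter - 1 and bail out to
 * label 2 if the result is negative, then try to swap the new value in.
 * The two SUBs compute (old - 1) - new from the value CASAL observed,
 * which is zero only if the exchange succeeded; CBNZ retries otherwise.
 */
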
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	register long x0 asm ("x0") = (long)v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	__LL_SC_ATOMIC64(dec_if_positive)
	__nops(6),
	/* LSE atomics */
	"1:	ldr	x30, %[v]\n"
	"	subs	%[ret], x30, #1\n"
	"	b.lt	2f\n"
	"	casal	x30, %[ret], %[v]\n"
	"	sub	x30, x30, #1\n"
	"	sub	x30, x30, %[ret]\n"
	"	cbnz	x30, 1b\n"
	"2:")
	: [ret] "+&r" (x0), [v] "+Q" (v->counter)
	:
	: __LL_SC_CLOBBERS, "cc", "memory");

	return x0;
}

#undef __LL_SC_ATOMIC64

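/*
 * __cmpxchg_case_<name> maps each size/ordering combination onto the
 * corresponding CAS{,A,L,AL}{,B,H} instruction: "w"/"x" selects the
 * register width, "sz" the byte/halfword suffix and "mb" the ordering.
 * w30/x30 carries the expected value because CAS overwrites its first
 * register operand with the value actually read, which becomes the
 * return value.
 */
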
#define __LL_SC_CMPXCHG(op)	__LL_SC_CALL(__cmpxchg_case_##op)

#define __CMPXCHG_CASE(w, sz, name, mb, cl...)				\
static inline unsigned long __cmpxchg_case_##name(volatile void *ptr,	\
						  unsigned long old,	\
						  unsigned long new)	\
{									\
	register unsigned long x0 asm ("x0") = (unsigned long)ptr;	\
	register unsigned long x1 asm ("x1") = old;			\
	register unsigned long x2 asm ("x2") = new;			\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_CMPXCHG(name)						\
	__nops(2),							\
	/* LSE atomics */						\
	"	mov	" #w "30, %" #w "[old]\n"			\
	"	cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n"		\
	"	mov	%" #w "[ret], " #w "30")			\
	: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr)		\
	: [old] "r" (x1), [new] "r" (x2)				\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

__CMPXCHG_CASE(w, b,     1,   )
__CMPXCHG_CASE(w, h,     2,   )
__CMPXCHG_CASE(w,  ,     4,   )
__CMPXCHG_CASE(x,  ,     8,   )
__CMPXCHG_CASE(w, b, acq_1,  a, "memory")
__CMPXCHG_CASE(w, h, acq_2,  a, "memory")
__CMPXCHG_CASE(w,  , acq_4,  a, "memory")
__CMPXCHG_CASE(x,  , acq_8,  a, "memory")
__CMPXCHG_CASE(w, b, rel_1,  l, "memory")
__CMPXCHG_CASE(w, h, rel_2,  l, "memory")
__CMPXCHG_CASE(w,  , rel_4,  l, "memory")
__CMPXCHG_CASE(x,  , rel_8,  l, "memory")
__CMPXCHG_CASE(w, b,  mb_1, al, "memory")
__CMPXCHG_CASE(w, h,  mb_2, al, "memory")
__CMPXCHG_CASE(w,  ,  mb_4, al, "memory")
__CMPXCHG_CASE(x,  ,  mb_8, al, "memory")

#undef __LL_SC_CMPXCHG
#undef __CMPXCHG_CASE

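/*
 * The double-word compare-and-exchange uses CASP on an (x0, x1)/(x2, x3)
 * register pair and must return zero on success. CASP writes the values
 * it observed back into the old-value pair, so the two EORs compare them
 * against copies of the originals and the ORR folds the result into x0:
 * zero iff both words matched.
 */
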
#define __LL_SC_CMPXCHG_DBL(op)	__LL_SC_CALL(__cmpxchg_double##op)

#define __CMPXCHG_DBL(name, mb, cl...)					\
static inline long __cmpxchg_double##name(unsigned long old1,		\
					  unsigned long old2,		\
					  unsigned long new1,		\
					  unsigned long new2,		\
					  volatile void *ptr)		\
{									\
	unsigned long oldval1 = old1;					\
	unsigned long oldval2 = old2;					\
	register unsigned long x0 asm ("x0") = old1;			\
	register unsigned long x1 asm ("x1") = old2;			\
	register unsigned long x2 asm ("x2") = new1;			\
	register unsigned long x3 asm ("x3") = new2;			\
	register unsigned long x4 asm ("x4") = (unsigned long)ptr;	\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	__LL_SC_CMPXCHG_DBL(name)					\
	__nops(3),							\
	/* LSE atomics */						\
	"	casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
	"	eor	%[old1], %[old1], %[oldval1]\n"			\
	"	eor	%[old2], %[old2], %[oldval2]\n"			\
	"	orr	%[old1], %[old1], %[old2]")			\
	: [old1] "+&r" (x0), [old2] "+&r" (x1),				\
	  [v] "+Q" (*(unsigned long *)ptr)				\
	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),		\
	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)		\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

__CMPXCHG_DBL(   ,   )
__CMPXCHG_DBL(_mb, al, "memory")

#undef __LL_SC_CMPXCHG_DBL
#undef __CMPXCHG_DBL

#endif	/* __ASM_ATOMIC_LSE_H */