/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	READ_ONCE((v)->counter)
#define atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))
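
/*
 * Illustrative usage sketch (not part of this header; the variable and
 * function names below are hypothetical):
 *
 *	static atomic_t nr_active = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_active, 5);
 *	if (atomic_read(&nr_active) > 0)
 *		do_something();
 */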

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic_" #op "\n"			\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result, val;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_fetch_" #op "\n"			\
"1:	ldrex	%0, [%4]\n"						\
"	" #asm_op "	%1, %0, %5\n"					\
"	strex	%2, %1, [%4]\n"						\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}
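
/*
 * Return-value convention for the generated functions (illustrative
 * sketch): atomic_##op##_return_relaxed() returns the *new* value, while
 * atomic_fetch_##op##_relaxed() returns the *old* one.
 *
 *	atomic_t v = ATOMIC_INIT(5);
 *
 *	atomic_add_return_relaxed(1, &v);	// returns 6, v is now 6
 *	atomic_fetch_add_relaxed(1, &v);	// returns 6, v is now 7
 */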

#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed

static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	return oldval;
}
#define atomic_cmpxchg_relaxed		atomic_cmpxchg_relaxed
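
/*
 * The usual compare-and-swap retry loop built on the primitive above
 * (an illustrative sketch; the function name is hypothetical, and callers
 * needing ordering go through the fully ordered wrappers that
 * <linux/atomic.h> builds from this _relaxed version):
 *
 *	static int atomic_double(atomic_t *v)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = atomic_read(v);
 *			new = old * 2;
 *		} while (atomic_cmpxchg_relaxed(v, old, new) != old);
 *
 *		return new;
 *	}
 */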

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex	%0, [%4]\n"
"	teq	%0, %5\n"
"	beq	2f\n"
"	add	%1, %0, %6\n"
"	strex	%2, %1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
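
/*
 * __atomic_add_unless() returns the value the counter held *before* the
 * attempted add; the add is only performed when that value differs from
 * 'u'. The generic atomic_add_unless() wrapper in <linux/atomic.h> turns
 * this into a boolean, roughly:
 *
 *	atomic_add_unless(v, a, u) == (__atomic_add_unless(v, a, u) != u)
 */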

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	val = v->counter;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	val = v->counter;						\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}
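
/*
 * On a pre-ARMv6 UP core the irq-disabled critical sections above are
 * sufficient: with a single CPU and interrupts masked, nothing can observe
 * the counter mid-update. As a sketch, ATOMIC_OP(add, +=, add) expands to:
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long flags;
 *
 *		raw_local_irq_save(flags);
 *		v->counter += i;
 *		raw_local_irq_restore(flags);
 *	}
 */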

#endif /* __LINUX_ARM_ARCH__ */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, orr)
ATOMIC_OPS(xor, ^=, eor)
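
/*
 * The instantiations above provide the arithmetic entry points in full
 * (atomic_add(), atomic_add_return_relaxed(), atomic_fetch_add_relaxed(),
 * and the sub equivalents), while the bitwise group omits the _return
 * forms. Note that "andnot" maps to the ARM bic instruction, i.e.
 * (illustrative):
 *
 *	atomic_andnot(0x0f, &v);	// clears the low four bits of v
 */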

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return_relaxed(v)	(atomic_add_return_relaxed(1, v))
#define atomic_dec_return_relaxed(v)	(atomic_sub_return_relaxed(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
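
/*
 * The dec-and-test form above is the usual building block for reference
 * counting; an illustrative (hypothetical) release path:
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		kfree(obj);
 */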

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }
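
/*
 * Illustrative 64-bit counter (hypothetical name; atomic64_add() is
 * generated further below):
 *
 *	static atomic64_t bytes_done = ATOMIC64_INIT(0);
 *
 *	atomic64_add(4096LL, &bytes_done);
 */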

#ifdef CONFIG_ARM_LPAE
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	long long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif

#define ATOMIC64_OP(op, op1, op2)					\
static inline void atomic64_##op(long long i, atomic64_t *v)		\
{									\
	long long result;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic64_" #op "\n"			\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
}									\

#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline long long							\
atomic64_##op##_return_relaxed(long long i, atomic64_t *v)		\
{									\
	long long result;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)					\
static inline long long							\
atomic64_fetch_##op##_relaxed(long long i, atomic64_t *v)		\
{									\
	long long result, val;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_fetch_" #op "\n"		\
"1:	ldrexd	%0, %H0, [%4]\n"					\
"	" #op1 " %Q1, %Q0, %Q5\n"					\
"	" #op2 " %R1, %R0, %R5\n"					\
"	strexd	%2, %1, %H1, [%4]\n"					\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot atomic64_andnot

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, orr, orr)
ATOMIC64_OPS(xor, eor, eor)

#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long long
atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
{
	long long oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg_relaxed
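
/*
 * Same retry-loop pattern as the 32-bit primitive, now comparing both
 * halves of the doubleword (illustrative sketch, hypothetical names):
 *
 *	long long old, new;
 *
 *	do {
 *		old = atomic64_read(&v);
 *		new = old | 1LL;
 *	} while (atomic64_cmpxchg_relaxed(&v, old, new) != old);
 */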

static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
{
	long long result;
	unsigned long tmp;

	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	return result;
}
#define atomic64_xchg_relaxed		atomic64_xchg_relaxed

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, #1\n"
"	sbc	%R0, %R0, #0\n"
"	teq	%R0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
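
/*
 * atomic64_dec_if_positive() only stores when the decremented value is
 * non-negative, but returns that value either way, so a negative return
 * means the counter was not touched. Illustrative (hypothetical) use:
 *
 *	if (atomic64_dec_if_positive(&sem_count) < 0)
 *		return -EBUSY;	// already zero, nothing was consumed
 */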

static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long val;
	unsigned long tmp;
	int ret = 1;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	moveq	%1, #0\n"
"	beq	2f\n"
"	adds	%Q0, %Q0, %Q6\n"
"	adc	%R0, %R0, %R6\n"
"	strexd	%2, %0, %H0, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (ret)
		smp_mb();

	return ret;
}
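
/*
 * Unlike the 32-bit __atomic_add_unless() above, this returns a boolean:
 * 1 when the add was performed, 0 when the counter already equalled 'u'.
 * atomic64_inc_not_zero() below builds directly on it; illustrative
 * (hypothetical) "take a reference unless already dead" pattern:
 *
 *	if (!atomic64_inc_not_zero(&obj->refs))
 *		return NULL;
 */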

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return_relaxed(v)	atomic64_add_return_relaxed(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return_relaxed(v)	atomic64_sub_return_relaxed(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
#endif