/*
 * arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))
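
/*
 * Illustrative sketch only, not part of the original header: how a caller
 * would typically declare and use an atomic_t with the accessors above.
 * The variable and function names are hypothetical.
 */
#if 0
static atomic_t example_counter = ATOMIC_INIT(0);

static void example_accessors(void)
{
	atomic_set(&example_counter, 5);	/* plain store; safe per the note above */
	(void)atomic_read(&example_counter);	/* volatile load of ->counter */
}
#endif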

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops. We use load exclusive and
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_sub\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long oldval, res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	smp_mb();

	return oldval;
}
134
Linus Torvalds1da177e2005-04-16 15:20:36 -0700135static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
136{
137 unsigned long tmp, tmp2;
138
139 __asm__ __volatile__("@ atomic_clear_mask\n"
Will Deacon398aa662010-07-08 10:59:16 +0100140"1: ldrex %0, [%3]\n"
141" bic %0, %0, %4\n"
142" strex %1, %0, [%3]\n"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700143" teq %1, #0\n"
144" bne 1b"
Will Deacon398aa662010-07-08 10:59:16 +0100145 : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700146 : "r" (addr), "Ir" (mask)
147 : "cc");
148}
149
150#else /* ARM_ARCH_6 */
151
Linus Torvalds1da177e2005-04-16 15:20:36 -0700152#ifdef CONFIG_SMP
153#error SMP not supported on pre-ARMv6 CPUs
154#endif
155
Linus Torvalds1da177e2005-04-16 15:20:36 -0700156static inline int atomic_add_return(int i, atomic_t *v)
157{
158 unsigned long flags;
159 int val;
160
Lennert Buytenhek8dd5c842006-09-16 10:47:18 +0100161 raw_local_irq_save(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700162 val = v->counter;
163 v->counter = val += i;
Lennert Buytenhek8dd5c842006-09-16 10:47:18 +0100164 raw_local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700165
166 return val;
167}
Russell Kingbac4e962009-05-25 20:58:00 +0100168#define atomic_add(i, v) (void) atomic_add_return(i, v)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700169
170static inline int atomic_sub_return(int i, atomic_t *v)
171{
172 unsigned long flags;
173 int val;
174
Lennert Buytenhek8dd5c842006-09-16 10:47:18 +0100175 raw_local_irq_save(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700176 val = v->counter;
177 v->counter = val -= i;
Lennert Buytenhek8dd5c842006-09-16 10:47:18 +0100178 raw_local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700179
180 return val;
181}
Russell Kingbac4e962009-05-25 20:58:00 +0100182#define atomic_sub(i, v) (void) atomic_sub_return(i, v)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700183
Nick Piggin4a6dae62005-11-13 16:07:24 -0800184static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
185{
186 int ret;
187 unsigned long flags;
188
Lennert Buytenhek8dd5c842006-09-16 10:47:18 +0100189 raw_local_irq_save(flags);
Nick Piggin4a6dae62005-11-13 16:07:24 -0800190 ret = v->counter;
191 if (likely(ret == old))
192 v->counter = new;
Lennert Buytenhek8dd5c842006-09-16 10:47:18 +0100193 raw_local_irq_restore(flags);
Nick Piggin4a6dae62005-11-13 16:07:24 -0800194
195 return ret;
196}
197
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
199{
200 unsigned long flags;
201
Lennert Buytenhek8dd5c842006-09-16 10:47:18 +0100202 raw_local_irq_save(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700203 *addr &= ~mask;
Lennert Buytenhek8dd5c842006-09-16 10:47:18 +0100204 raw_local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700205}
206
207#endif /* __LINUX_ARM_ARCH__ */
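
/*
 * Illustrative sketch only, not part of the original header: a typical
 * caller-side compare-and-swap retry loop built on atomic_cmpxchg(),
 * which either flavour above (ldrex/strex or IRQ-disable) provides.
 * The helper name is hypothetical.
 */
#if 0
static inline int example_saturating_inc(atomic_t *v, int limit)
{
	int old, new;

	do {
		old = atomic_read(v);
		if (old >= limit)
			return old;		/* already at the limit, leave it */
		new = old + 1;
	} while (atomic_cmpxchg(v, old, new) != old);

	return new;
}
#endif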

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}
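
/*
 * Illustrative sketch only, not part of this header: __atomic_add_unless()
 * returns the value the counter held before any update, so a boolean
 * "did we add?" wrapper (of the kind the generic atomic headers of this
 * era provide as atomic_add_unless()) would look roughly like this.
 */
#if 0
static inline int example_atomic_add_unless(atomic_t *v, int a, int u)
{
	return __atomic_add_unless(v, a, u) != u;
}
#endif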

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	u64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

static inline u64 atomic64_read(atomic64_t *v)
{
	u64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, u64 i)
{
	u64 tmp;

	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline void atomic64_add(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	__asm__ __volatile__("@ atomic64_add\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%0, %0, %4\n"
"	adc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_add_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%0, %0, %4\n"
"	adc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline void atomic64_sub(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	__asm__ __volatile__("@ atomic64_sub\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, %4\n"
"	sbc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_sub_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, %4\n"
"	sbc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
{
	u64 oldval;
	unsigned long res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	smp_mb();

	return oldval;
}

static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	smp_mb();

	return result;
}

static inline u64 atomic64_dec_if_positive(atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, #1\n"
"	sbc	%H0, %H0, #0\n"
"	teq	%H0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}

static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
	u64 val;
	unsigned long tmp;
	int ret = 1;

	smp_mb();

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	moveq	%1, #0\n"
"	beq	2f\n"
"	adds	%0, %0, %6\n"
"	adc	%H0, %H0, %H6\n"
"	strexd	%2, %0, %H0, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (ret)
		smp_mb();

	return ret;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
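
/*
 * Illustrative sketch only, not part of the original header: the classic
 * use of atomic64_inc_not_zero() is taking a reference only while an
 * object's refcount has not already dropped to zero.  The helper name is
 * hypothetical.
 */
#if 0
static inline int example_get_ref(atomic64_t *refcount)
{
	/* Returns non-zero if a reference was taken, 0 if the count was 0. */
	return atomic64_inc_not_zero(refcount);
}
#endif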

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
#endif