/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

#define atomic_read(v)	((v)->counter)

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.  Writing to 'v->counter'
 * without using the following operations WILL break the atomic
 * nature of these ops.
 */
static inline void atomic_set(atomic_t *v, int i)
{
	unsigned long tmp;

	__asm__ __volatile__("@ atomic_set\n"
"1:	ldrex	%0, [%1]\n"
"	strex	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
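
/*
 * Illustrative sketch (hypothetical 'counter', not part of this
 * header).  Per the warning above, all accesses go through the
 * accessors; a bare 'counter.counter = 0' store is exactly what
 * would break the atomicity guarantee:
 *
 *	static atomic_t counter = ATOMIC_INIT(0);
 *
 *	atomic_set(&counter, 0);
 *	atomic_add(5, &counter);
 *	BUG_ON(atomic_read(&counter) != 5);
 */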

static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%2]\n"
"	add	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%2]\n"
"	add	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}
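
/*
 * Note the smp_mb() pair above: the operations that return a value
 * (atomic_add_return, atomic_sub_return, atomic_cmpxchg) act as full
 * memory barriers, whereas the void variants (atomic_add, atomic_sub)
 * imply no ordering at all.
 */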
84
Russell Kingbac4e962009-05-25 20:58:00 +010085static inline void atomic_sub(int i, atomic_t *v)
86{
87 unsigned long tmp;
88 int result;
89
90 __asm__ __volatile__("@ atomic_sub\n"
91"1: ldrex %0, [%2]\n"
92" sub %0, %0, %3\n"
93" strex %1, %0, [%2]\n"
94" teq %1, #0\n"
95" bne 1b"
96 : "=&r" (result), "=&r" (tmp)
97 : "r" (&v->counter), "Ir" (i)
98 : "cc");
99}
100
Linus Torvalds1da177e2005-04-16 15:20:36 -0700101static inline int atomic_sub_return(int i, atomic_t *v)
102{
103 unsigned long tmp;
104 int result;
105
Russell Kingbac4e962009-05-25 20:58:00 +0100106 smp_mb();
107
Linus Torvalds1da177e2005-04-16 15:20:36 -0700108 __asm__ __volatile__("@ atomic_sub_return\n"
109"1: ldrex %0, [%2]\n"
110" sub %0, %0, %3\n"
111" strex %1, %0, [%2]\n"
112" teq %1, #0\n"
113" bne 1b"
114 : "=&r" (result), "=&r" (tmp)
115 : "r" (&v->counter), "Ir" (i)
116 : "cc");
117
Russell Kingbac4e962009-05-25 20:58:00 +0100118 smp_mb();
119
Linus Torvalds1da177e2005-04-16 15:20:36 -0700120 return result;
121}
122
Nick Piggin4a6dae62005-11-13 16:07:24 -0800123static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
124{
Russell King49ee57a2005-11-16 18:03:10 +0000125 unsigned long oldval, res;
Nick Piggin4a6dae62005-11-13 16:07:24 -0800126
Russell Kingbac4e962009-05-25 20:58:00 +0100127 smp_mb();
128
Nick Piggin4a6dae62005-11-13 16:07:24 -0800129 do {
130 __asm__ __volatile__("@ atomic_cmpxchg\n"
131 "ldrex %1, [%2]\n"
Nicolas Pitrea7d06832005-11-16 15:05:11 +0000132 "mov %0, #0\n"
Nick Piggin4a6dae62005-11-13 16:07:24 -0800133 "teq %1, %3\n"
134 "strexeq %0, %4, [%2]\n"
135 : "=&r" (res), "=&r" (oldval)
136 : "r" (&ptr->counter), "Ir" (old), "r" (new)
137 : "cc");
138 } while (res);
139
Russell Kingbac4e962009-05-25 20:58:00 +0100140 smp_mb();
141
Nick Piggin4a6dae62005-11-13 16:07:24 -0800142 return oldval;
143}
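
/*
 * A typical lock-free update loop built on atomic_cmpxchg() (sketch
 * only; 'v', 'delta' and compute_new() are hypothetical):
 *
 *	int old, new;
 *
 *	do {
 *		old = atomic_read(v);
 *		new = compute_new(old, delta);
 *	} while (atomic_cmpxchg(v, old, new) != old);
 */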
144
Linus Torvalds1da177e2005-04-16 15:20:36 -0700145static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
146{
147 unsigned long tmp, tmp2;
148
149 __asm__ __volatile__("@ atomic_clear_mask\n"
Stelian Pop0803c302007-03-15 16:54:27 +0100150"1: ldrex %0, [%2]\n"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700151" bic %0, %0, %3\n"
Stelian Pop0803c302007-03-15 16:54:27 +0100152" strex %1, %0, [%2]\n"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700153" teq %1, #0\n"
154" bne 1b"
155 : "=&r" (tmp), "=&r" (tmp2)
156 : "r" (addr), "Ir" (mask)
157 : "cc");
158}
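
/*
 * Example (hypothetical values): atomically clear the low four bits
 * of a word:
 *
 *	unsigned long word = 0xff;
 *
 *	atomic_clear_mask(0x0f, &word);		word is now 0xf0
 */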

#else /* ARM_ARCH_6 */

#include <asm/system.h>

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif
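
/*
 * Pre-ARMv6 cores lack the exclusive load/store instructions, so the
 * fallbacks below get their atomicity by disabling interrupts around
 * the read-modify-write sequence; the #error above keeps this safe by
 * ruling out SMP.
 */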

#define atomic_set(v,i)	(((v)->counter) = (i))

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_add(i, v)	(void) atomic_add_return(i, v)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	*addr &= ~mask;
	raw_local_irq_restore(flags);
}

#endif /* __LINUX_ARM_ARCH__ */

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
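
/*
 * atomic_add_unless() returns non-zero iff it performed the addition.
 * Sketch (hypothetical 'obj'): take a reference only while the object
 * is still live:
 *
 *	if (!atomic_inc_not_zero(&obj->refs))
 *		return NULL;	already on its way to being freed
 */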
235
Russell Kingbac4e962009-05-25 20:58:00 +0100236#define atomic_inc(v) atomic_add(1, v)
237#define atomic_dec(v) atomic_sub(1, v)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700238
239#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
240#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
241#define atomic_inc_return(v) (atomic_add_return(1, v))
242#define atomic_dec_return(v) (atomic_sub_return(1, v))
243#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
244
245#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
246
Russell Kingbac4e962009-05-25 20:58:00 +0100247#define smp_mb__before_atomic_dec() smp_mb()
248#define smp_mb__after_atomic_dec() smp_mb()
249#define smp_mb__before_atomic_inc() smp_mb()
250#define smp_mb__after_atomic_inc() smp_mb()
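
/*
 * atomic_inc() and atomic_dec() imply no memory barriers of their
 * own; callers that need ordering around them use the hooks above,
 * which expand to a full smp_mb() here.
 */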

#include <asm-generic/atomic-long.h>
#endif
#endif