#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/types.h>
#include <asm/system.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */
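
/*
 * With no SMP, a single read-modify-write instruction (e.g. the "addl"
 * to memory below) is atomic by itself: interrupts are only taken on
 * instruction boundaries, so an interrupt handler can never observe a
 * half-updated counter.
 */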

#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))

/*
 * The ColdFire parts cannot do some immediate-to-memory operations,
 * so for them we do not specify the "i" asm constraint.
 */
#ifdef CONFIG_COLDFIRE
#define ASM_DI	"d"
#else
#define ASM_DI	"di"
#endif
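
/*
 * In practice: with "di" the compiler may emit an immediate form such
 * as "addl #16,(%a0)", while plain "d" forces the constant into a data
 * register first ("movel #16,%d1; addl %d1,(%a0)"), because ColdFire's
 * immediate ALU instructions can only target data registers.
 */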

static inline void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
}

static inline void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : ASM_DI (i));
}

static inline void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}

static inline void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}

static inline int atomic_dec_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
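
/*
 * The "seq" above (and "slt"/"smi" below) are m68k Scc instructions:
 * they set the result byte to 0xff when the tested condition holds
 * (Z for "equal to zero", N^V for "less than zero", N for "minus")
 * and to 0x00 otherwise, hence the "c != 0" tests.
 */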

static inline int atomic_dec_and_test_lt(atomic_t *v)
{
	char c;
	__asm__ __volatile__(
		"subql #1,%1; slt %0"
		: "=d" (c), "=m" (*v)
		: "m" (*v));
	return c != 0;
}
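
/*
 * Unlike atomic_dec_and_test(), which reports a result of exactly
 * zero, atomic_dec_and_test_lt() reports a result that went negative,
 * i.e. the counter was <= 0 before the decrement.
 */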

static inline int atomic_inc_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

#ifdef CONFIG_RMW_INSNS

static inline int atomic_add_return(int i, atomic_t *v)
{
	int t, tmp;

	__asm__ __volatile__(
			"1:	movel %2,%1\n"
			"	addl %3,%1\n"
			"	casl %2,%1,%0\n"
			"	jne 1b"
			: "+m" (*v), "=&d" (t), "=&d" (tmp)
			: "g" (i), "2" (atomic_read(v)));
	return t;
}
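
/*
 * The loop above is a classic compare-and-swap retry: take a snapshot
 * of the counter, compute snapshot + i in a scratch register, then let
 * "casl" store the sum only if the memory word still equals the
 * snapshot.  On failure, casl fetches the fresh value into the
 * snapshot register and clears Z, so "jne" retries the update.
 */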

static inline int atomic_sub_return(int i, atomic_t *v)
{
	int t, tmp;

	__asm__ __volatile__(
			"1:	movel %2,%1\n"
			"	subl %3,%1\n"
			"	casl %2,%1,%0\n"
			"	jne 1b"
			: "+m" (*v), "=&d" (t), "=&d" (tmp)
			: "g" (i), "2" (atomic_read(v)));
	return t;
}

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
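
/*
 * atomic_cmpxchg()/atomic_xchg() simply defer to the arch's generic
 * cmpxchg()/xchg() helpers, made available through <asm/system.h>
 * included above.
 */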

#else /* !CONFIG_RMW_INSNS */

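/*
 * Without a CAS instruction (e.g. 68000 and ColdFire parts) we fall
 * back to briefly disabling local interrupts around a plain
 * read-modify-write sequence; with no SMP that is enough to make the
 * update atomic.
 */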
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int t;

	local_irq_save(flags);
	t = atomic_read(v);
	t += i;
	atomic_set(v, t);
	local_irq_restore(flags);

	return t;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int t;

	local_irq_save(flags);
	t = atomic_read(v);
	t -= i;
	atomic_set(v, t);
	local_irq_restore(flags);

	return t;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	if (prev == old)
		atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

static inline int atomic_xchg(atomic_t *v, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

#endif /* !CONFIG_RMW_INSNS */

#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))

static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}

static inline int atomic_add_negative(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("addl %2,%1; smi %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}

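/*
 * Note: these two operate on a plain unsigned long, not an atomic_t.
 * atomic_clear_mask() takes the positive mask of bits to clear; the
 * inversion to an AND mask happens here (at compile time for constant
 * masks).
 */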
static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask)));
}

static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
}

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
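
/*
 * __atomic_add_unless() is a cmpxchg() retry loop like the casl loop
 * in atomic_add_return() above; it returns the counter value it last
 * observed.  The generic wrapper in <linux/atomic.h> compares that
 * against 'u' to report whether the add happened, roughly:
 *
 *	static inline int atomic_add_unless(atomic_t *v, int a, int u)
 *	{
 *		return __atomic_add_unless(v, a, u) != u;
 *	}
 */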

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* __ARCH_M68K_ATOMIC__ */