#ifndef __ASM_SH_ATOMIC_H
#define __ASM_SH_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	( (atomic_t) { (i) } )

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		((v)->counter = (i))

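/*
 * Example: a simple reference count built on the helpers below
 * (release_foo() stands in for whatever cleanup the caller needs):
 *
 *	static atomic_t foo_refs = ATOMIC_INIT(1);
 *
 *	atomic_inc(&foo_refs);
 *	if (atomic_dec_and_test(&foo_refs))
 *		release_foo();
 */
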
#include <linux/compiler.h>
#include <asm/system.h>

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */
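/*
 * On SH-4A the operations below use the movli.l/movco.l (LL/SC) pair:
 * movli.l loads the counter and sets the link flag, movco.l writes the
 * result back only if the link is still held (setting the T bit on
 * success), and bf loops to retry when the store failed.  Other CPUs
 * fall back to disabling interrupts around a plain read-modify-write.
 */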
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%3, %0		! atomic_add	\n"
"	add	%2, %0				\n"
"	movco.l	%0, @%3				\n"
"	bf	1b				\n"
	: "=&z" (tmp), "=r" (&v->counter)
	: "r" (i), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v += i;
	local_irq_restore(flags);
#endif
}

static inline void atomic_sub(int i, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%3, %0		! atomic_sub	\n"
"	sub	%2, %0				\n"
"	movco.l	%0, @%3				\n"
"	bf	1b				\n"
	: "=&z" (tmp), "=r" (&v->counter)
	: "r" (i), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v -= i;
	local_irq_restore(flags);
#endif
}

/*
 * SH-4A note:
 *
 * We basically get atomic_xxx_return() for free compared with
 * atomic_xxx().  movli.l/movco.l require r0 due to the instruction
 * encoding, so the retval is automatically set without having to
 * do any special work.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long temp;

#ifdef CONFIG_CPU_SH4A
	__asm__ __volatile__ (
"1:	movli.l @%3, %0		! atomic_add_return	\n"
"	add	%2, %0				\n"
"	movco.l	%0, @%3				\n"
"	bf	1b				\n"
"	synco					\n"
	: "=&z" (temp), "=r" (&v->counter)
	: "r" (i), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	temp = *(long *)v;
	temp += i;
	*(long *)v = temp;
	local_irq_restore(flags);
#endif

	return temp;
}

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long temp;

#ifdef CONFIG_CPU_SH4A
	__asm__ __volatile__ (
"1:	movli.l @%3, %0		! atomic_sub_return	\n"
"	sub	%2, %0				\n"
"	movco.l	%0, @%3				\n"
"	bf	1b				\n"
"	synco					\n"
	: "=&z" (temp), "=r" (&v->counter)
	: "r" (i), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	temp = *(long *)v;
	temp -= i;
	*(long *)v = temp;
	local_irq_restore(flags);
#endif

	return temp;
}

#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)

#define atomic_inc(v)	atomic_add(1, (v))
#define atomic_dec(v)	atomic_sub(1, (v))

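/*
 * atomic_cmpxchg - compare and exchange
 * @v: pointer of type atomic_t
 * @old: expected value
 * @new: value to store if @v still holds @old
 *
 * Atomically compares @v with @old and, on a match, stores @new.
 * Returns the value of @v before the operation.
 */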
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);

	return ret;
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

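/*
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to @v...
 * @u: ...unless @v is equal to @u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */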
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (ret != u)
		v->counter += a;
	local_irq_restore(flags);

	return ret != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

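/*
 * atomic_clear_mask - atomically clear the bits set in @mask from @v
 * @mask: bits to clear
 * @v: pointer of type atomic_t
 */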
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%3, %0		! atomic_clear_mask	\n"
"	and	%2, %0				\n"
"	movco.l	%0, @%3				\n"
"	bf	1b				\n"
	: "=&z" (tmp), "=r" (&v->counter)
	: "r" (~mask), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v &= ~mask;
	local_irq_restore(flags);
#endif
}

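/*
 * atomic_set_mask - atomically set the bits in @mask in @v
 * @mask: bits to set
 * @v: pointer of type atomic_t
 */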
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%3, %0		! atomic_set_mask	\n"
"	or	%2, %0				\n"
"	movco.l	%0, @%3				\n"
"	bf	1b				\n"
	: "=&z" (tmp), "=r" (&v->counter)
	: "r" (mask), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	*(long *)v |= mask;
	local_irq_restore(flags);
#endif
}

/* Atomic operations are already serializing on SH */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#include <asm-generic/atomic.h>
#endif /* __ASM_SH_ATOMIC_H */