#ifndef _M68KNOMMU_BITOPS_H
#define _M68KNOMMU_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */
#include <asm/system.h>		/* save_flags */

#ifdef __KERNEL__

#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffz.h>

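/*
 * Bit nr is counted from the least significant bit of the long word.
 * The m68k bset/bclr/bchg instructions address a single memory byte
 * and take the bit number modulo 8, so (nr ^ 31) >> 3 selects the
 * byte within the big-endian 32-bit word that actually holds bit nr.
 * Each operation below is a single instruction and therefore atomic
 * on this uniprocessor architecture.
 */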
static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bset %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

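/*
 * The operations above are already atomic, so the "non-atomic"
 * double-underscore variants can simply alias them.
 */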
#define __set_bit(nr, addr) set_bit(nr, addr)

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bclr %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

#define __clear_bit(nr, addr) clear_bit(nr, addr)

static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bchg %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

#define __change_bit(nr, addr) change_bit(nr, addr)

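/*
 * The test-and-modify forms reuse the same bit instructions, which
 * set the Z condition code from the old value of the tested bit;
 * "sne %0" then converts Z into a 0/0xff byte, so retval is nonzero
 * iff the bit was set before the operation.
 */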
static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)

static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)

static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0\n\tbchg %2,(%%a0)\n\tsne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bchg %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr)

/*
 * This routine doesn't need to be atomic.
 */
static __inline__ int __constant_test_bit(int nr, const volatile unsigned long * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int __test_bit(int nr, const volatile unsigned long * addr)
{
	int * a = (int *) addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	return ((mask & *a) != 0);
}

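/*
 * test_bit() dispatches at compile time: a constant nr folds down to
 * __constant_test_bit(), anything else goes through __test_bit().
 */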
#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)))

#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>

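/*
 * The ext2 bitops operate on little-endian bitmaps: bit nr lives in
 * byte nr >> 3, so the byte index needs no (nr ^ 31) twist here.
 */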
static __inline__ int ext2_set_bit(int nr, volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

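/*
 * The "atomic" ext2 variants serialize against other users of the
 * same bitmap by taking the caller-supplied spinlock around the
 * plain operation.
 */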
#define ext2_set_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_set_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})

#define ext2_clear_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_clear_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})

static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0"
		: "=d" (retval)
		: "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("btst %2,%1; sne %0"
		: "=d" (retval)
		: "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

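/*
 * Scan a little-endian bitmap for the next zero bit at or after
 * 'offset'; returns the bit number, or a value no smaller than
 * 'size' when every bit in the range is set.
 */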
static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* tmp holds the little-endian value, so shifting a
		 * mask straight in would hit the wrong bits. We could
		 * keep a big-endian value in tmp instead, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but that would decrease performance, so we swab the
		 * mask rather than the data:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* _M68KNOMMU_BITOPS_H */