/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/byteorder.h>              /* sigh ... */
#include <asm/cpu-features.h>

#if (_MIPS_SZLONG == 32)
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL            "ll     "
#define __SC            "sc     "
#define cpu_to_lelongp(x) cpu_to_le32p((__u32 *) (x))
#elif (_MIPS_SZLONG == 64)
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL            "lld    "
#define __SC            "scd    "
#define cpu_to_lelongp(x) cpu_to_le64p((__u64 *) (x))
#endif

#ifdef __KERNEL__

#include <asm/interrupt.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()      smp_mb()
#define smp_mb__after_clear_bit()       smp_mb()

/*
 * Only disable interrupt for kernel mode stuff to keep usermode stuff
 * that dares to use kernel include files alive.
 */

#define __bi_flags                      unsigned long flags
#define __bi_local_irq_save(x)          local_irq_save(x)
#define __bi_local_irq_restore(x)       local_irq_restore(x)
#else
#define __bi_flags
#define __bi_local_irq_save(x)
#define __bi_local_irq_restore(x)
#endif /* __KERNEL__ */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
        unsigned long temp;

        if (cpu_has_llsc && R10000_LLSC_WAR) {
                __asm__ __volatile__(
                "       .set    mips3                           \n"
                "1:     " __LL "%0, %1          # set_bit       \n"
                "       or      %0, %2                          \n"
                "       " __SC  "%0, %1                         \n"
                "       beqzl   %0, 1b                          \n"
                "       .set    mips0                           \n"
                : "=&r" (temp), "=m" (*m)
                : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
        } else if (cpu_has_llsc) {
                __asm__ __volatile__(
                "       .set    mips3                           \n"
                "1:     " __LL "%0, %1          # set_bit       \n"
                "       or      %0, %2                          \n"
                "       " __SC  "%0, %1                         \n"
                "       beqz    %0, 1b                          \n"
                "       .set    mips0                           \n"
                : "=&r" (temp), "=m" (*m)
                : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
        } else {
                volatile unsigned long *a = addr;
                unsigned long mask;
                __bi_flags;

                a += nr >> SZLONG_LOG;
                mask = 1UL << (nr & SZLONG_MASK);
                __bi_local_irq_save(flags);
                *a |= mask;
                __bi_local_irq_restore(flags);
        }
}
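
/*
 * Usage sketch (added for illustration, not part of the original header;
 * 'device_flags' is a hypothetical bitmap).  Callers pass a pointer to an
 * array of unsigned long, and @nr may index past the first word:
 *
 *      static unsigned long device_flags[2];
 *
 *      set_bit(3, device_flags);       - atomic read-modify-write in word 0
 *      set_bit(40, device_flags);      - falls into word 1 on 32-bit kernels
 */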

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long * addr)
{
        unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

        *m |= 1UL << (nr & SZLONG_MASK);
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
        unsigned long temp;

        if (cpu_has_llsc && R10000_LLSC_WAR) {
                __asm__ __volatile__(
                "       .set    mips3                           \n"
                "1:     " __LL "%0, %1          # clear_bit     \n"
                "       and     %0, %2                          \n"
                "       " __SC  "%0, %1                         \n"
                "       beqzl   %0, 1b                          \n"
                "       .set    mips0                           \n"
                : "=&r" (temp), "=m" (*m)
                : "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
        } else if (cpu_has_llsc) {
                __asm__ __volatile__(
                "       .set    mips3                           \n"
                "1:     " __LL "%0, %1          # clear_bit     \n"
                "       and     %0, %2                          \n"
                "       " __SC  "%0, %1                         \n"
                "       beqz    %0, 1b                          \n"
                "       .set    mips0                           \n"
                : "=&r" (temp), "=m" (*m)
                : "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
        } else {
                volatile unsigned long *a = addr;
                unsigned long mask;
                __bi_flags;

                a += nr >> SZLONG_LOG;
                mask = 1UL << (nr & SZLONG_MASK);
                __bi_local_irq_save(flags);
                *a &= ~mask;
                __bi_local_irq_restore(flags);
        }
}
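
/*
 * Usage sketch (illustrative, not from the original source): since
 * clear_bit() itself has no barrier, releasing a lock-like bit wants an
 * explicit barrier first.  'MY_LOCK_BIT' and 'my_lock_word' are made up.
 *
 *      smp_mb__before_clear_bit();
 *      clear_bit(MY_LOCK_BIT, &my_lock_word);
 */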

/*
 * __clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __clear_bit(unsigned long nr, volatile unsigned long * addr)
{
        unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

        *m &= ~(1UL << (nr & SZLONG_MASK));
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp;

                __asm__ __volatile__(
                "       .set    mips3                           \n"
                "1:     " __LL "%0, %1          # change_bit    \n"
                "       xor     %0, %2                          \n"
                "       " __SC  "%0, %1                         \n"
                "       beqzl   %0, 1b                          \n"
                "       .set    mips0                           \n"
                : "=&r" (temp), "=m" (*m)
                : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
        } else if (cpu_has_llsc) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp;

                __asm__ __volatile__(
                "       .set    mips3                           \n"
                "1:     " __LL "%0, %1          # change_bit    \n"
                "       xor     %0, %2                          \n"
                "       " __SC  "%0, %1                         \n"
                "       beqz    %0, 1b                          \n"
                "       .set    mips0                           \n"
                : "=&r" (temp), "=m" (*m)
                : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
        } else {
                volatile unsigned long *a = addr;
                unsigned long mask;
                __bi_flags;

                a += nr >> SZLONG_LOG;
                mask = 1UL << (nr & SZLONG_MASK);
                __bi_local_irq_save(flags);
                *a ^= mask;
                __bi_local_irq_restore(flags);
        }
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long * addr)
{
        unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

        *m ^= 1UL << (nr & SZLONG_MASK);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
        volatile unsigned long *addr)
{
        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp, res;

                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     " __LL "%0, %1          # test_and_set_bit      \n"
                "       or      %2, %0, %3                              \n"
                "       " __SC  "%2, %1                                 \n"
                "       beqzl   %2, 1b                                  \n"
                "       and     %2, %0, %3                              \n"
#ifdef CONFIG_SMP
                "       sync                                            \n"
#endif
                "       .set    mips0                                   \n"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
                : "memory");

                return res != 0;
        } else if (cpu_has_llsc) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp, res;

                __asm__ __volatile__(
                "       .set    push                                    \n"
                "       .set    noreorder                               \n"
                "       .set    mips3                                   \n"
                "1:     " __LL "%0, %1          # test_and_set_bit      \n"
                "       or      %2, %0, %3                              \n"
                "       " __SC  "%2, %1                                 \n"
                "       beqz    %2, 1b                                  \n"
                "       and     %2, %0, %3                              \n"
#ifdef CONFIG_SMP
                "       sync                                            \n"
#endif
                "       .set    pop                                     \n"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
                : "memory");

                return res != 0;
        } else {
                volatile unsigned long *a = addr;
                unsigned long mask;
                int retval;
                __bi_flags;

                a += nr >> SZLONG_LOG;
                mask = 1UL << (nr & SZLONG_MASK);
                __bi_local_irq_save(flags);
                retval = (mask & *a) != 0;
                *a |= mask;
                __bi_local_irq_restore(flags);

                return retval;
        }
}
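
/*
 * Usage sketch (illustrative only): test_and_set_bit() is a convenient
 * way to claim a resource exactly once; 'driver_state' is hypothetical.
 *
 *      if (test_and_set_bit(0, &driver_state))
 *              return -EBUSY;          - already claimed elsewhere
 *      - bit 0 was clear and is now set: this caller owns the resource
 */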

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(unsigned long nr,
        volatile unsigned long *addr)
{
        volatile unsigned long *a = addr;
        unsigned long mask;
        int retval;

        a += nr >> SZLONG_LOG;
        mask = 1UL << (nr & SZLONG_MASK);
        retval = (mask & *a) != 0;
        *a |= mask;

        return retval;
}
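
/*
 * Usage sketch (illustrative only): the double-underscore variants drop
 * the atomicity, so concurrent updaters must serialise externally, for
 * example under a spinlock the caller already holds ('bm_lock' and
 * 'bitmap' are hypothetical):
 *
 *      spin_lock(&bm_lock);
 *      was_set = __test_and_set_bit(nr, bitmap);
 *      spin_unlock(&bm_lock);
 */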

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
        volatile unsigned long *addr)
{
        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp, res;

                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     " __LL "%0, %1          # test_and_clear_bit    \n"
                "       or      %2, %0, %3                              \n"
                "       xor     %2, %3                                  \n"
                "       " __SC  "%2, %1                                 \n"
                "       beqzl   %2, 1b                                  \n"
                "       and     %2, %0, %3                              \n"
#ifdef CONFIG_SMP
                "       sync                                            \n"
#endif
                "       .set    mips0                                   \n"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
                : "memory");

                return res != 0;
        } else if (cpu_has_llsc) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp, res;

                __asm__ __volatile__(
                "       .set    push                                    \n"
                "       .set    noreorder                               \n"
                "       .set    mips3                                   \n"
                "1:     " __LL "%0, %1          # test_and_clear_bit    \n"
                "       or      %2, %0, %3                              \n"
                "       xor     %2, %3                                  \n"
                "       " __SC  "%2, %1                                 \n"
                "       beqz    %2, 1b                                  \n"
                "       and     %2, %0, %3                              \n"
#ifdef CONFIG_SMP
                "       sync                                            \n"
#endif
                "       .set    pop                                     \n"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
                : "memory");

                return res != 0;
        } else {
                volatile unsigned long *a = addr;
                unsigned long mask;
                int retval;
                __bi_flags;

                a += nr >> SZLONG_LOG;
                mask = 1UL << (nr & SZLONG_MASK);
                __bi_local_irq_save(flags);
                retval = (mask & *a) != 0;
                *a &= ~mask;
                __bi_local_irq_restore(flags);

                return retval;
        }
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(unsigned long nr,
        volatile unsigned long * addr)
{
        volatile unsigned long *a = addr;
        unsigned long mask;
        int retval;

        a += (nr >> SZLONG_LOG);
        mask = 1UL << (nr & SZLONG_MASK);
        retval = ((mask & *a) != 0);
        *a &= ~mask;

        return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
        volatile unsigned long *addr)
{
        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp, res;

                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     " __LL "%0, %1          # test_and_change_bit   \n"
                "       xor     %2, %0, %3                              \n"
                "       " __SC  "%2, %1                                 \n"
                "       beqzl   %2, 1b                                  \n"
                "       and     %2, %0, %3                              \n"
#ifdef CONFIG_SMP
                "       sync                                            \n"
#endif
                "       .set    mips0                                   \n"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
                : "memory");

                return res != 0;
        } else if (cpu_has_llsc) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp, res;

                __asm__ __volatile__(
                "       .set    push                                    \n"
                "       .set    noreorder                               \n"
                "       .set    mips3                                   \n"
                "1:     " __LL "%0, %1          # test_and_change_bit   \n"
                "       xor     %2, %0, %3                              \n"
                "       " __SC  "\t%2, %1                               \n"
                "       beqz    %2, 1b                                  \n"
                "       and     %2, %0, %3                              \n"
#ifdef CONFIG_SMP
                "       sync                                            \n"
#endif
                "       .set    pop                                     \n"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
                : "memory");

                return res != 0;
        } else {
                volatile unsigned long *a = addr;
                unsigned long mask, retval;
                __bi_flags;

                a += nr >> SZLONG_LOG;
                mask = 1UL << (nr & SZLONG_MASK);
                __bi_local_irq_save(flags);
                retval = (mask & *a) != 0;
                *a ^= mask;
                __bi_local_irq_restore(flags);

                return retval;
        }
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_change_bit(unsigned long nr,
        volatile unsigned long *addr)
{
        volatile unsigned long *a = addr;
        unsigned long mask;
        int retval;

        a += (nr >> SZLONG_LOG);
        mask = 1UL << (nr & SZLONG_MASK);
        retval = ((mask & *a) != 0);
        *a ^= mask;

        return retval;
}

#undef __bi_flags
#undef __bi_local_irq_save
#undef __bi_local_irq_restore

/*
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static inline int test_bit(unsigned long nr, const volatile unsigned long *addr)
{
        return 1UL & (addr[nr >> SZLONG_LOG] >> (nr & SZLONG_MASK));
}
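
/*
 * Usage sketch (illustrative only; 'feature_mask' is hypothetical):
 * test_bit() is a plain load with no barrier, suitable for const bitmaps.
 *
 *      if (test_bit(5, feature_mask))
 *              ...take the fast path...
 */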

#ifdef CONFIG_CPU_MIPS32_R1
/*
 * Return the bit position (0..31) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static __inline__ int __ilog2(unsigned long x)
{
        int lz;

        __asm__ (
        "       .set    push                            \n"
        "       .set    mips32                          \n"
        "       clz     %0, %1                          \n"
        "       .set    pop                             \n"
        : "=r" (lz)
        : "r" (x));

        return 31 - lz;
}
#elif defined(CONFIG_CPU_MIPS64_R1)
/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static __inline__ int __ilog2(unsigned long x)
{
        int lz;

        __asm__ (
        "       .set    push                            \n"
        "       .set    mips64                          \n"
        "       dclz    %0, %1                          \n"
        "       .set    pop                             \n"
        : "=r" (lz)
        : "r" (x));

        return 63 - lz;
}
#endif

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
#if defined(CONFIG_CPU_MIPS32_R1) || defined(CONFIG_CPU_MIPS64_R1)
        return __ilog2(word & -word);
#else
        int b = 0, s;

#ifdef CONFIG_32BIT
        s = 16; if (word << 16 != 0) s = 0; b += s; word >>= s;
        s = 8; if (word << 24 != 0) s = 0; b += s; word >>= s;
        s = 4; if (word << 28 != 0) s = 0; b += s; word >>= s;
        s = 2; if (word << 30 != 0) s = 0; b += s; word >>= s;
        s = 1; if (word << 31 != 0) s = 0; b += s;
#endif
#ifdef CONFIG_64BIT
        s = 32; if (word << 32 != 0) s = 0; b += s; word >>= s;
        s = 16; if (word << 48 != 0) s = 0; b += s; word >>= s;
        s = 8; if (word << 56 != 0) s = 0; b += s; word >>= s;
        s = 4; if (word << 60 != 0) s = 0; b += s; word >>= s;
        s = 2; if (word << 62 != 0) s = 0; b += s; word >>= s;
        s = 1; if (word << 63 != 0) s = 0; b += s;
#endif
        return b;
#endif
}
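
/*
 * Worked example (added for illustration): __ffs(0x60) == 5, because
 * word & -word isolates the lowest set bit (0x20) and its position is 5.
 * __ffs(0) is undefined, so callers must check for a zero word first.
 */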

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * Returns 1..SZLONG
 * Returns 0 if no bit exists
 */

static inline unsigned long ffs(unsigned long word)
{
        if (!word)
                return 0;

        return __ffs(word) + 1;
}

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
        return __ffs(~word);
}

/*
 * flz - find last zero in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long flz(unsigned long word)
{
#if defined(CONFIG_CPU_MIPS32_R1) || defined(CONFIG_CPU_MIPS64_R1)
        return __ilog2(~word);
#else
#if defined(CONFIG_32BIT)
        int r = 31, s;
        word = ~word;
        s = 16; if ((word & 0xffff0000)) s = 0; r -= s; word <<= s;
        s = 8; if ((word & 0xff000000)) s = 0; r -= s; word <<= s;
        s = 4; if ((word & 0xf0000000)) s = 0; r -= s; word <<= s;
        s = 2; if ((word & 0xc0000000)) s = 0; r -= s; word <<= s;
        s = 1; if ((word & 0x80000000)) s = 0; r -= s;
#endif
#if defined(CONFIG_64BIT)
        int r = 63, s;
        word = ~word;
        s = 32; if ((word & 0xffffffff00000000UL)) s = 0; r -= s; word <<= s;
        s = 16; if ((word & 0xffff000000000000UL)) s = 0; r -= s; word <<= s;
        s = 8; if ((word & 0xff00000000000000UL)) s = 0; r -= s; word <<= s;
        s = 4; if ((word & 0xf000000000000000UL)) s = 0; r -= s; word <<= s;
        s = 2; if ((word & 0xc000000000000000UL)) s = 0; r -= s; word <<= s;
        s = 1; if ((word & 0x8000000000000000UL)) s = 0; r -= s;
#endif
        return r;
#endif
}

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * Returns 1..SZLONG
 * Returns 0 if no bit exists
 */
static inline unsigned long fls(unsigned long word)
{
        if (word == 0)
                return 0;

        return flz(~word) + 1;
}
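
/*
 * Worked example (added for illustration): for word == 0x90 (bits 4 and 7
 * set), ffs(word) == 5 and fls(word) == 8, both counting from 1.  ffz()
 * and flz() are undefined for ~0UL, which has no zero bit to find.
 */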


/*
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static inline unsigned long find_next_zero_bit(const unsigned long *addr,
        unsigned long size, unsigned long offset)
{
        const unsigned long *p = addr + (offset >> SZLONG_LOG);
        unsigned long result = offset & ~SZLONG_MASK;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= SZLONG_MASK;
        if (offset) {
                tmp = *(p++);
                tmp |= ~0UL >> (_MIPS_SZLONG-offset);
                if (size < _MIPS_SZLONG)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= _MIPS_SZLONG;
                result += _MIPS_SZLONG;
        }
        while (size & ~SZLONG_MASK) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += _MIPS_SZLONG;
                size -= _MIPS_SZLONG;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp |= ~0UL << size;
        if (tmp == ~0UL)                /* Are any bits zero? */
                return result + size;   /* Nope. */
found_middle:
        return result + ffz(tmp);
}

#define find_first_zero_bit(addr, size) \
        find_next_zero_bit((addr), (size), 0)

/*
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static inline unsigned long find_next_bit(const unsigned long *addr,
        unsigned long size, unsigned long offset)
{
        const unsigned long *p = addr + (offset >> SZLONG_LOG);
        unsigned long result = offset & ~SZLONG_MASK;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= SZLONG_MASK;
        if (offset) {
                tmp = *(p++);
                tmp &= ~0UL << offset;
                if (size < _MIPS_SZLONG)
                        goto found_first;
                if (tmp)
                        goto found_middle;
                size -= _MIPS_SZLONG;
                result += _MIPS_SZLONG;
        }
        while (size & ~SZLONG_MASK) {
                if ((tmp = *(p++)))
                        goto found_middle;
                result += _MIPS_SZLONG;
                size -= _MIPS_SZLONG;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp &= ~0UL >> (_MIPS_SZLONG - size);
        if (tmp == 0UL)                 /* Are any bits set? */
                return result + size;   /* Nope. */
found_middle:
        return result + __ffs(tmp);
}

/*
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr, size) \
        find_next_bit((addr), (size), 0)

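/*
 * Usage sketch (illustrative only; 'bitmap', 'nbits' and 'handle' are
 * placeholders): walking every set bit with these helpers.  Note the
 * argument order is (addr, size, offset).
 *
 *      for (bit = find_first_bit(bitmap, nbits);
 *           bit < nbits;
 *           bit = find_next_bit(bitmap, nbits, bit + 1))
 *              handle(bit);
 */
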
#ifdef __KERNEL__

/*
 * Every architecture must define this function.  It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set.  It's guaranteed that at least one of the 140
 * bits is cleared.
 */
static inline int sched_find_first_bit(const unsigned long *b)
{
#ifdef CONFIG_32BIT
        if (unlikely(b[0]))
                return __ffs(b[0]);
        if (unlikely(b[1]))
                return __ffs(b[1]) + 32;
        if (unlikely(b[2]))
                return __ffs(b[2]) + 64;
        if (b[3])
                return __ffs(b[3]) + 96;
        return __ffs(b[4]) + 128;
#endif
#ifdef CONFIG_64BIT
        if (unlikely(b[0]))
                return __ffs(b[0]);
        if (unlikely(b[1]))
                return __ffs(b[1]) + 64;
        return __ffs(b[2]) + 128;
#endif
}

/*
 * hweightN - returns the hamming weight of a N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

#define hweight64(x) generic_hweight64(x)
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x)  generic_hweight8(x)

static inline int __test_and_set_le_bit(unsigned long nr, unsigned long *addr)
{
        unsigned char *ADDR = (unsigned char *) addr;
        int mask, retval;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        retval = (mask & *ADDR) != 0;
        *ADDR |= mask;

        return retval;
}

static inline int __test_and_clear_le_bit(unsigned long nr, unsigned long *addr)
{
        unsigned char *ADDR = (unsigned char *) addr;
        int mask, retval;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        retval = (mask & *ADDR) != 0;
        *ADDR &= ~mask;

        return retval;
}

static inline int test_le_bit(unsigned long nr, const unsigned long * addr)
{
        const unsigned char *ADDR = (const unsigned char *) addr;
        int mask;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);

        return ((mask & *ADDR) != 0);
}

static inline unsigned long find_next_zero_le_bit(unsigned long *addr,
        unsigned long size, unsigned long offset)
{
        unsigned long *p = ((unsigned long *) addr) + (offset >> SZLONG_LOG);
        unsigned long result = offset & ~SZLONG_MASK;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= SZLONG_MASK;
        if (offset) {
                tmp = cpu_to_lelongp(p++);
                tmp |= ~0UL >> (_MIPS_SZLONG-offset); /* bug or feature ? */
                if (size < _MIPS_SZLONG)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= _MIPS_SZLONG;
                result += _MIPS_SZLONG;
        }
        while (size & ~SZLONG_MASK) {
                if (~(tmp = cpu_to_lelongp(p++)))
                        goto found_middle;
                result += _MIPS_SZLONG;
                size -= _MIPS_SZLONG;
        }
        if (!size)
                return result;
        tmp = cpu_to_lelongp(p);

found_first:
        tmp |= ~0UL << size;
        if (tmp == ~0UL)                /* Are any bits zero? */
                return result + size;   /* Nope. */

found_middle:
        return result + ffz(tmp);
}

#define find_first_zero_le_bit(addr, size) \
        find_next_zero_le_bit((addr), (size), 0)

#define ext2_set_bit(nr,addr) \
        __test_and_set_le_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit(nr, addr) \
        __test_and_clear_le_bit((nr),(unsigned long*)addr)
#define ext2_set_bit_atomic(lock, nr, addr)             \
({                                                      \
        int ret;                                        \
        spin_lock(lock);                                \
        ret = ext2_set_bit((nr), (addr));               \
        spin_unlock(lock);                              \
        ret;                                            \
})

#define ext2_clear_bit_atomic(lock, nr, addr)           \
({                                                      \
        int ret;                                        \
        spin_lock(lock);                                \
        ret = ext2_clear_bit((nr), (addr));             \
        spin_unlock(lock);                              \
        ret;                                            \
})
#define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr)
#define ext2_find_first_zero_bit(addr, size) \
        find_first_zero_le_bit((unsigned long*)addr, size)
#define ext2_find_next_zero_bit(addr, size, off) \
        find_next_zero_le_bit((unsigned long*)addr, size, off)
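
/*
 * Usage sketch (illustrative only; 'balloc_lock' and 'block_bitmap' are
 * hypothetical): the atomic ext2 helpers take a caller-supplied spinlock
 * because the underlying little-endian bit ops are non-atomic.
 *
 *      if (!ext2_set_bit_atomic(&balloc_lock, nr, block_bitmap))
 *              - nr was previously clear; this caller claimed it
 */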

/*
 * Bitmap functions for the minix filesystem.
 *
 * FIXME: These assume that Minix uses the native byte/bitorder.
 * This limits the Minix filesystem's value for data exchange very much.
 */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */