blob: 1bb89c5a10ee65d52dc34f4b2e40763fd6c42537 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 1994 - 1997, 1999, 2000 Ralf Baechle (ralf@gnu.org)
7 * Copyright (c) 1999, 2000 Silicon Graphics, Inc.
8 */
9#ifndef _ASM_BITOPS_H
10#define _ASM_BITOPS_H
11
Linus Torvalds1da177e2005-04-16 15:20:36 -070012#include <linux/compiler.h>
13#include <linux/types.h>
Ralf Baechleec917c2c2005-10-07 16:58:15 +010014#include <asm/bug.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070015#include <asm/byteorder.h> /* sigh ... */
16#include <asm/cpu-features.h>
17
18#if (_MIPS_SZLONG == 32)
19#define SZLONG_LOG 5
20#define SZLONG_MASK 31UL
Maciej W. Rozyckiaac8aa72005-06-14 17:35:03 +000021#define __LL "ll "
22#define __SC "sc "
Ralf Baechle42a3b4f2005-09-03 15:56:17 -070023#define cpu_to_lelongp(x) cpu_to_le32p((__u32 *) (x))
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#elif (_MIPS_SZLONG == 64)
25#define SZLONG_LOG 6
26#define SZLONG_MASK 63UL
Maciej W. Rozyckiaac8aa72005-06-14 17:35:03 +000027#define __LL "lld "
28#define __SC "scd "
Ralf Baechle42a3b4f2005-09-03 15:56:17 -070029#define cpu_to_lelongp(x) cpu_to_le64p((__u64 *) (x))
Linus Torvalds1da177e2005-04-16 15:20:36 -070030#endif
31
32#ifdef __KERNEL__
33
Ralf Baechle192ef362006-07-07 14:07:18 +010034#include <linux/irqflags.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <asm/sgidefs.h>
36#include <asm/war.h>
37
38/*
39 * clear_bit() doesn't provide any barrier for the compiler.
40 */
41#define smp_mb__before_clear_bit() smp_mb()
42#define smp_mb__after_clear_bit() smp_mb()
43
44/*
45 * Only disable interrupt for kernel mode stuff to keep usermode stuff
46 * that dares to use kernel include files alive.
47 */
48
49#define __bi_flags unsigned long flags
50#define __bi_local_irq_save(x) local_irq_save(x)
51#define __bi_local_irq_restore(x) local_irq_restore(x)
52#else
53#define __bi_flags
54#define __bi_local_irq_save(x)
55#define __bi_local_irq_restore(x)
56#endif /* __KERNEL__ */
57
58/*
59 * set_bit - Atomically set a bit in memory
60 * @nr: the bit to set
61 * @addr: the address to start counting from
62 *
63 * This function is atomic and may not be reordered. See __set_bit()
64 * if you do not require the atomic guarantees.
65 * Note that @nr may be almost arbitrarily large; this function is not
66 * restricted to acting on a single-word quantity.
67 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	/* Word containing the target bit; nr may index past the first word. */
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		/*
		 * ll/sc retry loop using the branch-likely form (beqzl),
		 * selected by the R10000 ll/sc workaround.
		 */
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		/* Plain ll/sc loop: retry until store-conditional succeeds. */
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqz	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		/*
		 * No ll/sc available: read-modify-write with interrupts
		 * masked (__bi_local_irq_save is a no-op outside __KERNEL__).
		 * NOTE(review): this is uniprocessor-safe only — no SMP
		 * atomicity on this path.
		 */
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a |= mask;
		__bi_local_irq_restore(flags);
	}
}
105
106/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107 * clear_bit - Clears a bit in memory
108 * @nr: Bit to clear
109 * @addr: Address to start counting from
110 *
111 * clear_bit() is atomic and may not be reordered. However, it does
112 * not contain a memory barrier, so if it is used for locking purposes,
113 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
114 * in order to ensure changes are visible on other processors.
115 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	/* Word containing the target bit; nr may index past the first word. */
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		/*
		 * ll/sc retry loop with branch-likely (beqzl) for the
		 * R10000 workaround; %2 is the pre-inverted mask, so a
		 * plain `and` clears the bit.
		 */
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else if (cpu_has_llsc) {
		/* Plain ll/sc loop: retry until store-conditional succeeds. */
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqz	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else {
		/* No ll/sc: mask interrupts around the read-modify-write. */
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a &= ~mask;
		__bi_local_irq_restore(flags);
	}
}
153
154/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700155 * change_bit - Toggle a bit in memory
156 * @nr: Bit to change
157 * @addr: Address to start counting from
158 *
159 * change_bit() is atomic and may not be reordered.
160 * Note that @nr may be almost arbitrarily large; this function is not
161 * restricted to acting on a single-word quantity.
162 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		/* Word containing the target bit. */
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		/* Branch-likely (beqzl) ll/sc loop for the R10000 workaround. */
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		/* Plain ll/sc loop: xor toggles the bit, retry on sc failure. */
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqz	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		/* No ll/sc: mask interrupts around the read-modify-write. */
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a ^= mask;
		__bi_local_irq_restore(flags);
	}
}
203
204/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700205 * test_and_set_bit - Set a bit and return its old value
206 * @nr: Bit to set
207 * @addr: Address to count from
208 *
209 * This operation is atomic and cannot be reordered.
210 * It also implies a memory barrier.
211 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		/* Word containing the target bit. */
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		/*
		 * Branch-likely ll/sc loop (R10000 workaround).  %0 keeps
		 * the old word value, so the final `and` extracts the old
		 * state of the bit into %2.  The SMP `sync` provides the
		 * memory barrier the interface promises.
		 */
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		/*
		 * Plain ll/sc loop.  .set noreorder so the `and` after the
		 * beqz occupies its delay slot, recomputing the old-bit
		 * result on every iteration.
		 */
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	 and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		/* No ll/sc: test then set with interrupts masked. */
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a |= mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}
273
274/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700275 * test_and_clear_bit - Clear a bit and return its old value
276 * @nr: Bit to clear
277 * @addr: Address to count from
278 *
279 * This operation is atomic and cannot be reordered.
280 * It also implies a memory barrier.
281 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		/* Word containing the target bit. */
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		/*
		 * Branch-likely ll/sc loop (R10000 workaround).  The
		 * `or` then `xor` pair forces the bit set and then toggles
		 * it off, clearing it regardless of its prior state while
		 * %0 preserves the old word for the final `and` test.
		 */
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC 	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		/*
		 * Plain ll/sc loop; .set noreorder keeps the result-
		 * computing `and` in the beqz delay slot.
		 */
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC 	"%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	 and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		/* No ll/sc: test then clear with interrupts masked. */
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a &= ~mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}
345
346/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700347 * test_and_change_bit - Change a bit and return its old value
348 * @nr: Bit to change
349 * @addr: Address to count from
350 *
351 * This operation is atomic and cannot be reordered.
352 * It also implies a memory barrier.
353 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		/* Word containing the target bit. */
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		/*
		 * Branch-likely ll/sc loop (R10000 workaround).  xor
		 * toggles the bit; %0 keeps the old word so the final
		 * `and` yields the bit's previous state.
		 */
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL	"%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		/*
		 * Plain ll/sc loop; .set noreorder keeps the result-
		 * computing `and` in the beqz delay slot.
		 */
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL	"%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"\t%2, %1				\n"
		"	beqz	%2, 1b					\n"
		"	 and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		/* No ll/sc: test then toggle with interrupts masked. */
		volatile unsigned long *a = addr;
		unsigned long mask, retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a ^= mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}
414
Linus Torvalds1da177e2005-04-16 15:20:36 -0700415#undef __bi_flags
416#undef __bi_local_irq_save
417#undef __bi_local_irq_restore
418
Akinobu Mita3c9ee7e2006-03-26 01:39:30 -0800419#include <asm-generic/bitops/non-atomic.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700420
Ralf Baechle65903262005-07-12 12:50:30 +0000421/*
422 * Return the bit position (0..63) of the most significant 1 bit in a word
423 * Returns -1 if no 1 bit exists
424 */
static inline int __ilog2(unsigned long x)
{
	int lz;

	if (sizeof(x) == 4) {
		/*
		 * 32-bit long: clz counts leading zeros; clz(0) yields 32,
		 * so x == 0 falls out as 31 - 32 == -1, matching the
		 * "-1 if no bit set" contract documented above.
		 */
		__asm__ (
		"	.set	push					\n"
		"	.set	mips32					\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (lz)
		: "r" (x));

		return 31 - lz;
	}

	/* Only 32- and 64-bit longs are supported. */
	BUG_ON(sizeof(x) != 8);

	/* 64-bit long: dclz is the doubleword count-leading-zeros. */
	__asm__ (
	"	.set	push					\n"
	"	.set	mips64					\n"
	"	dclz	%0, %1					\n"
	"	.set	pop					\n"
	: "=r" (lz)
	: "r" (x));

	return 63 - lz;
}
Ralf Baechle65903262005-07-12 12:50:30 +0000453
Akinobu Mita3c9ee7e2006-03-26 01:39:30 -0800454#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
455
Ralf Baechle65903262005-07-12 12:50:30 +0000456/*
457 * __ffs - find first bit in word.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700458 * @word: The word to search
459 *
Ralf Baechle65903262005-07-12 12:50:30 +0000460 * Returns 0..SZLONG-1
461 * Undefined if no bit exists, so code should check against 0 first.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700462 */
static inline unsigned long __ffs(unsigned long word)
{
	/*
	 * word & -word isolates the lowest set bit; its log2 is the
	 * index of the first set bit.  Undefined for word == 0, per the
	 * comment above (__ilog2 would return -1).
	 */
	return __ilog2(word & -word);
}
467
468/*
Atsushi Nemotobc818242006-04-17 21:19:12 +0900469 * fls - find last bit set.
470 * @word: The word to search
471 *
472 * This is defined the same way as ffs.
473 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
474 */
static inline int fls(int word)
{
	/* clz(0) is 32, so fls(0) == 32 - 32 == 0 as required. */
	__asm__ ("clz %0, %1" : "=r" (word) : "r" (word));

	return 32 - word;
}
481
482#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPS64)
static inline int fls64(__u64 word)
{
	/* dclz(0) is 64, so fls64(0) == 0; otherwise 1-based index of MSB. */
	__asm__ ("dclz %0, %1" : "=r" (word) : "r" (word));

	return 64 - word;
}
489#else
490#include <asm-generic/bitops/fls64.h>
491#endif
492
493/*
Ralf Baechle65903262005-07-12 12:50:30 +0000494 * ffs - find first bit set.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700495 * @word: The word to search
496 *
Atsushi Nemotobc818242006-04-17 21:19:12 +0900497 * This is defined the same way as
498 * the libc and compiler builtin ffs routines, therefore
499 * differs in spirit from the above ffz (man ffs).
Linus Torvalds1da177e2005-04-16 15:20:36 -0700500 */
static inline int ffs(int word)
{
	/* libc convention: ffs(0) == 0, otherwise 1-based bit index. */
	if (!word)
		return 0;

	/* word & -word isolates the lowest set bit; fls gives its 1-based index. */
	return fls(word & -word);
}
Ralf Baechle2caf1902006-01-30 17:14:41 +0000508
Akinobu Mita3c9ee7e2006-03-26 01:39:30 -0800509#else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700510
Akinobu Mita3c9ee7e2006-03-26 01:39:30 -0800511#include <asm-generic/bitops/__ffs.h>
512#include <asm-generic/bitops/ffs.h>
Akinobu Mita3c9ee7e2006-03-26 01:39:30 -0800513#include <asm-generic/bitops/fls.h>
Atsushi Nemotobc818242006-04-17 21:19:12 +0900514#include <asm-generic/bitops/fls64.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700515
Akinobu Mita3c9ee7e2006-03-26 01:39:30 -0800516#endif /*defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700517
Atsushi Nemotobc818242006-04-17 21:19:12 +0900518#include <asm-generic/bitops/ffz.h>
Akinobu Mita3c9ee7e2006-03-26 01:39:30 -0800519#include <asm-generic/bitops/find.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700520
521#ifdef __KERNEL__
522
Akinobu Mita3c9ee7e2006-03-26 01:39:30 -0800523#include <asm-generic/bitops/sched.h>
524#include <asm-generic/bitops/hweight.h>
525#include <asm-generic/bitops/ext2-non-atomic.h>
526#include <asm-generic/bitops/ext2-atomic.h>
527#include <asm-generic/bitops/minix.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700528
529#endif /* __KERNEL__ */
530
531#endif /* _ASM_BITOPS_H */