/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>
#if (_MIPS_SZLONG == 32)
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL	"ll	"
#define __SC	"sc	"
#define cpu_to_lelongp(x) cpu_to_le32p((__u32 *) (x))
#elif (_MIPS_SZLONG == 64)
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL	"lld	"
#define __SC	"scd	"
#define cpu_to_lelongp(x) cpu_to_le64p((__u64 *) (x))
#endif

#ifdef __KERNEL__

#include <asm/interrupt.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

/*
 * Only disable interrupts for kernel-mode code; user-mode code that
 * dares to include kernel headers gets empty stubs so it keeps
 * building and working.
 */

#define __bi_flags			unsigned long flags
#define __bi_local_irq_save(x)		local_irq_save(x)
#define __bi_local_irq_restore(x)	local_irq_restore(x)
#else
#define __bi_flags
#define __bi_local_irq_save(x)
#define __bi_local_irq_restore(x)
#endif /* __KERNEL__ */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqz	%0, 1b				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a |= mask;
		__bi_local_irq_restore(flags);
	}
}
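
/*
 * Usage sketch (editorial addition; the flag word and bit names below
 * are hypothetical, not part of this header):
 *
 *	static unsigned long device_flags;
 *	enum { DEV_RUNNING, DEV_ERROR };
 *
 *	set_bit(DEV_RUNNING, &device_flags);
 *
 * Because @nr is divided down to a word index first, the same call
 * works on multi-word bitmaps: bit 75 simply lands in the second or
 * third word of the bitmap, depending on the word size.
 */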

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

	*m |= 1UL << (nr & SZLONG_MASK);
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqz	%0, 1b				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a &= ~mask;
		__bi_local_irq_restore(flags);
	}
}
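
/*
 * Editorial sketch of the barrier protocol described above (the names
 * are hypothetical): when clear_bit() publishes the end of a critical
 * region, order the preceding stores explicitly:
 *
 *	shared->result = value;
 *	smp_mb__before_clear_bit();
 *	clear_bit(WORK_PENDING, &device_flags);
 */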

/*
 * __clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

	*m &= ~(1UL << (nr & SZLONG_MASK));
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"1:	" __LL "%0, %1			# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"1:	" __LL "%0, %1			# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqz	%0, 1b				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a ^= mask;
		__bi_local_irq_restore(flags);
	}
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

	*m ^= 1UL << (nr & SZLONG_MASK);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3			\n"
		"	" __SC "%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	noreorder	# test_and_set_bit	\n"
		"1:	" __LL "%0, %1				\n"
		"	or	%2, %0, %3			\n"
		"	" __SC "%2, %1				\n"
		"	beqz	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		"	.set	reorder				\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a |= mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}
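
/*
 * Editorial sketch: because test_and_set_bit() implies a memory
 * barrier, it can carry a simple acquire-style flag on its own
 * (LOCK_BIT and device_flags are hypothetical, and real code would
 * usually prefer a proper spinlock):
 *
 *	while (test_and_set_bit(LOCK_BIT, &device_flags))
 *		cpu_relax();
 *	...critical section...
 *	smp_mb__before_clear_bit();
 *	clear_bit(LOCK_BIT, &device_flags);
 */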

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += nr >> SZLONG_LOG;
	mask = 1UL << (nr & SZLONG_MASK);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3			\n"
		"	xor	%2, %3				\n"
		"	" __SC "%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	noreorder	# test_and_clear_bit	\n"
		"1:	" __LL "%0, %1				\n"
		"	or	%2, %0, %3			\n"
		"	xor	%2, %3				\n"
		"	" __SC "%2, %1				\n"
		"	beqz	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		"	.set	reorder				\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a &= ~mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += (nr >> SZLONG_LOG);
	mask = 1UL << (nr & SZLONG_MASK);
	retval = ((mask & *a) != 0);
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3			\n"
		"	" __SC "%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	noreorder	# test_and_change_bit	\n"
		"1:	" __LL "%0, %1				\n"
		"	xor	%2, %0, %3			\n"
		"	" __SC "%2, %1				\n"
		"	beqz	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		"	.set	reorder				\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask, retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a ^= mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += (nr >> SZLONG_LOG);
	mask = 1UL << (nr & SZLONG_MASK);
	retval = ((mask & *a) != 0);
	*a ^= mask;

	return retval;
}

#undef __bi_flags
#undef __bi_local_irq_save
#undef __bi_local_irq_restore

/*
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static inline int test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	return 1UL & (addr[nr >> SZLONG_LOG] >> (nr & SZLONG_MASK));
}
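
/*
 * Editorial note: test_bit() is a plain load with no barrier, so it
 * only gives a snapshot of the bit, e.g. (hypothetical names):
 *
 *	if (test_bit(DEV_ERROR, &device_flags))
 *		handle_error();
 */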

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	int b = 0, s;

	word = ~word;
#ifdef CONFIG_32BIT
	s = 16; if (word << 16 != 0) s = 0; b += s; word >>= s;
	s =  8; if (word << 24 != 0) s = 0; b += s; word >>= s;
	s =  4; if (word << 28 != 0) s = 0; b += s; word >>= s;
	s =  2; if (word << 30 != 0) s = 0; b += s; word >>= s;
	s =  1; if (word << 31 != 0) s = 0; b += s;
#endif
#ifdef CONFIG_64BIT
	s = 32; if (word << 32 != 0) s = 0; b += s; word >>= s;
	s = 16; if (word << 48 != 0) s = 0; b += s; word >>= s;
	s =  8; if (word << 56 != 0) s = 0; b += s; word >>= s;
	s =  4; if (word << 60 != 0) s = 0; b += s; word >>= s;
	s =  2; if (word << 62 != 0) s = 0; b += s; word >>= s;
	s =  1; if (word << 63 != 0) s = 0; b += s;
#endif

	return b;
}
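
/*
 * Editorial worked example: on a 32-bit kernel, ffz(0xff) inverts the
 * word to 0xffffff00; only the s = 8 step sees an all-zero low part
 * (word << 24 == 0), so b ends up as 8 -- the index of the first zero
 * bit.
 */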

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return ffz(~word);
}

/*
 * fls - find last bit set.
 */
#define fls(x) generic_fls(x)

/*
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
static inline unsigned long find_next_zero_bit(const unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + (offset >> SZLONG_LOG);
	unsigned long result = offset & ~SZLONG_MASK;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= SZLONG_MASK;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (_MIPS_SZLONG - offset);
		if (size < _MIPS_SZLONG)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= _MIPS_SZLONG;
		result += _MIPS_SZLONG;
	}
	while (size & ~SZLONG_MASK) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += _MIPS_SZLONG;
		size -= _MIPS_SZLONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)		/* Are any bits zero? */
		return result + size;	/* Nope. */
found_middle:
	return result + ffz(tmp);
}

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)
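
/*
 * Editorial sketch: claiming a free slot in a multi-word bitmap
 * (map and NSLOTS are hypothetical, and BITS_TO_LONGS() is assumed to
 * be available from the generic bitmap headers):
 *
 *	unsigned long map[BITS_TO_LONGS(NSLOTS)];
 *	unsigned long slot;
 *
 *	slot = find_first_zero_bit(map, NSLOTS);
 *	if (slot < NSLOTS)
 *		set_bit(slot, map);
 *
 * A return value equal to @size means no zero bit was found.
 */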

/*
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
static inline unsigned long find_next_bit(const unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + (offset >> SZLONG_LOG);
	unsigned long result = offset & ~SZLONG_MASK;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= SZLONG_MASK;
	if (offset) {
		tmp = *(p++);
		tmp &= ~0UL << offset;
		if (size < _MIPS_SZLONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= _MIPS_SZLONG;
		result += _MIPS_SZLONG;
	}
	while (size & ~SZLONG_MASK) {
		if ((tmp = *(p++)))
			goto found_middle;
		result += _MIPS_SZLONG;
		size -= _MIPS_SZLONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp &= ~0UL >> (_MIPS_SZLONG - size);
	if (tmp == 0UL)			/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __ffs(tmp);
}

/*
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr, size) \
	find_next_bit((addr), (size), 0)
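
/*
 * Editorial sketch: walking every set bit in a bitmap (names are
 * hypothetical):
 *
 *	for (bit = find_first_bit(map, nbits);
 *	     bit < nbits;
 *	     bit = find_next_bit(map, nbits, bit + 1))
 *		handle(bit);
 */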

#ifdef __KERNEL__

/*
 * Every architecture must define this function.  It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set.  It's guaranteed that at least one of the 140
 * bits is set, since __ffs() on the final word is undefined otherwise.
 */
static inline int sched_find_first_bit(const unsigned long *b)
{
#ifdef CONFIG_32BIT
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
#endif
#ifdef CONFIG_64BIT
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 64;
	return __ffs(b[2]) + 128;
#endif
}
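
/*
 * Editorial note on the offsets above: the 140-bit bitmap spans five
 * 32-bit words (5 * 32 = 160 bits) or three 64-bit words
 * (3 * 64 = 192 bits), so each branch adds the number of bits already
 * ruled out: 32, 64, 96, 128 on 32-bit kernels; 64, 128 on 64-bit
 * ones.
 */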

/*
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, and therefore differs in spirit from ffz() above: bits are
 * numbered from 1, and ffs(0) returns 0 (man ffs).
 */
#define ffs(x) generic_ffs(x)

/*
 * hweightN - returns the Hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming weight of a number is the total number of bits set in it.
 */
#define hweight64(x) generic_hweight64(x)
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x)  generic_hweight8(x)
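
/*
 * Editorial worked example: hweight32(0xf0f0) is 8, since the word has
 * exactly eight bits set; hweight8(0x80) is 1.
 */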

static inline int __test_and_set_le_bit(unsigned long nr, unsigned long *addr)
{
	unsigned char *ADDR = (unsigned char *) addr;
	int mask, retval;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;

	return retval;
}

static inline int __test_and_clear_le_bit(unsigned long nr, unsigned long *addr)
{
	unsigned char *ADDR = (unsigned char *) addr;
	int mask, retval;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;

	return retval;
}

static inline int test_le_bit(unsigned long nr, const unsigned long *addr)
{
	const unsigned char *ADDR = (const unsigned char *) addr;
	int mask;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);

	return (mask & *ADDR) != 0;
}

static inline unsigned long find_next_zero_le_bit(unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> SZLONG_LOG);
	unsigned long result = offset & ~SZLONG_MASK;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= SZLONG_MASK;
	if (offset) {
		tmp = cpu_to_lelongp(p++);
		tmp |= ~0UL >> (_MIPS_SZLONG - offset); /* bug or feature ? */
		if (size < _MIPS_SZLONG)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= _MIPS_SZLONG;
		result += _MIPS_SZLONG;
	}
	while (size & ~SZLONG_MASK) {
		if (~(tmp = cpu_to_lelongp(p++)))
			goto found_middle;
		result += _MIPS_SZLONG;
		size -= _MIPS_SZLONG;
	}
	if (!size)
		return result;
	tmp = cpu_to_lelongp(p);

found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)		/* Are any bits zero? */
		return result + size;	/* Nope. */

found_middle:
	return result + ffz(tmp);
}

#define find_first_zero_le_bit(addr, size) \
	find_next_zero_le_bit((addr), (size), 0)

#define ext2_set_bit(nr, addr) \
	__test_and_set_le_bit((nr), (unsigned long *)(addr))
#define ext2_clear_bit(nr, addr) \
	__test_and_clear_le_bit((nr), (unsigned long *)(addr))
#define ext2_set_bit_atomic(lock, nr, addr)		\
({							\
	int ret;					\
	spin_lock(lock);				\
	ret = ext2_set_bit((nr), (addr));		\
	spin_unlock(lock);				\
	ret;						\
})

#define ext2_clear_bit_atomic(lock, nr, addr)		\
({							\
	int ret;					\
	spin_lock(lock);				\
	ret = ext2_clear_bit((nr), (addr));		\
	spin_unlock(lock);				\
	ret;						\
})
#define ext2_test_bit(nr, addr) \
	test_le_bit((nr), (unsigned long *)(addr))
#define ext2_find_first_zero_bit(addr, size) \
	find_first_zero_le_bit((unsigned long *)(addr), (size))
#define ext2_find_next_zero_bit(addr, size, off) \
	find_next_zero_le_bit((unsigned long *)(addr), (size), (off))
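
/*
 * Editorial sketch: these wrappers pin the on-disk bit order to little
 * endian regardless of host byte order, e.g. allocating an inode bit
 * in a block bitmap (lock, ino and bitmap_data are hypothetical):
 *
 *	if (!ext2_set_bit_atomic(&lock, ino, bitmap_data))
 *		... the bit was free and is now claimed ...
 */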

/*
 * Bitmap functions for the minix filesystem.
 *
 * FIXME: These assume that Minix uses the native byte/bitorder.
 * This limits the Minix filesystem's value for data exchange very much.
 */
#define minix_test_and_set_bit(nr, addr)	test_and_set_bit(nr, addr)
#define minix_set_bit(nr, addr)			set_bit(nr, addr)
#define minix_test_and_clear_bit(nr, addr)	test_and_clear_bit(nr, addr)
#define minix_test_bit(nr, addr)		test_bit(nr, addr)
#define minix_find_first_zero_bit(addr, size)	find_first_zero_bit(addr, size)

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */