| #ifndef _LINUX_BITOPS_H |
| #define _LINUX_BITOPS_H |
| #include <asm/types.h> |
| #include <linux/bits.h> |
| |
| #define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) |
| #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(long)) |
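| |
| /* |
| * Worked values (illustrative, assuming BITS_PER_BYTE == 8 and a 64-bit |
| * long): |
| * |
| *	BITS_PER_TYPE(u8)   ==  8 |
| *	BITS_PER_TYPE(long) == 64 |
| *	BITS_TO_LONGS(1)    ==  1 |
| *	BITS_TO_LONGS(65)   ==  2 |
| */ |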
| |
| extern unsigned int __sw_hweight8(unsigned int w); |
| extern unsigned int __sw_hweight16(unsigned int w); |
| extern unsigned int __sw_hweight32(unsigned int w); |
| extern unsigned long __sw_hweight64(__u64 w); |
| |
| /* |
| * Include this here because some architectures need generic_ffs/fls in |
| * scope |
| */ |
| #include <asm/bitops.h> |
| |
| #define for_each_set_bit(bit, addr, size) \ |
| for ((bit) = find_first_bit((addr), (size)); \ |
| (bit) < (size); \ |
| (bit) = find_next_bit((addr), (size), (bit) + 1)) |
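| |
| /* |
| * Illustrative use of for_each_set_bit() (a sketch, not part of this |
| * header); DECLARE_BITMAP(), bitmap_zero() and set_bit() come from the |
| * kernel's bitmap/bitops APIs: |
| * |
| *	DECLARE_BITMAP(map, 64); |
| *	unsigned int bit; |
| * |
| *	bitmap_zero(map, 64); |
| *	set_bit(3, map); |
| *	set_bit(40, map); |
| *	for_each_set_bit(bit, map, 64) |
| *		pr_info("bit %u set\n", bit);	// prints 3, then 40 |
| */ |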
| |
| /* Same as for_each_set_bit(), but start the search at @bit */ |
| #define for_each_set_bit_from(bit, addr, size) \ |
| for ((bit) = find_next_bit((addr), (size), (bit)); \ |
| (bit) < (size); \ |
| (bit) = find_next_bit((addr), (size), (bit) + 1)) |
| |
| #define for_each_clear_bit(bit, addr, size) \ |
| for ((bit) = find_first_zero_bit((addr), (size)); \ |
| (bit) < (size); \ |
| (bit) = find_next_zero_bit((addr), (size), (bit) + 1)) |
| |
| /* Same as for_each_clear_bit(), but start the search at @bit */ |
| #define for_each_clear_bit_from(bit, addr, size) \ |
| for ((bit) = find_next_zero_bit((addr), (size), (bit)); \ |
| (bit) < (size); \ |
| (bit) = find_next_zero_bit((addr), (size), (bit) + 1)) |
| |
| static inline int get_bitmask_order(unsigned int count) |
| { |
| 	/* fls() returns one plus the index of the most significant set bit */ |
| 	return fls(count); |
| } |
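| |
| /* |
| * Worked values (illustrative): |
| * |
| *	get_bitmask_order(0) == 0 |
| *	get_bitmask_order(1) == 1 |
| *	get_bitmask_order(5) == 3	// 0b101, MSB at index 2 |
| *	get_bitmask_order(8) == 4	// 0b1000, MSB at index 3 |
| */ |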
| |
| static __always_inline unsigned long hweight_long(unsigned long w) |
| { |
| return sizeof(w) == 4 ? hweight32(w) : hweight64(w); |
| } |
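| |
| /* |
| * Illustrative values: hweight_long() counts the set bits (the "Hamming |
| * weight") of a full word, picking hweight32() or hweight64() at compile |
| * time from sizeof(long): |
| * |
| *	hweight_long(0UL)     == 0 |
| *	hweight_long(0xffUL)  == 8 |
| *	hweight_long(0x101UL) == 2 |
| */ |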
| |
| /** |
| * rol64 - rotate a 64-bit value left |
| * @word: value to rotate |
| * @shift: bits to roll |
| */ |
| static inline __u64 rol64(__u64 word, unsigned int shift) |
| { |
| return (word << (shift & 63)) | (word >> ((-shift) & 63)); |
| } |
| |
| /** |
| * ror64 - rotate a 64-bit value right |
| * @word: value to rotate |
| * @shift: bits to roll |
| */ |
| static inline __u64 ror64(__u64 word, unsigned int shift) |
| { |
| return (word >> (shift & 63)) | (word << ((-shift) & 63)); |
| } |
| |
| /** |
| * rol32 - rotate a 32-bit value left |
| * @word: value to rotate |
| * @shift: bits to roll |
| */ |
| static inline __u32 rol32(__u32 word, unsigned int shift) |
| { |
| return (word << (shift & 31)) | (word >> ((-shift) & 31)); |
| } |
| |
| /** |
| * ror32 - rotate a 32-bit value right |
| * @word: value to rotate |
| * @shift: bits to roll |
| */ |
| static inline __u32 ror32(__u32 word, unsigned int shift) |
| { |
| return (word >> (shift & 31)) | (word << ((-shift) & 31)); |
| } |
| |
| /** |
| * rol16 - rotate a 16-bit value left |
| * @word: value to rotate |
| * @shift: bits to roll |
| */ |
| static inline __u16 rol16(__u16 word, unsigned int shift) |
| { |
| return (word << (shift & 15)) | (word >> ((-shift) & 15)); |
| } |
| |
| /** |
| * ror16 - rotate a 16-bit value right |
| * @word: value to rotate |
| * @shift: bits to roll |
| */ |
| static inline __u16 ror16(__u16 word, unsigned int shift) |
| { |
| return (word >> (shift & 15)) | (word << ((-shift) & 15)); |
| } |
| |
| /** |
| * rol8 - rotate an 8-bit value left |
| * @word: value to rotate |
| * @shift: bits to roll |
| */ |
| static inline __u8 rol8(__u8 word, unsigned int shift) |
| { |
| return (word << (shift & 7)) | (word >> ((-shift) & 7)); |
| } |
| |
| /** |
| * ror8 - rotate an 8-bit value right |
| * @word: value to rotate |
| * @shift: bits to roll |
| */ |
| static inline __u8 ror8(__u8 word, unsigned int shift) |
| { |
| return (word >> (shift & 7)) | (word << ((-shift) & 7)); |
| } |
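| |
| /* |
| * Worked example for the rotate helpers above (the 8/16/32/64-bit variants |
| * all follow the same pattern): masking the shift count with (width - 1) |
| * keeps both shifts in range, so a @shift of 0 or of a multiple of the |
| * width does not invoke undefined behaviour: |
| * |
| *	rol32(0x80000001, 1) == 0x00000003	// MSB wraps around to bit 0 |
| *	ror32(0x80000001, 1) == 0xc0000000	// LSB wraps around to bit 31 |
| *	rol8(0x81, 1)        == 0x03 |
| */ |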
| |
| /** |
| * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit |
| * @value: value to sign extend |
| * @index: 0 based bit index (0<=index<32) to sign bit |
| * |
| * This is safe to use for 16- and 8-bit types as well. |
| */ |
| static inline __s32 sign_extend32(__u32 value, int index) |
| { |
| __u8 shift = 31 - index; |
| return (__s32)(value << shift) >> shift; |
| } |
| |
| /** |
| * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit |
| * @value: value to sign extend |
| * @index: 0 based bit index (0<=index<64) to sign bit |
| */ |
| static inline __s64 sign_extend64(__u64 value, int index) |
| { |
| __u8 shift = 63 - index; |
| return (__s64)(value << shift) >> shift; |
| } |
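| |
| /* |
| * Worked example (illustrative): the value is shifted left so that bit |
| * @index lands in the hardware sign position, then arithmetically shifted |
| * back, which replicates that bit into all higher bits: |
| * |
| *	sign_extend32(0x80, 7) == -128		// (__s32)0xffffff80 |
| *	sign_extend32(0x7f, 7) == 127		// positive, unchanged |
| *	sign_extend64(0x8000000000ULL, 39) == (__s64)0xffffff8000000000LL |
| */ |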
| |
| static inline unsigned int fls_long(unsigned long l) |
| { |
| if (sizeof(l) == 4) |
| return fls(l); |
| return fls64(l); |
| } |
| |
| static inline int get_count_order(unsigned int count) |
| { |
| int order; |
| |
| order = fls(count) - 1; |
| if (count & (count - 1)) |
| order++; |
| return order; |
| } |
| |
| /** |
| * get_count_order_long - get order after rounding @l up to a power of 2 |
| * @l: parameter |
| * |
| * Same as get_count_order() but with an unsigned long parameter. |
| */ |
| static inline int get_count_order_long(unsigned long l) |
| { |
| if (l == 0UL) |
| return -1; |
| else if (l & (l - 1UL)) |
| return (int)fls_long(l); |
| else |
| return (int)fls_long(l) - 1; |
| } |
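| |
| /* |
| * Worked values for the two order helpers (illustrative): |
| * |
| *	get_count_order(4) == 2			// already a power of 2 |
| *	get_count_order(5) == 3			// rounds up to 8 == 2^3 |
| *	get_count_order_long(1UL) == 0 |
| *	get_count_order_long(0UL) == -1		// no valid order for zero |
| */ |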
| |
| /** |
| * __ffs64 - find first set bit in a 64 bit word |
| * @word: The 64 bit word |
| * |
| * On 64-bit arches this is a synonym for __ffs(). |
| * The result is not defined if no bits are set, so check that @word |
| * is non-zero before calling this. |
| */ |
| static inline unsigned long __ffs64(u64 word) |
| { |
| #if BITS_PER_LONG == 32 |
| if (((u32)word) == 0UL) |
| return __ffs((u32)(word >> 32)) + 32; |
| #elif BITS_PER_LONG != 64 |
| #error BITS_PER_LONG not 32 or 64 |
| #endif |
| return __ffs((unsigned long)word); |
| } |
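| |
| /* |
| * Example (illustrative): on a 32-bit kernel a u64 spans two longs, so |
| * __ffs64() searches the low half first and falls back to the high half |
| * only when the low 32 bits are all zero: |
| * |
| *	__ffs64(0x0000000000000008ULL) == 3 |
| *	__ffs64(0x0000000100000000ULL) == 32	// low half empty |
| */ |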
| |
| #ifdef __KERNEL__ |
| |
| #ifndef set_mask_bits |
| #define set_mask_bits(ptr, _mask, _bits) \ |
| ({ \ |
| const typeof(*(ptr)) mask = (_mask), bits = (_bits); \ |
| typeof(*(ptr)) old, new; \ |
| \ |
| do { \ |
| old = READ_ONCE(*(ptr)); \ |
| new = (old & ~mask) | bits; \ |
| } while (cmpxchg(ptr, old, new) != old); \ |
| \ |
| new; \ |
| }) |
| #endif |
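| |
| /* |
| * Illustrative use of set_mask_bits() (a sketch; "flags", FLAG_MASK and |
| * FLAG_NEW are made-up names): replace the bits covered by a mask in one |
| * atomic read-modify-write, retrying via cmpxchg() if the word changed |
| * underneath us: |
| * |
| *	set_mask_bits(&flags, FLAG_MASK, FLAG_NEW); |
| *	// behaves like: flags = (flags & ~FLAG_MASK) | FLAG_NEW, atomically |
| */ |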
| |
| #ifndef bit_clear_unless |
| #define bit_clear_unless(ptr, _clear, _test) \ |
| ({ \ |
| const typeof(*(ptr)) clear = (_clear), test = (_test); \ |
| typeof(*(ptr)) old, new; \ |
| \ |
| do { \ |
| old = READ_ONCE(*(ptr)); \ |
| new = old & ~clear; \ |
| } while (!(old & test) && \ |
| cmpxchg(ptr, old, new) != old); \ |
| \ |
| !(old & test); \ |
| }) |
| #endif |
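| |
| /* |
| * Illustrative use of bit_clear_unless() (a sketch; "state", BUSY and |
| * GUARD are made-up names, "cleared" a local bool): clear the BUSY bits |
| * atomically, but only while none of the GUARD bits are set; the |
| * expression evaluates to true when the clear actually went through: |
| * |
| *	cleared = bit_clear_unless(&state, BUSY, GUARD); |
| *	// cleared == true:  BUSY was cleared (GUARD was not set) |
| *	// cleared == false: word left untouched (GUARD was set) |
| */ |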
| |
| #ifndef find_last_bit |
| /** |
| * find_last_bit - find the last set bit in a memory region |
| * @addr: The address to start the search at |
| * @size: The number of bits to search |
| * |
| * Returns the bit number of the last set bit, or @size if no bits are set. |
| */ |
| extern unsigned long find_last_bit(const unsigned long *addr, |
| unsigned long size); |
| #endif |
| |
| #endif /* __KERNEL__ */ |
| #endif /* _LINUX_BITOPS_H */ |