blob: abc9ca7784568ba5737add4fce84577348428eac [file] [log] [blame]
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>

#ifdef __KERNEL__
/* Single-bit masks: BIT(2) == 0x4; the _ULL variant for 64-bit masks. */
#define BIT(nr)			(1UL << (nr))
#define BIT_ULL(nr)		(1ULL << (nr))
/* Word index and in-word mask for bit @nr of a bitmap of longs. */
#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
/* Same, for a bitmap stored in long longs. */
#define BIT_ULL_MASK(nr)	(1ULL << ((nr) % BITS_PER_LONG_LONG))
#define BIT_ULL_WORD(nr)	((nr) / BITS_PER_LONG_LONG)
#define BITS_PER_BYTE		8
/* Number of longs needed to hold a bitmap of @nr bits. */
#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
#endif
15
/*
 * Create a contiguous bitmask starting at bit position @l and ending at
 * position @h. For example
 * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
 *
 * Built from two full-width masks instead of ((1 << width) - 1) << l,
 * so that a full-width request such as GENMASK(BITS_PER_LONG - 1, 0)
 * never shifts by the width of the type (undefined behaviour in C).
 */
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
23
/*
 * Generic software implementations of Hamming weight (count of set
 * bits); architectures without a popcount instruction fall back to
 * these — NOTE(review): definitions live outside this header.
 */
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);
28
Linus Torvalds1da177e2005-04-16 15:20:36 -070029/*
Linus Torvalds1da177e2005-04-16 15:20:36 -070030 * Include this here because some architectures need generic_ffs/fls in
31 * scope
32 */
33#include <asm/bitops.h>
34
/*
 * Iterate over each set bit in the bitmap @addr of @size bits;
 * @bit holds the current bit number on every pass.
 */
#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size));		\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_from(bit, addr, size) \
	for ((bit) = find_next_bit((addr), (size), (bit));	\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

/* Iterate over each clear bit in the bitmap @addr of @size bits. */
#define for_each_clear_bit(bit, addr, size) \
	for ((bit) = find_first_zero_bit((addr), (size));	\
	     (bit) < (size);					\
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

/* same as for_each_clear_bit() but use bit as value to start with */
#define for_each_clear_bit_from(bit, addr, size) \
	for ((bit) = find_next_zero_bit((addr), (size), (bit));	\
	     (bit) < (size);					\
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
56
/*
 * get_bitmask_order - position (1-based) of the highest set bit
 * @count: value to examine
 */
static __inline__ int get_bitmask_order(unsigned int count)
{
	return fls(count); /* We could be slightly more clever with -1 here... */
}
64
/*
 * get_count_order - order needed to hold @count items
 * @count: number of items
 */
static __inline__ int get_count_order(unsigned int count)
{
	/* Exact power of two: the order is the index of that bit. */
	if ((count & (count - 1)) == 0)
		return fls(count) - 1;
	/* Otherwise round up to the next power of two. */
	return fls(count);
}
74
/* Count the set bits in an unsigned long of either width. */
static inline unsigned long hweight_long(unsigned long w)
{
	if (sizeof(w) == 4)
		return hweight32(w);
	return hweight64(w);
}
79
Robert P. J. Day45f8bde2007-01-26 00:57:09 -080080/**
Alexey Dobriyanf2ea0f52012-01-14 21:44:49 +030081 * rol64 - rotate a 64-bit value left
82 * @word: value to rotate
83 * @shift: bits to roll
84 */
85static inline __u64 rol64(__u64 word, unsigned int shift)
86{
87 return (word << shift) | (word >> (64 - shift));
88}
89
90/**
91 * ror64 - rotate a 64-bit value right
92 * @word: value to rotate
93 * @shift: bits to roll
94 */
95static inline __u64 ror64(__u64 word, unsigned int shift)
96{
97 return (word >> shift) | (word << (64 - shift));
98}
99
100/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700101 * rol32 - rotate a 32-bit value left
Linus Torvalds1da177e2005-04-16 15:20:36 -0700102 * @word: value to rotate
103 * @shift: bits to roll
104 */
105static inline __u32 rol32(__u32 word, unsigned int shift)
106{
107 return (word << shift) | (word >> (32 - shift));
108}
109
Robert P. J. Day45f8bde2007-01-26 00:57:09 -0800110/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700111 * ror32 - rotate a 32-bit value right
Linus Torvalds1da177e2005-04-16 15:20:36 -0700112 * @word: value to rotate
113 * @shift: bits to roll
114 */
115static inline __u32 ror32(__u32 word, unsigned int shift)
116{
117 return (word >> shift) | (word << (32 - shift));
118}
119
Harvey Harrison3afe3922008-03-28 14:16:01 -0700120/**
121 * rol16 - rotate a 16-bit value left
122 * @word: value to rotate
123 * @shift: bits to roll
124 */
125static inline __u16 rol16(__u16 word, unsigned int shift)
126{
127 return (word << shift) | (word >> (16 - shift));
128}
129
130/**
131 * ror16 - rotate a 16-bit value right
132 * @word: value to rotate
133 * @shift: bits to roll
134 */
135static inline __u16 ror16(__u16 word, unsigned int shift)
136{
137 return (word >> shift) | (word << (16 - shift));
138}
139
140/**
141 * rol8 - rotate an 8-bit value left
142 * @word: value to rotate
143 * @shift: bits to roll
144 */
145static inline __u8 rol8(__u8 word, unsigned int shift)
146{
147 return (word << shift) | (word >> (8 - shift));
148}
149
150/**
151 * ror8 - rotate an 8-bit value right
152 * @word: value to rotate
153 * @shift: bits to roll
154 */
155static inline __u8 ror8(__u8 word, unsigned int shift)
156{
157 return (word >> shift) | (word << (8 - shift));
158}
159
/**
 * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<32) to sign bit
 *
 * Shifts the chosen sign bit up to bit 31, then shifts back down as a
 * signed value so the sign bit is replicated into the upper bits.
 * NOTE(review): right-shifting a negative value is
 * implementation-defined in C; this relies on the usual
 * arithmetic-shift behaviour.
 */
static inline __s32 sign_extend32(__u32 value, int index)
{
	__u8 shift = 31 - index;
	return (__s32)(value << shift) >> shift;
}
170
/*
 * fls_long - fls() for an unsigned long of either width.
 * The sizeof comparison is resolved at compile time.
 */
static inline unsigned fls_long(unsigned long l)
{
	return sizeof(l) == 4 ? fls(l) : fls64(l);
}
177
/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64 bit arches this is a synonym for __ffs
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
	/* Low 32 bits empty: the first set bit is in the high word. */
	if (((u32)word) == 0UL)
		return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
	return __ffs((unsigned long)word);
}
196
#ifdef __KERNEL__

/* Architectures may provide their own find_last_bit() as a macro. */
#ifndef find_last_bit
/**
 * find_last_bit - find the last set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the last set bit, or @size if no bits
 * are set.
 */
extern unsigned long find_last_bit(const unsigned long *addr,
				   unsigned long size);
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_BITOPS_H */