#ifndef _ASM_IA64_BITOPS_H
#define _ASM_IA64_BITOPS_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's
 *	    ia64 O(1) scheduler patch
 */

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/intrinsics.h>

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 *
 * The address must be (at least) "long" aligned.
 * Note that there are drivers (e.g., eepro100) which use these operations
 * to operate on hw-defined data-structures, so we can't easily change these
 * operations to force a bigger alignment.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
static __inline__ void
set_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = 1 << (nr & 31);
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old | bit;
	} while (cmpxchg_acq(m, old, new) != old);
}

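/*
 * Example (illustrative only): bit numbers continue across word
 * boundaries, so callers never need to do the word arithmetic
 * themselves.  With a hypothetical two-word bitmap:
 *
 *	static __u32 map[2];
 *
 *	set_bit(5, map);	// sets bit 5 of map[0]
 *	set_bit(37, map);	// sets bit 5 of map[1]
 */
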
/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__set_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
}

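/*
 * Sketch (names are hypothetical): the non-atomic variant is the right
 * choice when no concurrent access is possible, e.g. while a bitmap is
 * still private to the initializing thread:
 *
 *	static __u32 map[4];
 *
 *	__set_bit(3, map);	// plain read-modify-write, no cmpxchg loop
 */
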
/*
 * clear_bit() has "acquire" semantics.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	do { /* skip */; } while (0)

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_acq(m, old, new) != old);
}

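/*
 * Sketch (hypothetical names): because cmpxchg_acq only gives acquire
 * ordering, a release-style use of clear_bit() needs an explicit
 * barrier so earlier stores are visible before the bit drops:
 *
 *	obj->data = val;			// publish results first
 *	smp_mb__before_clear_bit();
 *	clear_bit(BUSY_BIT, &obj->flags);	// then clear the busy flag
 */
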
/**
 * __clear_bit - Clears a bit in memory (non-atomic version)
 * @nr: the bit to clear
 * @addr: the address to start counting from
 */
static __inline__ void
__clear_bit (int nr, volatile void *addr)
{
	volatile __u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	*p &= ~m;
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = (1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old ^ bit;
	} while (cmpxchg_acq(m, old, new) != old);
}

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__change_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_set_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = 1 << (nr & 31);
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old | bit;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & bit) != 0;
}

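/*
 * Sketch (hypothetical names): test_and_set_bit() is the usual building
 * block for a bit-level trylock:
 *
 *	if (!test_and_set_bit(LOCK_BIT, &obj->flags)) {
 *		do_work(obj);			// we flipped 0 -> 1: owner
 *		smp_mb__before_clear_bit();
 *		clear_bit(LOCK_BIT, &obj->flags);
 *	}
 */
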
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int
__test_and_set_bit (int nr, volatile void *addr)
{
	__u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	int oldbitset = (*p & m) != 0;

	*p |= m;
	return oldbitset;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_clear_bit (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & ~mask) != 0;
}

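/*
 * Sketch (hypothetical names): the classic consumer pattern, where an
 * interrupt handler set_bit()s a flag and the bottom half consumes it
 * exactly once per posting:
 *
 *	if (test_and_clear_bit(PENDING_BIT, &dev->flags))
 *		process_pending(dev);
 */
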
/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int
__test_and_clear_bit(int nr, volatile void * addr)
{
	__u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	int oldbitset = (*p & m) != 0;

	*p &= ~m;
	return oldbitset;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to toggle
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_change_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = (1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old ^ bit;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & bit) != 0;
}

/*
 * WARNING: non-atomic version of test_and_change_bit().
 */
static __inline__ int
__test_and_change_bit (int nr, void *addr)
{
	__u32 old, bit = (1 << (nr & 31));
	__u32 *m = (__u32 *) addr + (nr >> 5);

	old = *m;
	*m = old ^ bit;
	return (old & bit) != 0;
}

static __inline__ int
test_bit (int nr, const volatile void *addr)
{
	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
}

/**
 * ffz - find the first zero bit in a long word
 * @x: The long word to find the bit in
 *
 * Returns the bit-number (0..63) of the first (least significant) zero bit.
 * Undefined if no zero exists, so code should check against ~0UL first...
 */
static inline unsigned long
ffz (unsigned long x)
{
	unsigned long result;

	result = ia64_popcnt(x & (~x - 1));
	return result;
}

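/*
 * Worked example: for x = 0xb (binary ...1011) the first zero bit is
 * bit 2.  ~x ends in ...0100, so ~x - 1 ends in ...0011; ANDing with x
 * leaves exactly the run of low one-bits, 0x3, and ia64_popcnt(0x3)
 * counts them, yielding 2.
 */
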
/**
 * __ffs - find the first set bit in word
 * @x: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long
__ffs (unsigned long x)
{
	unsigned long result;

	result = ia64_popcnt((x-1) & ~x);
	return result;
}

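/*
 * Worked example: for x = 8 (binary 1000), x - 1 = 0111 and ~x has
 * bits 0..2 set but bit 3 clear, so (x-1) & ~x = 0111; its population
 * count, 3, is the index of the lowest set bit.
 */
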
#ifdef __KERNEL__

/*
 * Return bit number of last (most-significant) bit set.  Undefined
 * for x==0.  Bits are numbered from 0..63 (e.g., ia64_fls(9) == 3).
 */
static inline unsigned long
ia64_fls (unsigned long x)
{
	long double d = x;
	long exp;

	exp = ia64_getf_exp(d);
	return exp - 0xffff;
}

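/*
 * How the trick above works (sketch): converting x to long double
 * normalizes it to 1.xxx * 2^n, where n is the index of the most
 * significant set bit.  ia64_getf_exp() reads the biased exponent
 * field of the FP register format, so subtracting the 0xffff bias
 * recovers n.  For x = 9 = 1.001b * 2^3 this yields 3.
 */
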
/*
 * Find the last (most significant) bit set.  Returns 0 for x==0 and
 * bits are numbered from 1..32 (e.g., fls(9) == 4).
 */
static inline int
fls (int t)
{
	unsigned long x = t & 0xffffffffu;

	if (!x)
		return 0;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	return ia64_popcnt(x);
}
#define fls64(x)	generic_fls64(x)

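/*
 * Worked example: for t = 9 (binary 1001) the shift-and-or cascade
 * smears the top set bit downward, producing 1111b; ia64_popcnt()
 * then counts 4 bits, matching fls(9) == 4.
 */
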
/*
 * ffs: find first bit set.  This is defined the same way as the libc and
 * compiler builtin ffs routines, therefore differs in spirit from the above
 * ffz (man ffs): it operates on "int" values only and the result value is
 * the bit number + 1.  ffs(0) is defined to return zero.
 */
#define ffs(x)	__builtin_ffs(x)

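/*
 * Example of the differing conventions: ffs(8) == 4 (1-based, with
 * ffs(0) == 0), while __ffs(8) == 3 (0-based, undefined for 0) and
 * ffz(~8UL) == 3 (position of the first zero bit).
 */
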
/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */
static __inline__ unsigned long
hweight64 (unsigned long x)
{
	unsigned long result;
	result = ia64_popcnt(x);
	return result;
}

#define hweight32(x)	(unsigned int) hweight64((x) & 0xfffffffful)
#define hweight16(x)	(unsigned int) hweight64((x) & 0xfffful)
#define hweight8(x)	(unsigned int) hweight64((x) & 0xfful)

#endif /* __KERNEL__ */

extern int __find_next_zero_bit (const void *addr, unsigned long size,
				 unsigned long offset);
extern int __find_next_bit(const void *addr, unsigned long size,
			   unsigned long offset);

#define find_next_zero_bit(addr, size, offset) \
			__find_next_zero_bit((addr), (size), (offset))
#define find_next_bit(addr, size, offset) \
			__find_next_bit((addr), (size), (offset))

/*
 * The optimizer actually generates good code for this case.
 */
#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)

#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)

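/*
 * Sketch (hypothetical names): a typical ID-allocation loop built on
 * these helpers, retrying when another CPU wins the race for a slot:
 *
 *	int id;
 *	do {
 *		id = find_first_zero_bit(map, nbits);
 *		if (id >= nbits)
 *			return -1;	// bitmap full
 *	} while (test_and_set_bit(id, map));
 *	return id;
 */
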
#ifdef __KERNEL__

#define ext2_set_bit			test_and_set_bit
#define ext2_set_bit_atomic(l,n,a)	test_and_set_bit(n,a)
#define ext2_clear_bit			test_and_clear_bit
#define ext2_clear_bit_atomic(l,n,a)	test_and_clear_bit(n,a)
#define ext2_test_bit			test_bit
#define ext2_find_first_zero_bit	find_first_zero_bit
#define ext2_find_next_zero_bit		find_next_zero_bit

/* Bitmap functions for the minix filesystem.  */
#define minix_test_and_set_bit(nr,addr)		test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr)			set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr)	test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr)			test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size)	find_first_zero_bit(addr,size)

static inline int
sched_find_first_bit (unsigned long *b)
{
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return 64 + __ffs(b[1]);
	return __ffs(b[2]) + 128;
}

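/*
 * Worked example: sched_find_first_bit() scans a three-word priority
 * bitmap (the O(1) scheduler's 140-bit priority map).  With b[0] == 0
 * and b[1] == 0x10 it returns 64 + __ffs(0x10) == 68; if only b[2]
 * has bits set, the result is offset by 128.  The caller must
 * guarantee at least one bit is set, since __ffs(0) is undefined.
 */
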
#endif /* __KERNEL__ */

#endif /* _ASM_IA64_BITOPS_H */