blob: ab7635a4acd98e7bf5ba911f865eabf6417b00c8 [file] [log] [blame]
#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

/*
 * This header provides the x86 back-end for the generic bitops API;
 * users must go through <linux/bitops.h>, never include this directly.
 */
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>	/* LOCK_PREFIX */
14
15/*
16 * These have to be done with inline assembly: that way the bit-setting
17 * is guaranteed to be atomic. All bit operations return 0 if the bit
18 * was cleared before the operation and != 0 if it was not.
19 *
20 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
21 */
22
#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
   versions.  "=m" claims the operand is write-only, so in theory the
   compiler could discard the old memory contents; gcc < 4.1 chokes on
   the correct read-write "+m" form here. */
#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
#else
/* "+m": the asm both reads and writes the addressed memory. */
#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
#endif

/* Shorthand used inside the asm bodies below; expands the caller's
 * local `addr` parameter (unhygienic on purpose; #undef'd at the end). */
#define ADDR				BITOP_ADDR(addr)
32
/*
 * We do the locked ops that don't return the old value as
 * a mask operation on a byte.
 */

/* True when @nr is a compile-time constant, letting set_bit()/clear_bit()
 * use a short orb/andb with an immediate mask instead of bts/btr. */
#define IS_IMMEDIATE(nr)		\
	(__builtin_constant_p(nr))

/* NOTE: like ADDR, these expand the caller's local `nr` and `addr`.
 * nr>>3 selects the byte containing the bit; nr&7 the bit within it. */
#define CONST_MASK_ADDR			BITOP_ADDR(addr + (nr>>3))
#define CONST_MASK			(1 << (nr & 7))
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned int nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr))
		/* Constant @nr: OR an immediate mask into the one byte
		 * that contains the bit — shorter encoding than bts. */
		asm volatile(LOCK_PREFIX "orb %1,%0" : CONST_MASK_ADDR : "i" (CONST_MASK) : "memory");
	else
		/* Variable @nr: bts with a register index can address a
		 * bit arbitrarily far from @addr, hence the "almost
		 * arbitrarily large" note above. */
		asm volatile(LOCK_PREFIX "bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}
64
Linus Torvalds1a750e02008-06-18 21:03:26 -070065
/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
	/* Same bts as set_bit(), but without LOCK_PREFIX: no atomicity. */
	asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}
79
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr))
		/* NOTE(review): ~CONST_MASK is a full int-width value
		 * (e.g. ~0x80 = 0xffffff7f); andb only uses the low byte,
		 * relying on the assembler to truncate the "i" operand —
		 * confirm this stays warning-free on all toolchains. */
		asm volatile(LOCK_PREFIX "andb %1,%0" : CONST_MASK_ADDR : "i" (~CONST_MASK));
	else
		asm volatile(LOCK_PREFIX "btr %1,%0" : ADDR : "Ir" (nr));
}
97
/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation.  It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
{
	/* Compiler barrier: keep earlier stores from being sunk past the
	 * clearing op.  clear_bit() itself is a locked op, which orders
	 * it with respect to other CPUs — together that is a release. */
	barrier();
	clear_bit(nr, addr);
}
111
/* Non-atomic btr; see clear_bit() for the LOCK_PREFIX'd version. */
static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}
116
/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation.  It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads.  Same principle as spin_unlock.
 */
static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
{
	/* Compiler-only barrier; hardware ordering argument is above. */
	barrier();
	__clear_bit(nr, addr);
}
134
/*
 * clear_bit() carries no memory barrier of its own (see its kerneldoc);
 * on x86 a compiler barrier on each side is all a locking user needs.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
137
/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile unsigned long *addr)
{
	/* Non-atomic btc (bit-test-and-complement, result discarded). */
	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
}
151
/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	/* Locked btc makes the toggle atomic across CPUs. */
	asm volatile(LOCK_PREFIX "btc %1,%0" : ADDR : "Ir" (nr));
}
165
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 *
 * Returns 0 if the bit was clear, nonzero (-1) if it was set.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	/* bts leaves the previous bit value in CF; "sbb %0,%0" turns CF
	 * into 0 / all-ones without a branch. */
	asm volatile(LOCK_PREFIX "bts %2,%1\n\t"
		     "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");

	return oldbit;
}
183
/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86: the locked op already
 * provides the acquire ordering a lock needs.
 */
static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr)
{
	return test_and_set_bit(nr, addr);
}
195
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	/* Unlocked bts; old bit value comes back via CF -> sbb. */
	asm("bts %2,%1\n\t"
	    "sbb %0,%0"
	    : "=r" (oldbit), ADDR
	    : "Ir" (nr));
	return oldbit;
}
215
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 *
 * Returns 0 if the bit was clear, nonzero (-1) if it was set.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	/* Locked btr; previous bit value via CF -> sbb, as above. */
	asm volatile(LOCK_PREFIX "btr %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");

	return oldbit;
}
234
/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	/* NOTE(review): unlike __test_and_change_bit() below, there is no
	 * "memory" clobber here — confirm the asymmetry is intentional. */
	asm volatile("btr %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr));
	return oldbit;
}
254
/* WARNING: non atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	/* Unlocked btc; old bit value via CF -> sbb. */
	asm volatile("btc %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr) : "memory");

	return oldbit;
}
267
/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 *
 * Returns 0 if the bit was clear, nonzero (-1) if it was set.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	/* Locked btc; previous bit value via CF -> sbb. */
	asm volatile(LOCK_PREFIX "btc %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");

	return oldbit;
}
286
Andrew Morton5136dea2008-05-14 16:10:41 -0700287static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
Jeremy Fitzhardinge1c54d772008-01-30 13:30:55 +0100288{
Glauber de Oliveira Costa26996dd2008-01-30 13:31:31 +0100289 return ((1UL << (nr % BITS_PER_LONG)) &
290 (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
Jeremy Fitzhardinge1c54d772008-01-30 13:30:55 +0100291}
292
/*
 * variable_test_bit - test_bit() for a runtime @nr: bt copies the
 * addressed bit into CF, sbb materialises CF as 0 / all-ones.
 */
static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
{
	int oldbit;

	/* Plain "m" input (not BITOP_ADDR): the memory is only read. */
	asm volatile("bt %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit)
		     : "m" (*(unsigned long *)addr), "Ir" (nr));

	return oldbit;
}
304
#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile unsigned long *addr);
#endif

/* Compile-time dispatch: a constant @nr folds to a plain C mask test,
 * anything else goes through the bt instruction. */
#define test_bit(nr, addr)			\
	(__builtin_constant_p((nr))		\
	 ? constant_test_bit((nr), (addr))	\
	 : variable_test_bit((nr), (addr)))
Jeremy Fitzhardinge1c54d772008-01-30 13:30:55 +0100318
/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Returns the 0-based index of the least significant set bit.
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	/* bsf's destination is undefined when the source is zero,
	 * hence the "check against 0 first" contract above. */
	asm("bsf %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}
332
/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * Returns the 0-based index of the least significant zero bit.
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	/* The first zero of word is the first set bit of ~word. */
	asm("bsf %1,%0"
	    : "=r" (word)
	    : "r" (~word));
	return word;
}
346
/*
 * __fls: find last set bit in word
 * @word: The word to search
 *
 * Returns the 0-based index of the most significant set bit.
 * Undefined if no bit is set, so code should check against 0 first.
 * (The previous comment said "no zero ... ~0UL" — a copy/paste from
 * ffz; bsr's result is undefined for a zero input, not an all-ones one.)
 */
static inline unsigned long __fls(unsigned long word)
{
	asm("bsr %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}
360
#ifdef __KERNEL__
/**
 * ffs - find first set bit in word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the other bitops.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first
 * set bit if value is nonzero.  The first (least significant) bit
 * is at position 1.
 */
static inline int ffs(int x)
{
	int r;
#ifdef CONFIG_X86_CMOV
	/* cmovzl substitutes -1 when bsfl saw a zero input (ZF set). */
	asm("bsfl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=r" (r) : "rm" (x), "r" (-1));
#else
	/* No cmov available: branch around a movl $-1 instead. */
	asm("bsfl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	/* r is the 0-based index, or -1 for x == 0; +1 gives the
	 * 1-based libc convention (0 meaning "no bit set"). */
	return r + 1;
}
388
/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero.  The last (most significant) bit is
 * at position 32.
 */
static inline int fls(int x)
{
	int r;
#ifdef CONFIG_X86_CMOV
	/* cmovzl substitutes -1 when bsrl saw a zero input.
	 * NOTE(review): "=&r" early-clobber here vs plain "=r" in
	 * ffs() — confirm whether ffs() also needs the constraint. */
	asm("bsrl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "rm" (-1));
#else
	/* No cmov available: branch around a movl $-1 instead. */
	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	/* 0-based index (or -1) mapped to the 1-based convention. */
	return r + 1;
}
#endif /* __KERNEL__ */
416
#undef ADDR	/* internal asm-constraint helper; not part of the API */
418
/**
 * set_bit_string - set @len consecutive bits starting at bit @i
 * @bitmap: base of the bitmap
 * @i: index of the first bit to set
 * @len: number of consecutive bits to set
 *
 * Non-atomic (uses __set_bit()); callers must serialize access.
 */
static inline void set_bit_string(unsigned long *bitmap,
				  unsigned long i, int len)
{
	unsigned long end = i + len;

	for (; i < end; i++)
		__set_bit(i, bitmap);
}
Jeremy Fitzhardinge1c54d772008-01-30 13:30:55 +0100428
#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

/* Tell the generic hweight code that multiplication is cheap here. */
#define ARCH_HAS_FAST_MULTIPLIER 1

#include <asm-generic/bitops/hweight.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls64.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/ext2-non-atomic.h>

/* The @lock argument is unused on x86: the locked bitops are already
 * atomic, so no external spinlock is needed. */
#define ext2_set_bit_atomic(lock, nr, addr)			\
	test_and_set_bit((nr), (unsigned long *)(addr))
#define ext2_clear_bit_atomic(lock, nr, addr)			\
	test_and_clear_bit((nr), (unsigned long *)(addr))

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */