#ifndef __LINUX_BIT_SPINLOCK_H
#define __LINUX_BIT_SPINLOCK_H

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <asm/atomic.h>

/*
 * bit-based spin_lock()
 *
 * Don't use this unless you really need to: spin_lock() and spin_unlock()
 * are significantly faster.
 */
static inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
        /*
         * Assuming the lock is uncontended, this never enters
         * the body of the outer loop. If it is contended, then
         * within the inner loop a non-atomic test is used to
         * busywait with less bus contention for a good time to
         * attempt to acquire the lock bit.
         */
        preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
        while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
                preempt_enable();
                do {
                        cpu_relax();
                } while (test_bit(bitnum, addr));
                preempt_disable();
        }
#endif
        __acquire(bitlock);
}

/*
 * Return true if it was acquired
 */
static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
{
        preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
        if (unlikely(test_and_set_bit_lock(bitnum, addr))) {
                preempt_enable();
                return 0;
        }
#endif
        __acquire(bitlock);
        return 1;
}

/*
 * bit-based spin_unlock()
 */
static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
{
#ifdef CONFIG_DEBUG_SPINLOCK
        BUG_ON(!test_bit(bitnum, addr));
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
        clear_bit_unlock(bitnum, addr);
#endif
        preempt_enable();
        __release(bitlock);
}

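/*
 * Illustrative sketch only: a minimal caller of the lock/unlock API above,
 * with an arbitrary choice of bit 0 as the lock bit in a caller-supplied
 * word. The example_* names are hypothetical and not part of this header.
 */
static inline void example_locked_update(unsigned long *word)
{
        bit_spin_lock(0, word);         /* spin until bit 0 is acquired */
        /* ... touch the data protected by bit 0 of *word ... */
        bit_spin_unlock(0, word);       /* clear bit 0 with release semantics */
}

/*
 * Same idea with the trylock variant: back off instead of spinning when
 * the lock bit is already held. Again a hypothetical helper, shown only
 * for illustration.
 */
static inline int example_try_locked_update(unsigned long *word)
{
        if (!bit_spin_trylock(0, word))
                return 0;               /* bit 0 already held by someone else */
        /* ... critical section ... */
        bit_spin_unlock(0, word);
        return 1;
}
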
/*
 * bit-based spin_unlock()
 * non-atomic version, which can be used, e.g., if the bit lock itself is
 * protecting the rest of the flags in the word.
 */
static inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
{
#ifdef CONFIG_DEBUG_SPINLOCK
        BUG_ON(!test_bit(bitnum, addr));
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
        __clear_bit_unlock(bitnum, addr);
#endif
        preempt_enable();
        __release(bitlock);
}

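/*
 * Illustrative sketch only: the non-atomic unlock above is meant for the
 * case where every update of the word, including the release, happens
 * while the lock bit is held. The flag bit numbers and the example_*
 * name below are hypothetical.
 */
static inline void example_set_flag_and_unlock(unsigned long *word)
{
        bit_spin_lock(0, word);         /* bit 0 serialises all writers of *word */
        __set_bit(1, word);             /* other flag bits change only under the lock */
        __bit_spin_unlock(0, word);     /* so a non-atomic release is sufficient */
}
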
/*
 * Return true if the lock is held.
 */
static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
        return test_bit(bitnum, addr);
#elif defined CONFIG_PREEMPT
        return preempt_count();
#else
        return 1;
#endif
}

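/*
 * Illustrative sketch only: bit_spin_is_locked() is mainly useful for
 * assertions, since on UP builds without spinlock debugging it degrades
 * to checking preempt_count() or simply returning 1. The example_* name
 * is hypothetical.
 */
static inline void example_assert_bit_locked(int bitnum, unsigned long *addr)
{
        BUG_ON(!bit_spin_is_locked(bitnum, addr));
}
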
#endif /* __LINUX_BIT_SPINLOCK_H */