/*
 * lib/kernel_lock.c
 *
 * This is the traditional BKL - big kernel lock. Largely
 * relegated to obsolescence, but used by various less
 * important (or lazy) subsystems.
 */
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/semaphore.h>
#define CREATE_TRACE_POINTS
#include <linux/smp_lock.h>

/*
 * The 'big kernel lock'
 *
 * This spinlock is taken and released recursively by lock_kernel()
 * and unlock_kernel(). It is transparently dropped and reacquired
 * over schedule(). It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Don't use in new code.
 */
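/*
 * kernel_flag is __cacheline_aligned_in_smp so that, on SMP builds, this
 * heavily contended lock gets its own cache line and does not false-share
 * with unrelated data.
 */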
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);


/*
 * Acquire/release the underlying lock from the scheduler.
 *
 * This is called with preemption disabled, and should
 * return an error value if it cannot get the lock and
 * TIF_NEED_RESCHED gets set.
 *
 * If it successfully gets the lock, it should increment
 * the preemption count like any spinlock does.
 *
 * (This works on UP too - _raw_spin_trylock will never
 * return false in that case)
 */
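/*
 * As of this era of the tree, the scheduler reaches the two helpers below
 * through the release_kernel_lock()/reacquire_kernel_lock() wrappers in
 * <linux/smp_lock.h>, which check current->lock_depth first; that is what
 * makes the BKL "transparently dropped and reacquired over schedule()".
 */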
int __lockfunc __reacquire_kernel_lock(void)
{
	while (!_raw_spin_trylock(&kernel_flag)) {
		if (need_resched())
			return -EAGAIN;
		cpu_relax();
	}
	preempt_disable();
	return 0;
}

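/*
 * __release_kernel_lock() runs from inside the scheduler, just before a
 * BKL-holding task is switched away, so it uses preempt_enable_no_resched():
 * a reschedule is already in progress, and re-checking TIF_NEED_RESCHED
 * here would be pointless.
 */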
void __lockfunc __release_kernel_lock(void)
{
	_raw_spin_unlock(&kernel_flag);
	preempt_enable_no_resched();
}

/*
 * These are the BKL spinlocks - we try to be polite about preemption.
 * If SMP is not on (ie UP preemption), this all goes away because the
 * _raw_spin_trylock() will always succeed.
 */
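/*
 * The polite path below is a test-and-test-and-set loop: it busy-waits on
 * spin_is_locked() with preemption enabled, so the waiting task can still
 * be preempted, and only the final trylock runs with preemption disabled.
 */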
#ifdef CONFIG_PREEMPT
static inline void __lock_kernel(void)
{
	preempt_disable();
	if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
		/*
		 * If preemption was disabled even before this
		 * was called, there's nothing we can be polite
		 * about - just spin.
		 */
		if (preempt_count() > 1) {
			_raw_spin_lock(&kernel_flag);
			return;
		}

		/*
		 * Otherwise, let's wait for the kernel lock
		 * with preemption enabled..
		 */
		do {
			preempt_enable();
			while (spin_is_locked(&kernel_flag))
				cpu_relax();
			preempt_disable();
		} while (!_raw_spin_trylock(&kernel_flag));
	}
}

#else

/*
 * Non-preemption case - just get the spinlock
 */
static inline void __lock_kernel(void)
{
	_raw_spin_lock(&kernel_flag);
}
#endif

static inline void __unlock_kernel(void)
{
	/*
	 * the BKL is not covered by lockdep, so we open-code the
	 * unlocking sequence (and thus avoid the dep-chain ops):
	 */
	_raw_spin_unlock(&kernel_flag);
	preempt_enable();
}

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously, so we only need to
 * worry about other CPU's.
 */
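/*
 * current->lock_depth is -1 while the task does not hold the BKL and counts
 * nested lock_kernel() calls thereafter: the underlying spinlock is taken
 * only on the outermost acquisition here, and dropped again only when the
 * depth falls back below zero in _unlock_kernel().
 */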
void __lockfunc _lock_kernel(void)
{
	int depth = current->lock_depth+1;
	if (likely(!depth))
		__lock_kernel();
	current->lock_depth = depth;
}

void __lockfunc _unlock_kernel(void)
{
	BUG_ON(current->lock_depth < 0);
	if (likely(--current->lock_depth < 0))
		__unlock_kernel();
}

EXPORT_SYMBOL(_lock_kernel);
EXPORT_SYMBOL(_unlock_kernel);
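
/*
 * Usage sketch (illustrative only; do_legacy_ioctl() is a hypothetical
 * helper): legacy code normally reaches _lock_kernel()/_unlock_kernel()
 * through the lock_kernel()/unlock_kernel() wrappers in
 * <linux/smp_lock.h>. Recursive acquisitions just bump current->lock_depth;
 * the lock is really dropped only by the outermost unlock_kernel().
 *
 *	static int legacy_ioctl(struct inode *inode, struct file *filp,
 *				unsigned int cmd, unsigned long arg)
 *	{
 *		int ret;
 *
 *		lock_kernel();
 *		ret = do_legacy_ioctl(cmd, arg);
 *		unlock_kernel();
 *		return ret;
 *	}
 */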