/*
 * lib/kernel_lock.c
 *
 * This is the traditional BKL - big kernel lock. Largely
 * relegated to obsolescence, but used by various less
 * important (or lazy) subsystems.
 */
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/semaphore.h>
#include <linux/smp_lock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bkl.h>

/*
 * The 'big kernel lock'
 *
 * This spinlock is taken and released recursively by lock_kernel()
 * and unlock_kernel(). It is transparently dropped and reacquired
 * over schedule(). It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Don't use in new code.
 */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);

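/*
 * Illustration (a sketch, not part of this file's API): per-task
 * recursion means nested lock_kernel()/unlock_kernel() pairs are safe;
 * only the outermost pair touches the spinlock. The two helpers below
 * are hypothetical.
 */
#if 0
static void example_inner(void)
{
	lock_kernel();			/* lock_depth 0 -> 1: no spinlock op */
	/* ... poke some legacy state ... */
	unlock_kernel();		/* lock_depth 1 -> 0: BKL still held */
}

static void example_outer(void)
{
	lock_kernel();			/* lock_depth -1 -> 0: spinlock taken */
	example_inner();		/* recursing just bumps the depth */
	unlock_kernel();		/* lock_depth 0 -> -1: spinlock dropped */
}
#endif
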
/*
 * Acquire/release the underlying lock from the scheduler.
 *
 * This is called with preemption disabled, and should
 * return an error value if it cannot get the lock and
 * TIF_NEED_RESCHED gets set.
 *
 * If it successfully gets the lock, it should increment
 * the preemption count like any spinlock does.
 *
 * (This works on UP too - _raw_spin_trylock will never
 * return false in that case)
 */
int __lockfunc __reacquire_kernel_lock(void)
{
	while (!_raw_spin_trylock(&kernel_flag)) {
		if (need_resched())
			return -EAGAIN;
		cpu_relax();
	}
	preempt_disable();
	return 0;
}

void __lockfunc __release_kernel_lock(void)
{
	_raw_spin_unlock(&kernel_flag);
	preempt_enable_no_resched();
}

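/*
 * Caller-side sketch (simplified, assumed from the scheduler): around a
 * context switch, a BKL holder has the lock dropped for it and taken
 * back before it resumes; if reacquisition sees need_resched(), the
 * scheduler retries the switch instead of spinning through it.
 */
#if 0
	/* on the way into the context switch, preemption already off: */
	if (unlikely(prev->lock_depth >= 0))
		__release_kernel_lock();

	/* once this task is switched back in: */
	if (unlikely(__reacquire_kernel_lock() < 0))
		goto need_resched_nonpreemptible;	/* pick a task again */
#endif
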
/*
 * These are the BKL spinlocks - we try to be polite about preemption.
 * If SMP is not on (ie UP preemption), this all goes away because the
 * _raw_spin_trylock() will always succeed.
 */
#ifdef CONFIG_PREEMPT
static inline void __lock_kernel(void)
{
	preempt_disable();
	if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
		/*
		 * If preemption was disabled even before this
		 * was called, there's nothing we can be polite
		 * about - just spin.
		 */
		if (preempt_count() > 1) {
			_raw_spin_lock(&kernel_flag);
			return;
		}

		/*
		 * Otherwise, let's wait for the kernel lock
		 * with preemption enabled..
		 */
		do {
			preempt_enable();
			while (spin_is_locked(&kernel_flag))
				cpu_relax();
			preempt_disable();
		} while (!_raw_spin_trylock(&kernel_flag));
	}
}

#else

/*
 * Non-preemption case - just get the spinlock
 */
static inline void __lock_kernel(void)
{
	_raw_spin_lock(&kernel_flag);
}
#endif

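/*
 * The same "polite spin" idiom, sketched as a hypothetical generic
 * helper: wait with preemption enabled so a runnable higher-priority
 * task isn't blocked by our spinning, and keep preemption off only
 * across the trylock itself.
 */
#if 0
static void polite_spin_lock(spinlock_t *lock)
{
	preempt_disable();
	while (!_raw_spin_trylock(lock)) {
		preempt_enable();		/* let anyone else run */
		while (spin_is_locked(lock))
			cpu_relax();		/* cheap, read-only wait */
		preempt_disable();
	}
	/* returns with the lock held and preemption disabled */
}
#endif
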
static inline void __unlock_kernel(void)
{
	/*
	 * the BKL is not covered by lockdep, so we open-code the
	 * unlocking sequence (and thus avoid the dep-chain ops):
	 */
	_raw_spin_unlock(&kernel_flag);
	preempt_enable();
}

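/*
 * For contrast (a sketch): the lockdep-tracked path this open-coding
 * avoids would be the ordinary unlock, which also runs the dependency
 * bookkeeping before enabling preemption.
 */
#if 0
	spin_unlock(&kernel_flag);	/* lockdep-annotated equivalent */
#endif
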
/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously, so we only need to
 * worry about other CPUs.
 */
void __lockfunc _lock_kernel(const char *func, const char *file, int line)
{
	int depth = current->lock_depth + 1;

	trace_lock_kernel(func, file, line);

	if (likely(!depth))
		__lock_kernel();
	current->lock_depth = depth;
}

void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
{
	BUG_ON(current->lock_depth < 0);
	if (likely(--current->lock_depth < 0))
		__unlock_kernel();

	trace_unlock_kernel(func, file, line);
}

EXPORT_SYMBOL(_lock_kernel);
EXPORT_SYMBOL(_unlock_kernel);
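
/*
 * Typical legacy caller (hypothetical driver code, for illustration):
 * the whole operation serializes against every other BKL user, which
 * is why new code should take a private lock instead.
 */
#if 0
static int example_legacy_ioctl(struct inode *inode, struct file *filp,
				unsigned int cmd, unsigned long arg)
{
	int ret;

	lock_kernel();		/* expands to _lock_kernel(__func__, ...) */
	ret = example_handle_cmd(cmd, arg);	/* hypothetical helper */
	unlock_kernel();

	return ret;
}
#endif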