/*
 * lib/kernel_lock.c
 *
 * This is the traditional BKL - big kernel lock. Largely
 * relegated to obsolescence, but used by various less
 * important (or lazy) subsystems.
 */
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/semaphore.h>

/*
 * The 'big kernel semaphore'
 *
 * This mutex is taken and released recursively by lock_kernel()
 * and unlock_kernel(). It is transparently dropped and reacquired
 * over schedule(). It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Note: code locked by this semaphore will only be serialized against
 * other code using the same locking facility. The code guarantees that
 * the task remains on the same CPU.
 *
 * Don't use in new code.
 */
static DECLARE_MUTEX(kernel_sem);

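/*
 * A minimal sketch of the recursion convention, assuming lock_depth
 * starts at -1 in a task that does not hold the BKL:
 *
 *	lock_kernel();		// lock_depth: -1 -> 0, down(&kernel_sem)
 *	lock_kernel();		// lock_depth:  0 -> 1, no semaphore op
 *	unlock_kernel();	// lock_depth:  1 -> 0, no semaphore op
 *	unlock_kernel();	// lock_depth:  0 -> -1, up(&kernel_sem)
 *
 * Only the outermost acquire/release actually touches kernel_sem.
 */
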
/*
 * Re-acquire the kernel semaphore.
 *
 * This function is called with preemption off.
 *
 * We are executing in schedule() so the code must be extremely careful
 * about recursion, both due to the down() and due to the enabling of
 * preemption. schedule() will re-check the preemption flag after
 * reacquiring the semaphore.
 */
int __lockfunc __reacquire_kernel_lock(void)
{
	struct task_struct *task = current;
	int saved_lock_depth = task->lock_depth;

	BUG_ON(saved_lock_depth < 0);

	task->lock_depth = -1;
	preempt_enable_no_resched();

	down(&kernel_sem);

	preempt_disable();
	task->lock_depth = saved_lock_depth;

	return 0;
}

void __lockfunc __release_kernel_lock(void)
{
	up(&kernel_sem);
}

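/*
 * For context, a simplified sketch of how the scheduler uses the two
 * helpers above. The release_kernel_lock()/reacquire_kernel_lock()
 * wrappers live in <linux/smp_lock.h>; the exact call sites in
 * kernel/sched.c may differ between versions:
 *
 *	// in schedule(), with preemption already disabled
 *	release_kernel_lock(prev);	// up(&kernel_sem) if prev held it
 *	// ... pick the next task and context switch ...
 *	if (unlikely(reacquire_kernel_lock(current) < 0))
 *		goto need_resched;	// take kernel_sem back for current
 *
 * The BKL is therefore not held across a sleep: callers of lock_kernel()
 * must not rely on it for protection over anything that can schedule().
 */
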
/*
 * Getting the big kernel semaphore.
 */
void __lockfunc lock_kernel(void)
{
	struct task_struct *task = current;
	int depth = task->lock_depth + 1;

	if (likely(!depth))
		/*
		 * No recursion worries - we set up lock_depth _after_
		 */
		down(&kernel_sem);

	task->lock_depth = depth;
}

void __lockfunc unlock_kernel(void)
{
	struct task_struct *task = current;

	BUG_ON(task->lock_depth < 0);

	if (likely(--task->lock_depth < 0))
		up(&kernel_sem);
}

EXPORT_SYMBOL(lock_kernel);
EXPORT_SYMBOL(unlock_kernel);
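
/*
 * A minimal usage sketch for legacy code; the function names below are
 * hypothetical and not taken from any real driver:
 *
 *	static int legacy_dev_ioctl(struct inode *inode, struct file *filp,
 *				    unsigned int cmd, unsigned long arg)
 *	{
 *		int ret;
 *
 *		lock_kernel();		// serialized against other BKL users only
 *		ret = legacy_dev_do_ioctl(cmd, arg);	// may nest lock_kernel()
 *		unlock_kernel();	// semaphore released when depth drops below 0
 *		return ret;
 *	}
 *
 * Remember that the lock is transparently dropped over schedule(), so it
 * does not protect state across sleeping operations.
 */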