/* include/linux/smp_lock.h — Big Kernel Lock (BKL) helper declarations */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001#ifndef __LINUX_SMPLOCK_H
2#define __LINUX_SMPLOCK_H
3
4#include <linux/config.h>
Al Virof0373602005-11-13 16:06:57 -08005#ifdef CONFIG_LOCK_KERNEL
Linus Torvalds1da177e2005-04-16 15:20:36 -07006#include <linux/sched.h>
7#include <linux/spinlock.h>
8
/*
 * Nonzero while the current task holds the Big Kernel Lock;
 * lock_depth is -1 when the BKL is not held by this task.
 */
#define kernel_locked()		(current->lock_depth >= 0)
10
11extern int __lockfunc __reacquire_kernel_lock(void);
12extern void __lockfunc __release_kernel_lock(void);
13
/*
 * Release/re-acquire global kernel lock for the scheduler
 */
#define release_kernel_lock(tsk) do { 		\
	if (unlikely((tsk)->lock_depth >= 0))	\
		__release_kernel_lock();	\
} while (0)
21
/*
 * Non-SMP kernels will never block on the kernel lock,
 * so we are better off returning a constant zero from
 * reacquire_kernel_lock() so that the compiler can see
 * it at compile-time.
 */
#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_BKL)
# define return_value_on_smp return
#else
# define return_value_on_smp
#endif
33
34static inline int reacquire_kernel_lock(struct task_struct *task)
35{
36 if (unlikely(task->lock_depth >= 0))
37 return_value_on_smp __reacquire_kernel_lock();
38 return 0;
39}
40
41extern void __lockfunc lock_kernel(void) __acquires(kernel_lock);
42extern void __lockfunc unlock_kernel(void) __releases(kernel_lock);
43
44#else
45
46#define lock_kernel() do { } while(0)
47#define unlock_kernel() do { } while(0)
48#define release_kernel_lock(task) do { } while(0)
49#define reacquire_kernel_lock(task) 0
50#define kernel_locked() 1
51
52#endif /* CONFIG_LOCK_KERNEL */
53#endif /* __LINUX_SMPLOCK_H */