#ifndef __LINUX_SMPLOCK_H
#define __LINUX_SMPLOCK_H

#ifdef CONFIG_LOCK_KERNEL
#include <linux/sched.h>

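/*
 * A task's ->lock_depth is -1 while it does not hold the Big Kernel
 * Lock and counts recursive acquisitions (0, 1, ...) while it does,
 * so the test below asks "does current hold the BKL?".
 */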
#define kernel_locked()		(current->lock_depth >= 0)

extern int __lockfunc __reacquire_kernel_lock(void);
extern void __lockfunc __release_kernel_lock(void);

/*
 * Release/re-acquire global kernel lock for the scheduler
 */
#define release_kernel_lock(tsk) do { 		\
	if (unlikely((tsk)->lock_depth >= 0))	\
		__release_kernel_lock();	\
} while (0)
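
/*
 * Sketch of the intended caller (the scheduler), roughly:
 *
 *	release_kernel_lock(prev);
 *	... pick the next task and context-switch ...
 *	reacquire_kernel_lock(current);
 *
 * ->lock_depth itself is left untouched across the switch, so the
 * task's BKL nesting is restored exactly as it left it.
 */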

/*
 * Non-SMP kernels will never block on the kernel lock,
 * so we are better off returning a constant zero from
 * reacquire_kernel_lock() so that the compiler can see
 * it at compile-time.
 */
#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_BKL)
# define return_value_on_smp return
#else
# define return_value_on_smp
#endif

static inline int reacquire_kernel_lock(struct task_struct *task)
{
	if (unlikely(task->lock_depth >= 0))
		return_value_on_smp __reacquire_kernel_lock();
	return 0;
}
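
/*
 * Expansion example: with CONFIG_SMP and without CONFIG_PREEMPT_BKL,
 * return_value_on_smp makes the body above read
 *
 *	if (unlikely(task->lock_depth >= 0))
 *		return __reacquire_kernel_lock();
 *	return 0;
 *
 * while in the other configurations the call's result is discarded
 * and the function always falls through to the constant "return 0",
 * which the compiler can fold at compile time.
 */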

extern void __lockfunc lock_kernel(void)	__acquires(kernel_lock);
extern void __lockfunc unlock_kernel(void)	__releases(kernel_lock);
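
/*
 * Caller-side sketch: the BKL is recursive for the owning task, so
 * nested sections are safe:
 *
 *	lock_kernel();		(->lock_depth: -1 -> 0, lock taken)
 *	lock_kernel();		(->lock_depth:  0 -> 1, no deadlock)
 *	... BKL-protected work ...
 *	unlock_kernel();	(->lock_depth:  1 -> 0, still held)
 *	unlock_kernel();	(->lock_depth:  0 -> -1, lock dropped)
 */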

#else

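/*
 * With CONFIG_LOCK_KERNEL disabled the BKL compiles away entirely:
 * the stubs below keep callers building, and kernel_locked() is a
 * constant 1 so that code asserting it holds the lock keeps working.
 */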
#define lock_kernel()				do { } while(0)
#define unlock_kernel()				do { } while(0)
#define release_kernel_lock(task)		do { } while(0)
#define reacquire_kernel_lock(task)		0
#define kernel_locked()				1

#endif /* CONFIG_LOCK_KERNEL */
#endif /* __LINUX_SMPLOCK_H */