#ifndef __LINUX_SMPLOCK_H
#define __LINUX_SMPLOCK_H

#ifdef CONFIG_LOCK_KERNEL
#include <linux/sched.h>

/*
 * Big Kernel Lock (BKL) helpers.
 *
 * current->lock_depth is negative while the task does not hold the BKL
 * and counts recursive lock_kernel() nesting otherwise, so a
 * non-negative depth means "this task owns the kernel lock".
 */
#define kernel_locked()		(current->lock_depth >= 0)

extern int __lockfunc __reacquire_kernel_lock(void);
extern void __lockfunc __release_kernel_lock(void);

/*
 * Release/re-acquire global kernel lock for the scheduler.
 * Drops the BKL around a context switch when the outgoing task holds it.
 */
#define release_kernel_lock(tsk) do { 		\
	if (unlikely((tsk)->lock_depth >= 0))	\
		__release_kernel_lock();	\
} while (0)

/*
 * Re-take the BKL on behalf of @task after a context switch, but only
 * if the task held it when it was scheduled out (lock_depth >= 0).
 *
 * Returns 0 when nothing had to be re-acquired; otherwise it returns
 * whatever __reacquire_kernel_lock() reports.
 */
static inline int reacquire_kernel_lock(struct task_struct *task)
{
	if (unlikely(task->lock_depth >= 0))
		return __reacquire_kernel_lock();
	return 0;
}

extern void __lockfunc lock_kernel(void)	__acquires(kernel_lock);
extern void __lockfunc unlock_kernel(void)	__releases(kernel_lock);

#else

/*
 * BKL compiled out: every operation is a no-op and, by convention,
 * the "lock" is reported as always held.
 */
#define lock_kernel()				do { } while(0)
#define unlock_kernel()				do { } while(0)
#define release_kernel_lock(task)		do { } while(0)
#define reacquire_kernel_lock(task)		0
#define kernel_locked()				1

#endif /* CONFIG_LOCK_KERNEL */
#endif /* __LINUX_SMPLOCK_H */