blob: 2343d87152996b3988a8406aabfc3625fb5723ed [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001#ifndef __LINUX_PREEMPT_H
2#define __LINUX_PREEMPT_H
3
4/*
5 * include/linux/preempt.h - macros for accessing and manipulating
6 * preempt_count (used for kernel preemption, interrupt count, etc.)
7 */
8
Linus Torvalds1da177e2005-04-16 15:20:36 -07009#include <linux/linkage.h>
Avi Kivitye107be32007-07-26 13:40:43 +020010#include <linux/list.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070011
/*
 * We use the MSB mostly because it's available; see <linux/preempt_mask.h> for
 * the other bits -- can't include that header due to inclusion hell.
 */
#define PREEMPT_NEED_RESCHED	0x80000000

#include <asm/preempt.h>

/*
 * With CONFIG_DEBUG_PREEMPT or CONFIG_PREEMPT_TRACER the count updates are
 * real (out-of-line) functions so the debug/trace code can hook them;
 * otherwise they collapse to the arch-provided __preempt_count_*() ops.
 */
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
/* decrement, then report whether a reschedule is now due */
#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
#else
#define preempt_count_add(val)	__preempt_count_add(val)
#define preempt_count_sub(val)	__preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif

/* Raw (never-traced) single-step helpers built on the arch primitives. */
#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

/* Possibly-traced single-step helpers; see the #if above. */
#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)
Frederic Weisbeckerbdd4e852011-06-08 01:13:27 +020035
#ifdef CONFIG_PREEMPT_COUNT

/*
 * barrier() after the increment / before the decrement keeps the compiler
 * from moving memory accesses out of the preempt-protected region.
 */
#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)

/* Re-enable preemption without checking for a pending reschedule. */
#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_no_resched() sched_preempt_enable_no_resched()

#ifdef CONFIG_PREEMPT
asmlinkage void preempt_schedule(void);
/*
 * If the decrement drops the count to zero and a reschedule is pending
 * (see preempt_count_dec_and_test()), preempt immediately.
 */
#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		preempt_schedule(); \
} while (0)

#define preempt_check_resched() \
do { \
	if (should_resched()) \
		preempt_schedule(); \
} while (0)

#else
/* !CONFIG_PREEMPT: enabling never triggers an immediate reschedule. */
#define preempt_enable() preempt_enable_no_resched()
#define preempt_check_resched() do { } while (0)
#endif
Steven Rostedt50282522008-05-12 21:20:41 +020071
/*
 * _notrace variants: same semantics as preempt_disable()/preempt_enable()
 * but built on the raw __preempt_count_*() ops, so they are safe to use
 * from the tracing code itself (no recursion into the tracer).
 */
#define preempt_disable_notrace() \
do { \
	__preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#ifdef CONFIG_PREEMPT

/*
 * With context tracking, use the variant that handles the user/kernel
 * context transition; otherwise plain preempt_schedule() suffices.
 */
#ifdef CONFIG_CONTEXT_TRACKING
asmlinkage void preempt_schedule_context(void);
#else
#define preempt_schedule_context() preempt_schedule()
#endif

#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		preempt_schedule_context(); \
} while (0)
#else
#define preempt_enable_notrace() preempt_enable_no_resched_notrace()
#endif
Steven Rostedt50282522008-05-12 21:20:41 +0200101
#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that we don't have things like get_user/put_user
 * that can cause faults and scheduling migrate into our preempt-protected
 * region.
 */
#define preempt_disable()			barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()		barrier()
#define preempt_enable()			barrier()
#define preempt_check_resched()			do { } while (0)

#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()

#endif /* CONFIG_PREEMPT_COUNT */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700121
#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *            notifier: struct preempt_notifier for the task being scheduled
 *            cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *            notifier: struct preempt_notifier for the task being preempted
 *            next: the task that's kicking us out
 *
 * Please note that sched_in and out are called under different
 * contexts.  sched_out is called with rq lock held and irq disabled
 * while sched_in is called without rq lock and irq enabled.  This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};
145
/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

/* Attach/detach a notifier to/from the current task. */
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);
160
161static inline void preempt_notifier_init(struct preempt_notifier *notifier,
162 struct preempt_ops *ops)
163{
164 INIT_HLIST_NODE(&notifier->link);
165 notifier->ops = ops;
166}
167
#endif /* CONFIG_PREEMPT_NOTIFIERS */

#endif /* __LINUX_PREEMPT_H */