#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/linkage.h>
#include <linux/list.h>

/*
 * We use the MSB mostly because it's available; see <linux/preempt_mask.h>
 * for the other bits -- can't include that header due to inclusion hell.
 */
#define PREEMPT_NEED_RESCHED	0x80000000
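
/*
 * Illustrative note (an assumption based on the <linux/preempt_mask.h>
 * layout of this era; that header holds the authoritative values): the
 * low byte of preempt_count is the preempt_disable() nesting depth, the
 * next byte counts softirq nesting, higher bits track hardirq/NMI
 * nesting, and the MSB above is PREEMPT_NEED_RESCHED.  The resched bit
 * is kept inverted (cleared while a reschedule is pending) so that the
 * single "!*preempt_count_ptr()" test used by preempt_check_resched()
 * below means "preemption is enabled and a reschedule is pending".
 */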

#include <asm/preempt.h>

#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
  extern void add_preempt_count(int val);
  extern void sub_preempt_count(int val);
#else
# define add_preempt_count(val)	do { *preempt_count_ptr() += (val); } while (0)
# define sub_preempt_count(val)	do { *preempt_count_ptr() -= (val); } while (0)
#endif

#define inc_preempt_count() add_preempt_count(1)
#define dec_preempt_count() sub_preempt_count(1)

#ifdef CONFIG_PREEMPT

asmlinkage void preempt_schedule(void);

#define preempt_check_resched() \
do { \
	if (unlikely(!*preempt_count_ptr())) \
		preempt_schedule(); \
} while (0)

#ifdef CONFIG_CONTEXT_TRACKING

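/*
 * preempt_schedule_context() is the context-tracking aware counterpart
 * of preempt_schedule(); it is expected to bracket the scheduler call
 * with exception_enter()/exception_exit() so that the user/kernel
 * context-tracking state (and RCU's user-mode accounting) stays
 * consistent across the preemption.
 */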
void preempt_schedule_context(void);

#define preempt_check_resched_context() \
do { \
	if (unlikely(!*preempt_count_ptr())) \
		preempt_schedule_context(); \
} while (0)
#else

#define preempt_check_resched_context() preempt_check_resched()

#endif /* CONFIG_CONTEXT_TRACKING */

#else /* !CONFIG_PREEMPT */

#define preempt_check_resched()		do { } while (0)
#define preempt_check_resched_context()	do { } while (0)

#endif /* CONFIG_PREEMPT */


#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
	inc_preempt_count(); \
	barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	dec_preempt_count(); \
} while (0)

#define preempt_enable_no_resched()	sched_preempt_enable_no_resched()

#define preempt_enable() \
do { \
	preempt_enable_no_resched(); \
	preempt_check_resched(); \
} while (0)
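
/*
 * Example usage (a minimal sketch; my_counter is a hypothetical
 * DEFINE_PER_CPU variable, not part of this header): disable preemption
 * so the task cannot migrate between reading and writing per-CPU data:
 *
 *	preempt_disable();
 *	__this_cpu_inc(my_counter);
 *	preempt_enable();
 *
 * preempt_enable() ends the critical section and, via
 * preempt_check_resched(), reschedules immediately if the preemption
 * count dropped to zero and a reschedule is pending.
 */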

/* For debugging and tracer internals only! */
#define add_preempt_count_notrace(val) \
	do { *preempt_count_ptr() += (val); } while (0)
#define sub_preempt_count_notrace(val) \
	do { *preempt_count_ptr() -= (val); } while (0)
#define inc_preempt_count_notrace() add_preempt_count_notrace(1)
#define dec_preempt_count_notrace() sub_preempt_count_notrace(1)

#define preempt_disable_notrace() \
do { \
	inc_preempt_count_notrace(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	dec_preempt_count_notrace(); \
} while (0)

/* preempt_check_resched is OK to trace */
#define preempt_enable_notrace() \
do { \
	preempt_enable_no_resched_notrace(); \
	preempt_check_resched_context(); \
} while (0)
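
/*
 * Why the notrace variants exist: the regular add/sub_preempt_count()
 * can themselves be hooked by the preemption tracer (see
 * CONFIG_PREEMPT_TRACER above), so a tracing callback that used plain
 * preempt_disable() could recurse into the tracer.  A sketch of the
 * intended use (my_trace_callback is hypothetical, not part of this
 * header):
 *
 *	static void my_trace_callback(void)
 *	{
 *		preempt_disable_notrace();
 *		// record the event without re-entering the tracer
 *		preempt_enable_notrace();
 *	}
 */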

#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that we don't have things like get_user/put_user
 * that can cause faults and scheduling migrate into our preempt-protected
 * region.
 */
#define preempt_disable()			barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()		barrier()
#define preempt_enable()			barrier()

#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()
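
/*
 * A sketch of the hazard the barrier() prevents (illustrative; uptr and
 * val are hypothetical): if preempt_disable()/preempt_enable() expanded
 * to nothing at all, the compiler would be free to move memory accesses
 * across them:
 *
 *	preempt_disable();
 *	ret = get_user(val, uptr);	// may fault and schedule
 *	preempt_enable();
 *
 * With barrier() on both sides the compiler cannot hoist or sink the
 * access out of the region the caller intended to protect.
 */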

#endif /* CONFIG_PREEMPT_COUNT */

#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *	notifier: struct preempt_notifier for the task being scheduled
 *	cpu: cpu we're scheduled on
 * @sched_out: we've just been preempted
 *	notifier: struct preempt_notifier for the task being preempted
 *	next: the task that's kicking us out
 *
 * Please note that sched_in and sched_out are called in different
 * contexts: sched_out is called with the rq lock held and irqs disabled,
 * while sched_in is called without the rq lock and with irqs enabled.
 * This difference is intentional and is depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				struct preempt_ops *ops)
{
	INIT_HLIST_NODE(&notifier->link);
	notifier->ops = ops;
}
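
/*
 * Example usage (a minimal sketch; struct my_ctx and its callbacks are
 * hypothetical, not part of this header).  Embed a preempt_notifier in
 * your own structure and recover it with container_of(), as the
 * preempt_notifier kernel-doc above suggests:
 *
 *	struct my_ctx {
 *		struct preempt_notifier nb;
 *		// per-task state to save/restore
 *	};
 *
 *	static void my_sched_out(struct preempt_notifier *n,
 *				 struct task_struct *next)
 *	{
 *		struct my_ctx *ctx = container_of(n, struct my_ctx, nb);
 *		// save ctx state; rq lock held, irqs disabled
 *	}
 *
 *	static void my_sched_in(struct preempt_notifier *n, int cpu)
 *	{
 *		struct my_ctx *ctx = container_of(n, struct my_ctx, nb);
 *		// restore ctx state on the new cpu
 *	}
 *
 *	static struct preempt_ops my_ops = {
 *		.sched_in  = my_sched_in,
 *		.sched_out = my_sched_out,
 *	};
 *
 *	// from the task that wants notifications:
 *	preempt_notifier_init(&ctx->nb, &my_ops);
 *	preempt_notifier_register(&ctx->nb);
 */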

#endif

#endif /* __LINUX_PREEMPT_H */