#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/linkage.h>
#include <linux/list.h>

/*
 * We use the MSB mostly because it's available; see <linux/preempt_mask.h>
 * for the other bits -- we can't include that header due to inclusion hell.
 */
#define PREEMPT_NEED_RESCHED	0x80000000

#include <asm/preempt.h>

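/*
 * Illustrative note (a sketch, not part of this API): on architectures
 * that fold the need-resched flag into the per-CPU preempt count (x86,
 * for example), the flag is stored inverted so that a single
 * decrement-and-test-for-zero covers both conditions at once:
 *
 *	raw == PREEMPT_NEED_RESCHED | 1	-> count 1, no resched wanted
 *	raw == 1			-> count 1, resched wanted
 *	raw == 0			-> count 0 AND resched wanted;
 *					   preempt_count_dec_and_test() fires
 *
 * Architectures without the fold test tif_need_resched() separately in
 * their <asm/preempt.h>.
 */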
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
#else
#define preempt_count_add(val)	__preempt_count_add(val)
#define preempt_count_sub(val)	__preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif

#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)
#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_no_resched() sched_preempt_enable_no_resched()

#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)

#define preempt_check_resched() \
do { \
	if (should_resched()) \
		__preempt_schedule(); \
} while (0)

#else
#define preempt_enable() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)
#define preempt_check_resched() do { } while (0)
#endif
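
/*
 * A minimal usage sketch (hypothetical per-CPU counter "my_counter";
 * not defined in this header): disabling preemption pins the task to
 * its current CPU, so a read-modify-write of per-CPU data cannot race
 * with migration:
 *
 *	preempt_disable();
 *	__this_cpu_inc(my_counter);
 *	preempt_enable();
 *
 * If a reschedule was requested while preemption was off, the
 * CONFIG_PREEMPT preempt_enable() above calls __preempt_schedule() as
 * soon as the count drops to zero.
 */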

#define preempt_disable_notrace() \
do { \
	__preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#ifdef CONFIG_PREEMPT

#ifndef CONFIG_CONTEXT_TRACKING
#define __preempt_schedule_context() __preempt_schedule()
#endif

#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_context(); \
} while (0)
#else
#define preempt_enable_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)
#endif

#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that things like get_user()/put_user(), which can
 * fault and schedule, do not migrate into our preempt-protected region.
 */
#define preempt_disable()			barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()		barrier()
#define preempt_enable()			barrier()
#define preempt_check_resched()			do { } while (0)

#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()
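
/*
 * An illustrative hazard sketch (hypothetical "uptr" and per-CPU "cnt";
 * assumptions, not in-tree code): without the barrier(), the compiler
 * would be free to move the faulting user access into the section:
 *
 *	preempt_disable();
 *	__this_cpu_write(cnt, 0);
 *	preempt_enable();
 *	get_user(val, uptr);	// can fault and schedule; must not be
 *				// reordered above preempt_enable()
 */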

#endif /* CONFIG_PREEMPT_COUNT */

#ifdef MODULE
/*
 * Modules have no business playing preemption tricks.
 */
#undef sched_preempt_enable_no_resched
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
#endif

#define preempt_set_need_resched() \
do { \
	set_preempt_need_resched(); \
} while (0)
#define preempt_fold_need_resched() \
do { \
	if (tif_need_resched()) \
		set_preempt_need_resched(); \
} while (0)
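
/*
 * Usage note (hedged; the callers live in the scheduler, not here):
 * preempt_fold_need_resched() is intended for paths that may have just
 * gained TIF_NEED_RESCHED -- e.g. on receipt of a scheduler IPI -- so
 * that the flag is mirrored into the preempt count and the next
 * preempt_enable() observes it.
 */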

#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Note that sched_in and sched_out are called in different contexts:
 * sched_out is called with the rq lock held and irqs disabled, while
 * sched_in is called without the rq lock and with irqs enabled.  This
 * difference is intentional and is depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
					 struct preempt_ops *ops)
{
	INIT_HLIST_NODE(&notifier->link);
	notifier->ops = ops;
}
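
/*
 * A minimal sketch of the container_of() pattern mentioned above
 * (hypothetical "struct my_ctx"; not an in-tree user):
 *
 *	struct my_ctx {
 *		struct preempt_notifier pn;
 *		int last_cpu;
 *	};
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu)
 *	{
 *		struct my_ctx *ctx = container_of(pn, struct my_ctx, pn);
 *		ctx->last_cpu = cpu;
 *	}
 *
 *	static void my_sched_out(struct preempt_notifier *pn,
 *				 struct task_struct *next)
 *	{
 *	}
 *
 *	static struct preempt_ops my_ops = {
 *		.sched_in  = my_sched_in,
 *		.sched_out = my_sched_out,
 *	};
 *
 * Initialize with preempt_notifier_init(&ctx->pn, &my_ops) and call
 * preempt_notifier_register(&ctx->pn) from the task that wants the
 * callbacks.
 */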

#endif

#endif /* __LINUX_PREEMPT_H */