#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/linkage.h>
#include <linux/list.h>

/*
 * We use the MSB mostly because it's available; see <linux/preempt_mask.h>
 * for the other bits -- we can't include that header due to inclusion hell.
 */
#define PREEMPT_NEED_RESCHED	0x80000000

#include <asm/preempt.h>

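/*
 * Illustrative note (an assumption, not part of the original header):
 * folding the need-resched flag into the preempt count lets
 * preempt_enable() check "preempt count zero and reschedule needed"
 * with a single decrement-and-test in __preempt_count_dec_and_test(),
 * rather than two separate loads.
 */
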
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
#else
#define preempt_count_add(val)	__preempt_count_add(val)
#define preempt_count_sub(val)	__preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif

#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)

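/*
 * Illustrative note (not part of the original header): the preempt
 * count nests, so disable/enable pairs may be stacked; the task only
 * becomes preemptible again once the count returns to zero:
 *
 *	preempt_disable();	// count: 0 -> 1
 *	preempt_disable();	// count: 1 -> 2
 *	preempt_enable();	// count: 2 -> 1, still non-preemptible
 *	preempt_enable();	// count: 1 -> 0, may reschedule here
 */
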
#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_no_resched() sched_preempt_enable_no_resched()

#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)

#define preempt_check_resched() \
do { \
	if (should_resched()) \
		__preempt_schedule(); \
} while (0)

#else
#define preempt_enable() preempt_enable_no_resched()
#define preempt_check_resched() do { } while (0)
#endif
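
/*
 * Usage sketch (illustrative, not part of the original header; the
 * per-CPU counter is hypothetical): disabling preemption pins the task
 * to its current CPU, making per-CPU data safe to modify:
 *
 *	preempt_disable();
 *	__this_cpu_inc(hypothetical_counter);
 *	preempt_enable();
 *
 * On CONFIG_PREEMPT kernels, preempt_enable() calls __preempt_schedule()
 * if a reschedule became pending while preemption was off.
 */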

#define preempt_disable_notrace() \
do { \
	__preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#ifdef CONFIG_PREEMPT

#ifndef CONFIG_CONTEXT_TRACKING
#define __preempt_schedule_context() __preempt_schedule()
#endif

#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_context(); \
} while (0)
#else
#define preempt_enable_notrace() preempt_enable_no_resched_notrace()
#endif
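
/*
 * Illustrative note (an assumption, not part of the original header):
 * the _notrace variants use the raw __preempt_count_*() helpers so that
 * tracing code can toggle preemption without recursing into the traced
 * preempt_count_add()/preempt_count_sub() paths:
 *
 *	preempt_disable_notrace();
 *	// ... tracer-internal work ...
 *	preempt_enable_notrace();
 */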

#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that operations like get_user()/put_user(), which
 * can cause faults and scheduling, don't migrate into our
 * preempt-protected region.
 */
#define preempt_disable()			barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()		barrier()
#define preempt_enable()			barrier()
#define preempt_check_resched()			do { } while (0)

#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()
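
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * the barrier() keeps the compiler from moving a faulting access such
 * as get_user() across the boundary of the protected region:
 *
 *	preempt_disable();	// expands to barrier()
 *	// per-CPU work; the get_user() below must not be hoisted up here
 *	preempt_enable();	// expands to barrier()
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 */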

#endif /* CONFIG_PREEMPT_COUNT */

#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and sched_out are called in different
 * contexts: sched_out is called with the rq lock held and irqs disabled,
 * while sched_in is called without the rq lock and with irqs enabled.
 * This difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};
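
/*
 * Minimal sketch of a notifier implementation (illustrative; these
 * names are hypothetical and not part of this API):
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu)
 *	{
 *		// about to run again on @cpu; irqs are enabled here
 *	}
 *
 *	static void my_sched_out(struct preempt_notifier *pn,
 *				 struct task_struct *next)
 *	{
 *		// just preempted; rq lock held, irqs disabled
 *	}
 *
 *	static struct preempt_ops my_ops = {
 *		.sched_in  = my_sched_in,
 *		.sched_out = my_sched_out,
 *	};
 */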

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				     struct preempt_ops *ops)
{
	INIT_HLIST_NODE(&notifier->link);
	notifier->ops = ops;
}
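
/*
 * Usage sketch (illustrative; the structure and names are hypothetical):
 * the notifier is typically embedded in a larger per-task object and
 * recovered with container_of() inside the callbacks:
 *
 *	struct my_vcpu {
 *		struct preempt_notifier pn;
 *		// ...
 *	};
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu)
 *	{
 *		struct my_vcpu *v = container_of(pn, struct my_vcpu, pn);
 *		// ...
 *	}
 *
 *	// given struct my_vcpu *v:
 *	preempt_notifier_init(&v->pn, &my_ops);
 *	preempt_notifier_register(&v->pn);
 */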

#endif /* CONFIG_PREEMPT_NOTIFIERS */

#endif /* __LINUX_PREEMPT_H */