#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/linkage.h>
#include <linux/list.h>

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count could in theory be the same as the number of
 * interrupts in the system, but we run all interrupt handlers with
 * interrupts disabled, so we cannot have nesting interrupts. Though
 * there are a few palaeontologic drivers which reenable interrupts in
 * the handler, so we need more than one bit here.
 *
 *         PREEMPT_MASK:	0x000000ff
 *         SOFTIRQ_MASK:	0x0000ff00
 *         HARDIRQ_MASK:	0x000f0000
 *             NMI_MASK:	0x00100000
 *       PREEMPT_ACTIVE:	0x00200000
 * PREEMPT_NEED_RESCHED:	0x80000000
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	4
#define NMI_BITS	1

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS)     << NMI_SHIFT)

#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET	(1UL << NMI_SHIFT)
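
/*
 * Worked example (illustrative, not part of the original header): with the
 * layout above, a preempt_count() value of 0x00010101 decomposes as
 *
 *	(0x00010101 & HARDIRQ_MASK) >> HARDIRQ_SHIFT == 1	one hardirq level
 *	(0x00010101 & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT == 1	one softirq/BH level
 *	(0x00010101 & PREEMPT_MASK) >> PREEMPT_SHIFT == 1	one preempt_disable()
 */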

#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)

#define PREEMPT_ACTIVE_BITS	1
#define PREEMPT_ACTIVE_SHIFT	(NMI_SHIFT + NMI_BITS)
#define PREEMPT_ACTIVE	(__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)

/* We use the MSB mostly because it's available */
#define PREEMPT_NEED_RESCHED	0x80000000

/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>

#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
				 | NMI_MASK))

/*
 * Are we doing bottom half or hardware interrupt processing?
 *
 * in_irq()             - We're in (hard) IRQ context
 * in_softirq()         - We're currently processing softirq or have BH disabled
 * in_interrupt()       - We're in IRQ, softirq or NMI context, or have BH disabled
 * in_serving_softirq() - We're currently processing softirq
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())
#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)

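/*
 * Illustrative use (a sketch, not part of the original header): code that
 * can run in both process and interrupt context sometimes keys its
 * allocation mode off these predicates, e.g.
 *
 *	gfp_t gfp = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
 *	buf = kmalloc(len, gfp);
 *
 * where "buf" and "len" are hypothetical locals.
 */
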
/*
 * Are we in NMI context?
 */
#define in_nmi()	(preempt_count() & NMI_MASK)

/*
 * The preempt_count offset after preempt_disable();
 */
#if defined(CONFIG_PREEMPT_COUNT)
# define PREEMPT_DISABLE_OFFSET	PREEMPT_OFFSET
#else
# define PREEMPT_DISABLE_OFFSET	0
#endif

/*
 * The preempt_count offset after spin_lock()
 */
#define PREEMPT_LOCK_OFFSET	PREEMPT_DISABLE_OFFSET

/*
 * The preempt_count offset needed for things like:
 *
 *  spin_lock_bh()
 *
 * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
 * softirqs, such that unlock sequences of:
 *
 *  spin_unlock();
 *  local_bh_enable();
 *
 * Work as expected.
 */
#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)

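/*
 * Worked example (illustrative): with CONFIG_PREEMPT_COUNT=y,
 *
 *	SOFTIRQ_LOCK_OFFSET == SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET
 *	                    == 0x200 + 0x1 == 0x201,
 *
 * so spin_lock_bh() raises preempt_count() by 0x201 and the
 * spin_unlock(); local_bh_enable(); pair removes the same amount in
 * two steps, leaving the count balanced.
 */
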
/*
 * Are we running in atomic context? WARNING: this macro cannot
 * always detect atomic context; in particular, it cannot know about
 * held spinlocks in non-preemptible kernels. Thus it should not be
 * used in the general case to determine whether sleeping is possible.
 * Do not use in_atomic() in driver code.
 */
#define in_atomic()	(preempt_count() != 0)

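/*
 * Example of the limitation above (illustrative): with
 * CONFIG_PREEMPT_COUNT=n, spin_lock() does not touch preempt_count(), so
 * in_atomic() still reads 0 while the lock is held, even though sleeping
 * there would be a bug.
 */
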
/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler)
 */
#define in_atomic_preempt_off() \
		((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_DISABLE_OFFSET)

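/*
 * Worked reading (illustrative): right after the scheduler's own
 * preempt_disable(), a "clean" count is exactly PREEMPT_DISABLE_OFFSET;
 * any other value (once PREEMPT_ACTIVE is masked out) means the caller
 * was already atomic beforehand.
 */
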
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() \
	({ preempt_count_sub(1); should_resched(0); })
#else
#define preempt_count_add(val)	__preempt_count_add(val)
#define preempt_count_sub(val)	__preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif

#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)

#define preempt_active_enter() \
do { \
	preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
	barrier(); \
} while (0)

#define preempt_active_exit() \
do { \
	barrier(); \
	preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
} while (0)

#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_no_resched() sched_preempt_enable_no_resched()

#define preemptible()	(preempt_count() == 0 && !irqs_disabled())

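/*
 * Typical usage (a sketch, not part of the original header): a
 * preempt_disable()/preempt_enable() pair keeps the task on one CPU
 * across a per-CPU access, e.g.
 *
 *	preempt_disable();
 *	__this_cpu_inc(hypothetical_counter);
 *	preempt_enable();
 *
 * where "hypothetical_counter" is an assumed per-CPU variable.
 */
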
#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)

#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_notrace(); \
} while (0)

#define preempt_check_resched() \
do { \
	if (should_resched(0)) \
		__preempt_schedule(); \
} while (0)

#else /* !CONFIG_PREEMPT */
#define preempt_enable() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#define preempt_check_resched() do { } while (0)
#endif /* CONFIG_PREEMPT */

#define preempt_disable_notrace() \
do { \
	__preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that we don't have things like get_user/put_user
 * that can cause faults and scheduling migrate into our preempt-protected
 * region.
 */
#define preempt_disable()			barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()		barrier()
#define preempt_enable()			barrier()
#define preempt_check_resched()			do { } while (0)

#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()
#define preemptible()				0

#endif /* CONFIG_PREEMPT_COUNT */

#ifdef MODULE
/*
 * Modules have no business playing preemption tricks.
 */
#undef sched_preempt_enable_no_resched
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
#endif

#define preempt_set_need_resched() \
do { \
	set_preempt_need_resched(); \
} while (0)
#define preempt_fold_need_resched() \
do { \
	if (tif_need_resched()) \
		set_preempt_need_resched(); \
} while (0)

#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and sched_out are called under different
 * contexts. sched_out is called with the rq lock held and irqs disabled
 * while sched_in is called without the rq lock and with irqs enabled. This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_inc(void);
void preempt_notifier_dec(void);
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				     struct preempt_ops *ops)
{
	INIT_HLIST_NODE(&notifier->link);
	notifier->ops = ops;
}

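/*
 * Typical usage (an illustrative sketch; all names here are hypothetical):
 * embed a struct preempt_notifier in a per-task object and recover it in
 * the callbacks with container_of(), e.g.
 *
 *	struct my_ctx {
 *		struct preempt_notifier pn;
 *	};
 *
 *	static void my_sched_in(struct preempt_notifier *n, int cpu)
 *	{
 *		struct my_ctx *ctx = container_of(n, struct my_ctx, pn);
 *		// ctx's task is being scheduled back in on @cpu
 *	}
 *
 *	static struct preempt_ops my_ops = { .sched_in = my_sched_in };
 *
 *	preempt_notifier_init(&ctx->pn, &my_ops);
 *	preempt_notifier_register(&ctx->pn);
 */
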
#endif

#endif /* __LINUX_PREEMPT_H */