#ifndef _LINUX_SCHED_IDLE_H
#define _LINUX_SCHED_IDLE_H

#include <linux/sched.h>

enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

extern void wake_up_if_idle(int cpu);

/*
 * Idle thread specific functions to determine the need_resched
 * polling state.
 */
#ifdef TIF_POLLING_NRFLAG

static inline void __current_set_polling(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_set_polling_and_test(void)
{
	__current_set_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired with resched_curr()
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

static inline void __current_clr_polling(void)
{
	clear_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_clr_polling_and_test(void)
{
	__current_clr_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired with resched_curr()
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

#else
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }

static inline bool __must_check current_set_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
static inline bool __must_check current_clr_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
#endif

static inline void current_clr_polling(void)
{
	__current_clr_polling();

	/*
	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
	 * Once the bit is cleared, we'll get IPIs with every new
	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
	 * fold.
	 */
	smp_mb(); /* paired with resched_curr() */

	preempt_fold_need_resched();
}
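
/*
 * Illustrative sketch (not part of the upstream header): one way an idle
 * loop can pair these helpers, loosely modeled on the idle code in
 * kernel/sched/idle.c. enter_deep_idle_state() is a hypothetical
 * placeholder for an architecture-specific low-power entry point.
 *
 *	__current_set_polling();
 *	while (!need_resched()) {
 *		if (current_clr_polling_and_test()) {
 *			// A resched request raced with clearing the polling
 *			// bit: skip the deep idle state and loop again.
 *			__current_set_polling();
 *			continue;
 *		}
 *		enter_deep_idle_state();
 *		__current_set_polling();
 *	}
 *	current_clr_polling();
 */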

#endif /* _LINUX_SCHED_IDLE_H */