/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_IDLE_H
#define _LINUX_SCHED_IDLE_H

#include <linux/sched.h>

/*
 * Which idle state a CPU is in, from the load balancer's point of view.
 */
enum cpu_idle_type {
	CPU_IDLE,		/* balancing from an idle CPU */
	CPU_NOT_IDLE,		/* balancing from a busy CPU */
	CPU_NEWLY_IDLE,		/* balancing from a CPU that just became idle */
	CPU_MAX_IDLE_TYPES
};
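
/*
 * Illustrative sketch (assumed scheduler-internal code, not part of this
 * header): load-balancing paths take the idle type as a parameter and
 * tune their aggressiveness on it, e.g.:
 *
 *	if (idle == CPU_NEWLY_IDLE)
 *		interval = 1;			// rebalance again very soon
 *	else
 *		interval = base * busy_factor;	// hypothetical names
 */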

/* Kick @cpu out of its idle loop if it is currently idle. */
extern void wake_up_if_idle(int cpu);
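
/*
 * Illustrative sketch (not part of this header): waking every other
 * online CPU out of idle, along the lines of wake_up_all_idle_cpus()
 * in kernel/smp.c:
 *
 *	int cpu;
 *
 *	preempt_disable();
 *	for_each_online_cpu(cpu) {
 *		if (cpu == smp_processor_id())
 *			continue;
 *		wake_up_if_idle(cpu);
 *	}
 *	preempt_enable();
 */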

/*
 * Idle thread specific functions to determine the need_resched
 * polling state.
 */
#ifdef TIF_POLLING_NRFLAG

static inline void __current_set_polling(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_set_polling_and_test(void)
{
	__current_set_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired with resched_curr().
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

static inline void __current_clr_polling(void)
{
	clear_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_clr_polling_and_test(void)
{
	__current_clr_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired with resched_curr().
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

#else
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }

static inline bool __must_check current_set_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
static inline bool __must_check current_clr_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
#endif
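
/*
 * Illustrative sketch (not part of this header; the my_arch_*() helpers
 * are hypothetical): a monitor/mwait style arch idle routine would
 * advertise polling first, and only enter the low-power wait if no
 * reschedule is already pending:
 *
 *	static void my_arch_idle(void)
 *	{
 *		if (!current_set_polling_and_test()) {
 *			my_arch_monitor(&current_thread_info()->flags);
 *			if (!need_resched())
 *				my_arch_mwait();
 *		}
 *		local_irq_enable();
 *		__current_clr_polling();
 *	}
 *
 * With TIF_POLLING_NRFLAG visible, resched_curr() can skip the wakeup
 * IPI: setting TIF_NEED_RESCHED is enough to end the monitored wait.
 */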

static inline void current_clr_polling(void)
{
	__current_clr_polling();

	/*
	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
	 * Once the bit is cleared, we'll get IPIs with every new
	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
	 * fold it into the preempt count.
	 */
	smp_mb(); /* paired with resched_curr() */

	preempt_fold_need_resched();
}
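
/*
 * Illustrative sketch (assumed caller, not part of this header): a
 * play_idle()-style forced-idle section brackets its idle work with the
 * helpers above and leaves through current_clr_polling(), so that a
 * reschedule racing with the last poll is folded rather than lost:
 *
 *	__current_set_polling();
 *	while (!need_resched())
 *		do_one_idle_iteration();	// hypothetical
 *	current_clr_polling();
 */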

#endif /* _LINUX_SCHED_IDLE_H */