/*
 *  linux/kernel/time/tick-sched.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  No idle tick implementation for low and high resolution timers
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Distribute under GPLv2.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
#include <linux/timer.h>
#include <linux/context_tracking.h>
#include <linux/rq_stats.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

#include <trace/events/timer.h>

struct rq_data rq_info;
struct workqueue_struct *rq_wq;
spinlock_t rq_lock;

/*
 * Per-CPU nohz control structure
 */
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

struct tick_sched *tick_get_tick_sched(int cpu)
{
        return &per_cpu(tick_cpu_sched, cpu);
}

#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
/*
 * The time when the last jiffy update happened. Protected by jiffies_lock.
 */
static ktime_t last_jiffies_update;

u64 jiffy_to_ktime_ns(u64 *now, u64 *jiffy_ktime_ns)
{
        u64 cur_jiffies;
        unsigned long seq;

        do {
                seq = read_seqbegin(&jiffies_lock);
                *now = ktime_get_ns();
                *jiffy_ktime_ns = ktime_to_ns(last_jiffies_update);
                cur_jiffies = get_jiffies_64();
        } while (read_seqretry(&jiffies_lock, seq));

        return cur_jiffies;
}
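
/*
 * Illustrative usage sketch (not from this file): a caller can measure
 * how far into the current jiffy we are, with all three values sampled
 * consistently against concurrent jiffies updates:
 *
 *        u64 now, jiffy_ns;
 *        u64 jiffies_snap = jiffy_to_ktime_ns(&now, &jiffy_ns);
 *        u64 ns_into_jiffy = now - jiffy_ns;
 *
 * The read_seqbegin()/read_seqretry() loop above retries the reads
 * whenever a writer updated the jiffies under jiffies_lock concurrently,
 * so the snapshot stays coherent.
 */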

/*
 * Must be called with interrupts disabled!
 */
static void tick_do_update_jiffies64(ktime_t now)
{
        unsigned long ticks = 0;
        ktime_t delta;

        /*
         * Do a quick check without holding jiffies_lock:
         */
        delta = ktime_sub(now, last_jiffies_update);
        if (delta.tv64 < tick_period.tv64)
                return;

        /* Reevaluate with jiffies_lock held */
        write_seqlock(&jiffies_lock);

        delta = ktime_sub(now, last_jiffies_update);
        if (delta.tv64 >= tick_period.tv64) {

                delta = ktime_sub(delta, tick_period);
                last_jiffies_update = ktime_add(last_jiffies_update,
                                                tick_period);

                /* Slow path for long timeouts */
                if (unlikely(delta.tv64 >= tick_period.tv64)) {
                        s64 incr = ktime_to_ns(tick_period);

                        ticks = ktime_divns(delta, incr);

                        last_jiffies_update = ktime_add_ns(last_jiffies_update,
                                                           incr * ticks);
                }
                do_timer(++ticks);

                /* Keep the tick_next_period variable up to date */
                tick_next_period = ktime_add(last_jiffies_update, tick_period);
        } else {
                write_sequnlock(&jiffies_lock);
                return;
        }
        write_sequnlock(&jiffies_lock);
        update_wall_time();
}
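
/*
 * Worked example of the catch-up arithmetic above (illustrative numbers,
 * assuming HZ=100, i.e. tick_period = 10ms): if this CPU slept for 35ms,
 * delta is 35ms on entry. One period is consumed unconditionally
 * (delta = 25ms, one tick), the slow path then adds
 * ticks = 25ms / 10ms = 2 more, and do_timer(++ticks) advances jiffies
 * by 3. last_jiffies_update moves forward 30ms in total, leaving the
 * remaining 5ms to be accounted on the next update.
 */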

/*
 * Initialize and return the jiffies update.
 */
static ktime_t tick_init_jiffy_update(void)
{
        ktime_t period;

        write_seqlock(&jiffies_lock);
        /* Did we start the jiffies update yet? */
        if (last_jiffies_update.tv64 == 0)
                last_jiffies_update = tick_next_period;
        period = last_jiffies_update;
        write_sequnlock(&jiffies_lock);
        return period;
}

static void tick_sched_do_timer(ktime_t now)
{
        int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ_COMMON
        /*
         * Check if the do_timer duty was dropped. We don't care about
         * concurrency: This happens only when the CPU in charge went
         * into a long sleep. If two CPUs happen to assign themselves to
         * this duty, then the jiffies update is still serialized by
         * jiffies_lock.
         */
        if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)
            && !tick_nohz_full_cpu(cpu))
                tick_do_timer_cpu = cpu;
#endif

        /* Check if the jiffies need an update */
        if (tick_do_timer_cpu == cpu)
                tick_do_update_jiffies64(now);
}

static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
{
#ifdef CONFIG_NO_HZ_COMMON
        /*
         * When we are idle and the tick is stopped, we have to touch
         * the watchdog as we might not schedule for a really long
         * time. This happens on complete idle SMP systems while
         * waiting on the login prompt. We also increment the "start of
         * idle" jiffy stamp so the idle accounting adjustment we do
         * when we go busy again does not account for too many ticks.
         */
        if (ts->tick_stopped) {
                touch_softlockup_watchdog_sched();
                if (is_idle_task(current))
                        ts->idle_jiffies++;
        }
#endif
        update_process_times(user_mode(regs));
        profile_tick(CPU_PROFILING);
}
#endif

#ifdef CONFIG_NO_HZ_FULL
cpumask_var_t tick_nohz_full_mask;
cpumask_var_t housekeeping_mask;
bool tick_nohz_full_running;
static atomic_t tick_dep_mask;

static bool check_tick_dependency(atomic_t *dep)
{
        int val = atomic_read(dep);

        if (val & TICK_DEP_MASK_POSIX_TIMER) {
                trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
                return true;
        }

        if (val & TICK_DEP_MASK_PERF_EVENTS) {
                trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
                return true;
        }

        if (val & TICK_DEP_MASK_SCHED) {
                trace_tick_stop(0, TICK_DEP_MASK_SCHED);
                return true;
        }

        if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
                trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
                return true;
        }

        return false;
}

static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
{
        WARN_ON_ONCE(!irqs_disabled());

        if (unlikely(!cpu_online(cpu)))
                return false;

        if (check_tick_dependency(&tick_dep_mask))
                return false;

        if (check_tick_dependency(&ts->tick_dep_mask))
                return false;

        if (check_tick_dependency(&current->tick_dep_mask))
                return false;

        if (check_tick_dependency(&current->signal->tick_dep_mask))
                return false;

        return true;
}
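
/*
 * The checks above run from the most widely shared scope to the
 * narrowest: global, per CPU, per task, then per signal (process-wide).
 * The first mask with a bit set wins, and trace_tick_stop() records
 * which one it was, so the timer trace events show exactly why a
 * nohz_full CPU kept its tick.
 */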

static void nohz_full_kick_func(struct irq_work *work)
{
        /* Empty, the tick restart happens on tick_nohz_irq_exit() */
}

static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
        .func = nohz_full_kick_func,
};

/*
 * Kick this CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
 * is NMI safe.
 */
static void tick_nohz_full_kick(void)
{
        if (!tick_nohz_full_cpu(smp_processor_id()))
                return;

        irq_work_queue(this_cpu_ptr(&nohz_full_kick_work));
}

/*
 * Kick the CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 */
void tick_nohz_full_kick_cpu(int cpu)
{
        if (!tick_nohz_full_cpu(cpu))
                return;

        irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
}
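
/*
 * Design note with a hypothetical caller (illustrative, not from this
 * file): the kick irq_work is deliberately empty. Queueing it only
 * forces an interrupt on the target CPU; the actual tick re-evaluation
 * happens on the interrupt return path (see tick_nohz_irq_exit()), not
 * in the work handler. A caller that just made CPU 3 need the tick
 * again would do no more than:
 *
 *        tick_nohz_full_kick_cpu(3);
 */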

/*
 * Kick all full dynticks CPUs in order to force these to re-evaluate
 * their dependency on the tick and restart it if necessary.
 */
static void tick_nohz_full_kick_all(void)
{
        int cpu;

        if (!tick_nohz_full_running)
                return;

        preempt_disable();
        for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask)
                tick_nohz_full_kick_cpu(cpu);
        preempt_enable();
}

static void tick_nohz_dep_set_all(atomic_t *dep,
                                  enum tick_dep_bits bit)
{
        int prev;

        prev = atomic_fetch_or(BIT(bit), dep);
        if (!prev)
                tick_nohz_full_kick_all();
}

/*
 * Set a global tick dependency. Used by perf events that rely on freq and
 * by unstable clocks.
 */
void tick_nohz_dep_set(enum tick_dep_bits bit)
{
        tick_nohz_dep_set_all(&tick_dep_mask, bit);
}

void tick_nohz_dep_clear(enum tick_dep_bits bit)
{
        atomic_andnot(BIT(bit), &tick_dep_mask);
}

/*
 * Set per-CPU tick dependency. Used by scheduler and perf events in order to
 * manage events throttling.
 */
void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
{
        int prev;
        struct tick_sched *ts;

        ts = per_cpu_ptr(&tick_cpu_sched, cpu);

        prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask);
        if (!prev) {
                preempt_disable();
                /* Perf needs a local kick that is NMI safe */
                if (cpu == smp_processor_id()) {
                        tick_nohz_full_kick();
                } else {
                        /* Remote irq work is not NMI-safe */
                        if (!WARN_ON_ONCE(in_nmi()))
                                tick_nohz_full_kick_cpu(cpu);
                }
                preempt_enable();
        }
}
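
/*
 * Minimal usage sketch (illustrative; the real call sites live in perf
 * and the scheduler): a subsystem that needs the tick kept alive on
 * CPU 3 sets a dependency bit and clears it when done:
 *
 *        tick_nohz_dep_set_cpu(3, TICK_DEP_BIT_PERF_EVENTS);
 *        ...
 *        tick_nohz_dep_clear_cpu(3, TICK_DEP_BIT_PERF_EVENTS);
 *
 * Only the empty -> non-empty transition of the mask triggers a kick,
 * so setting an already-set bit is cheap.
 */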

void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
{
        struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);

        atomic_andnot(BIT(bit), &ts->tick_dep_mask);
}

/*
 * Set a per-task tick dependency. Posix CPU timers need this in order to
 * elapse per-task timers.
 */
void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
        /*
         * We could optimize this with just kicking the target running the task
         * if that noise matters for nohz full users.
         */
        tick_nohz_dep_set_all(&tsk->tick_dep_mask, bit);
}

void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
        atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
}

/*
 * Set a per-taskgroup tick dependency. Posix CPU timers need this in order to
 * elapse per-process timers.
 */
void tick_nohz_dep_set_signal(struct signal_struct *sig, enum tick_dep_bits bit)
{
        tick_nohz_dep_set_all(&sig->tick_dep_mask, bit);
}

void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
{
        atomic_andnot(BIT(bit), &sig->tick_dep_mask);
}

/*
 * Re-evaluate the need for the tick as we switch the current task.
 * It might need the tick due to per task/process properties:
 * perf events, posix CPU timers, ...
 */
void __tick_nohz_task_switch(void)
{
        unsigned long flags;
        struct tick_sched *ts;

        local_irq_save(flags);

        if (!tick_nohz_full_cpu(smp_processor_id()))
                goto out;

        ts = this_cpu_ptr(&tick_cpu_sched);

        if (ts->tick_stopped) {
                if (atomic_read(&current->tick_dep_mask) ||
                    atomic_read(&current->signal->tick_dep_mask))
                        tick_nohz_full_kick();
        }
out:
        local_irq_restore(flags);
}

/* Parse the boot-time nohz CPU list from the kernel parameters. */
static int __init tick_nohz_full_setup(char *str)
{
        alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
        if (cpulist_parse(str, tick_nohz_full_mask) < 0) {
                pr_warn("NO_HZ: Incorrect nohz_full cpumask\n");
                free_bootmem_cpumask_var(tick_nohz_full_mask);
                return 1;
        }
        tick_nohz_full_running = true;

        return 1;
}
__setup("nohz_full=", tick_nohz_full_setup);
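
/*
 * Example (illustrative, assuming CPU 0 is the boot/timekeeping CPU):
 * booting with
 *
 *        nohz_full=1-7
 *
 * runs CPUs 1-7 as full dynticks CPUs and leaves CPU 0 to handle the
 * housekeeping and timekeeping duties described below.
 */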

static int tick_nohz_cpu_down_callback(struct notifier_block *nfb,
                                       unsigned long action,
                                       void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_DOWN_PREPARE:
                /*
                 * The boot CPU handles housekeeping duty (unbound timers,
                 * workqueues, timekeeping, ...) on behalf of full dynticks
                 * CPUs. It must remain online when nohz full is enabled.
                 */
                if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
                        return NOTIFY_BAD;
                break;
        }
        return NOTIFY_OK;
}

static int tick_nohz_init_all(void)
{
        int err = -1;

#ifdef CONFIG_NO_HZ_FULL_ALL
        if (!alloc_cpumask_var(&tick_nohz_full_mask, GFP_KERNEL)) {
                WARN(1, "NO_HZ: Can't allocate full dynticks cpumask\n");
                return err;
        }
        err = 0;
        cpumask_setall(tick_nohz_full_mask);
        tick_nohz_full_running = true;
#endif
        return err;
}

void __init tick_nohz_init(void)
{
        int cpu;

        if (!tick_nohz_full_running) {
                if (tick_nohz_init_all() < 0)
                        return;
        }

        if (!alloc_cpumask_var(&housekeeping_mask, GFP_KERNEL)) {
                WARN(1, "NO_HZ: Can't allocate not-full dynticks cpumask\n");
                cpumask_clear(tick_nohz_full_mask);
                tick_nohz_full_running = false;
                return;
        }

        /*
         * Full dynticks uses irq work to drive the tick rescheduling on safe
         * locking contexts. But then we need irq work to raise its own
         * interrupts to avoid a circular dependency on the tick.
         */
        if (!arch_irq_work_has_interrupt()) {
                pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support irq work self-IPIs\n");
                cpumask_clear(tick_nohz_full_mask);
                cpumask_copy(housekeeping_mask, cpu_possible_mask);
                tick_nohz_full_running = false;
                return;
        }

        cpu = smp_processor_id();

        if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
                pr_warn("NO_HZ: Clearing %d from nohz_full range for timekeeping\n",
                        cpu);
                cpumask_clear_cpu(cpu, tick_nohz_full_mask);
        }

        cpumask_andnot(housekeeping_mask,
                       cpu_possible_mask, tick_nohz_full_mask);

        for_each_cpu(cpu, tick_nohz_full_mask)
                context_tracking_cpu_set(cpu);

        cpu_notifier(tick_nohz_cpu_down_callback, 0);
        pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
                cpumask_pr_args(tick_nohz_full_mask));

        /*
         * We need at least one CPU to handle housekeeping work such
         * as timekeeping, unbound timers, workqueues, ...
         */
        WARN_ON_ONCE(cpumask_empty(housekeeping_mask));
}
#endif

/*
 * NOHZ - aka dynamic tick functionality
 */
#ifdef CONFIG_NO_HZ_COMMON
/*
 * NO HZ enabled?
 */
bool tick_nohz_enabled __read_mostly = true;
unsigned long tick_nohz_active __read_mostly;
/*
 * Enable / Disable tickless mode
 */
static int __init setup_tick_nohz(char *str)
{
        return (kstrtobool(str, &tick_nohz_enabled) == 0);
}

__setup("nohz=", setup_tick_nohz);
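
/*
 * Example (illustrative): booting with
 *
 *        nohz=off
 *
 * disables dynticks idle mode entirely; nohz=on (the default) lets the
 * idle loop stop the periodic tick on sleeping CPUs. Any value accepted
 * by kstrtobool() works here.
 */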

int tick_nohz_tick_stopped(void)
{
        return __this_cpu_read(tick_cpu_sched.tick_stopped);
}

/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any CPU, as we don't know whether the
 * CPU, which has the update task assigned, is in a long sleep.
 */
static void tick_nohz_update_jiffies(ktime_t now)
{
        unsigned long flags;

        __this_cpu_write(tick_cpu_sched.idle_waketime, now);

        local_irq_save(flags);
        tick_do_update_jiffies64(now);
        local_irq_restore(flags);

        touch_softlockup_watchdog_sched();
}

/*
 * Updates the per-CPU time idle statistics counters
 */
static void
update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
{
        ktime_t delta;

        if (ts->idle_active) {
                delta = ktime_sub(now, ts->idle_entrytime);
                if (nr_iowait_cpu(cpu) > 0)
                        ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
                else
                        ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
                ts->idle_entrytime = now;
        }

        if (last_update_time)
                *last_update_time = ktime_to_us(now);
}

static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
{
        update_ts_time_stats(smp_processor_id(), ts, now, NULL);
        ts->idle_active = 0;

        sched_clock_idle_wakeup_event(0);
}

static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
{
        ktime_t now = ktime_get();

        ts->idle_entrytime = now;
        ts->idle_active = 1;
        sched_clock_idle_sleep_event();
        return now;
}

/**
 * get_cpu_idle_time_us - get the total idle time of a CPU
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 *	counters if NULL.
 *
 * Return the cumulative idle time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
        ktime_t now, idle;

        if (!tick_nohz_active)
                return -1;

        now = ktime_get();
        if (last_update_time) {
                update_ts_time_stats(cpu, ts, now, last_update_time);
                idle = ts->idle_sleeptime;
        } else {
                if (ts->idle_active && !nr_iowait_cpu(cpu)) {
                        ktime_t delta = ktime_sub(now, ts->idle_entrytime);

                        idle = ktime_add(ts->idle_sleeptime, delta);
                } else {
                        idle = ts->idle_sleeptime;
                }
        }

        return ktime_to_us(idle);
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
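
/*
 * Illustrative usage sketch (not from this file): a governor can derive
 * an idle percentage for a CPU by sampling this counter twice:
 *
 *        u64 t0, t1;
 *        u64 idle0 = get_cpu_idle_time_us(cpu, &t0);
 *        ... wait one sampling period ...
 *        u64 idle1 = get_cpu_idle_time_us(cpu, &t1);
 *        unsigned int idle_pct = (idle1 - idle0) * 100 / (t1 - t0);
 *
 * Passing a non-NULL last_update_time folds any currently running idle
 * period into the counters first, so the two samples are comparable.
 */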

/**
 * get_cpu_iowait_time_us - get the total iowait time of a CPU
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 *	counters if NULL.
 *
 * Return the cumulative iowait time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
{
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
        ktime_t now, iowait;

        if (!tick_nohz_active)
                return -1;

        now = ktime_get();
        if (last_update_time) {
                update_ts_time_stats(cpu, ts, now, last_update_time);
                iowait = ts->iowait_sleeptime;
        } else {
                if (ts->idle_active && nr_iowait_cpu(cpu) > 0) {
                        ktime_t delta = ktime_sub(now, ts->idle_entrytime);

                        iowait = ktime_add(ts->iowait_sleeptime, delta);
                } else {
                        iowait = ts->iowait_sleeptime;
                }
        }

        return ktime_to_us(iowait);
}
EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);

static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{
        hrtimer_cancel(&ts->sched_timer);
        hrtimer_set_expires(&ts->sched_timer, ts->last_tick);

        /* Forward the time to expire in the future */
        hrtimer_forward(&ts->sched_timer, now, tick_period);

        if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
                hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
        else
                tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
}

static inline bool local_timer_softirq_pending(void)
{
        return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
}

static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
                                         ktime_t now, int cpu)
{
        struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
        u64 basemono, next_tick, next_tmr, next_rcu, delta, expires;
        unsigned long seq, basejiff;
        ktime_t tick;

        /* Read jiffies and the time when jiffies were updated last */
        do {
                seq = read_seqbegin(&jiffies_lock);
                basemono = last_jiffies_update.tv64;
                basejiff = jiffies;
        } while (read_seqretry(&jiffies_lock, seq));
        ts->last_jiffies = basejiff;

        /*
         * Keep the periodic tick when RCU, the architecture or irq_work
         * requests it.
         * Aside of that, check whether the local timer softirq is
         * pending. If so, it's a bad idea to call get_next_timer_interrupt(),
         * because there is an already expired timer, so it will request
         * immediate expiry, which rearms the hardware timer with a
         * minimal delta that brings us back to this place
         * immediately. Lather, rinse and repeat...
         */
        if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() ||
            irq_work_needs_cpu() || local_timer_softirq_pending()) {
                next_tick = basemono + TICK_NSEC;
        } else {
                /*
                 * Get the next pending timer. If high resolution
                 * timers are enabled this only takes the timer wheel
                 * timers into account. If high resolution timers are
                 * disabled this also looks at the next expiring
                 * hrtimer.
                 */
                next_tmr = get_next_timer_interrupt(basejiff, basemono);
                ts->next_timer = next_tmr;
                /* Take the next rcu event into account */
                next_tick = next_rcu < next_tmr ? next_rcu : next_tmr;
        }

        /*
         * If the tick is due in the next period, keep it ticking or
         * force prod the timer.
         */
        delta = next_tick - basemono;
        if (delta <= (u64)TICK_NSEC) {
                tick.tv64 = 0;

                /*
                 * Tell the timer code that the base is not idle, i.e. undo
                 * the effect of get_next_timer_interrupt():
                 */
                timer_clear_idle();
                /*
                 * We've not stopped the tick yet, and there's a timer in the
                 * next period, so no point in stopping it either, bail.
                 */
                if (!ts->tick_stopped)
                        goto out;

                /*
                 * If, OTOH, we did stop it, but there's a pending (expired)
                 * timer, reprogram the timer hardware to fire now.
                 *
                 * We will not restart the tick proper, just prod the timer
                 * hardware into firing an interrupt to process the pending
                 * timers. Just like tick_irq_exit() will not restart the tick
                 * for 'normal' interrupts.
                 *
                 * Only once we exit the idle loop will we re-enable the tick,
                 * see tick_nohz_idle_exit().
                 */
                if (delta == 0) {
                        tick_nohz_restart(ts, now);
                        goto out;
                }
        }

        /*
         * If this CPU is the one which updates jiffies, then give up
         * the assignment and let it be taken by the CPU which runs
         * the tick timer next, which might be this CPU as well. If we
         * don't drop this here the jiffies might be stale and
         * do_timer() never invoked. Keep track of the fact that it
         * was the one which had the do_timer() duty last. If this CPU
         * is the one which had the do_timer() duty last, we limit the
         * sleep time to the timekeeping max_deferment value.
         * Otherwise we can sleep as long as we want.
         */
        delta = timekeeping_max_deferment();
        if (cpu == tick_do_timer_cpu) {
                tick_do_timer_cpu = TICK_DO_TIMER_NONE;
                ts->do_timer_last = 1;
        } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
                delta = KTIME_MAX;
                ts->do_timer_last = 0;
        } else if (!ts->do_timer_last) {
                delta = KTIME_MAX;
        }

#ifdef CONFIG_NO_HZ_FULL
        /* Limit the tick delta to the maximum scheduler deferment */
        if (!ts->inidle)
                delta = min(delta, scheduler_tick_max_deferment());
#endif

        /* Calculate the next expiry time */
        if (delta < (KTIME_MAX - basemono))
                expires = basemono + delta;
        else
                expires = KTIME_MAX;

        expires = min_t(u64, expires, next_tick);
        tick.tv64 = expires;

        /* Skip reprogram of event if it's not changed */
        if (ts->tick_stopped && (expires == dev->next_event.tv64))
                goto out;

        /*
         * nohz_stop_sched_tick can be called several times before
         * the nohz_restart_sched_tick is called. This happens when
         * interrupts arrive which do not cause a reschedule. In the
         * first call we save the current tick time, so we can restart
         * the scheduler tick in nohz_restart_sched_tick.
         */
        if (!ts->tick_stopped) {
                nohz_balance_enter_idle(cpu);
                calc_load_enter_idle();
                cpu_load_update_nohz_start();

                ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
                ts->tick_stopped = 1;
                trace_tick_stop(1, TICK_DEP_MASK_NONE);
        }

        /*
         * If the expiration time == KTIME_MAX, then we simply stop
         * the tick timer.
         */
        if (unlikely(expires == KTIME_MAX)) {
                if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
                        hrtimer_cancel(&ts->sched_timer);
                goto out;
        }

        if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
                hrtimer_start(&ts->sched_timer, tick, HRTIMER_MODE_ABS_PINNED);
        else
                tick_program_event(tick, 1);
out:
        /* Update the estimated sleep length */
        ts->sleep_length = ktime_sub(dev->next_event, now);
        return tick;
}

static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
{
        /* Update jiffies first */
        tick_do_update_jiffies64(now);
        cpu_load_update_nohz_stop();

        /*
         * Clear the timer idle flag, so we avoid IPIs on remote queueing and
         * the clock forward checks in the enqueue path:
         */
        timer_clear_idle();

        calc_load_exit_idle();
        touch_softlockup_watchdog_sched();
        /*
         * Cancel the scheduled timer and restore the tick
         */
        ts->tick_stopped = 0;
        ts->idle_exittime = now;

        tick_nohz_restart(ts, now);
}

static void tick_nohz_full_update_tick(struct tick_sched *ts)
{
#ifdef CONFIG_NO_HZ_FULL
        int cpu = smp_processor_id();

        if (!tick_nohz_full_cpu(cpu))
                return;

        if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
                return;

        if (can_stop_full_tick(cpu, ts))
                tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
        else if (ts->tick_stopped)
                tick_nohz_restart_sched_tick(ts, ktime_get());
#endif
}
| 891 | |
Frederic Weisbecker | 5b39939 | 2011-08-01 00:06:10 +0200 | [diff] [blame] | 892 | static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) |
| 893 | { |
| 894 | /* |
Ingo Molnar | 0de7611 | 2016-07-01 12:42:35 +0200 | [diff] [blame] | 895 | * If this CPU is offline and it is the one which updates |
Frederic Weisbecker | 5b39939 | 2011-08-01 00:06:10 +0200 | [diff] [blame] | 896 | * jiffies, then give up the assignment and let it be taken by |
Ingo Molnar | 0de7611 | 2016-07-01 12:42:35 +0200 | [diff] [blame] | 897 | * the CPU which runs the tick timer next. If we don't drop |
Frederic Weisbecker | 5b39939 | 2011-08-01 00:06:10 +0200 | [diff] [blame] | 898 | * this here, the jiffies might be stale and do_timer() might |
| 899 | * never be invoked. |
| 900 | */ |
| 901 | if (unlikely(!cpu_online(cpu))) { |
| 902 | if (cpu == tick_do_timer_cpu) |
| 903 | tick_do_timer_cpu = TICK_DO_TIMER_NONE; |
Thomas Gleixner | f7ea0fd | 2013-05-13 21:40:27 +0200 | [diff] [blame] | 904 | return false; |
Frederic Weisbecker | 5b39939 | 2011-08-01 00:06:10 +0200 | [diff] [blame] | 905 | } |
| 906 | |
Thomas Gleixner | 0e576acb | 2013-11-29 12:18:13 +0100 | [diff] [blame] | 907 | if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) { |
| 908 | ts->sleep_length = (ktime_t) { .tv64 = NSEC_PER_SEC/HZ }; |
Frederic Weisbecker | 5b39939 | 2011-08-01 00:06:10 +0200 | [diff] [blame] | 909 | return false; |
Thomas Gleixner | 0e576acb | 2013-11-29 12:18:13 +0100 | [diff] [blame] | 910 | } |
Frederic Weisbecker | 5b39939 | 2011-08-01 00:06:10 +0200 | [diff] [blame] | 911 | |
| 912 | if (need_resched()) |
| 913 | return false; |
| 914 | |
| 915 | if (unlikely(local_softirq_pending() && cpu_online(cpu))) { |
| 916 | static int ratelimit; |
| 917 | |
Paul E. McKenney | 803b0eb | 2012-08-23 08:34:07 -0700 | [diff] [blame] | 918 | if (ratelimit < 10 && |
| 919 | (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { |
Rado Vrbovsky | cfea7d7 | 2013-02-08 12:37:30 -0500 | [diff] [blame] | 920 | pr_warn("NOHZ: local_softirq_pending %02x\n", |
| 921 | (unsigned int) local_softirq_pending()); |
Frederic Weisbecker | 5b39939 | 2011-08-01 00:06:10 +0200 | [diff] [blame] | 922 | ratelimit++; |
| 923 | } |
| 924 | return false; |
| 925 | } |
| 926 | |
Frederic Weisbecker | 460775d | 2013-07-24 23:52:27 +0200 | [diff] [blame] | 927 | if (tick_nohz_full_enabled()) { |
Frederic Weisbecker | a382bf9 | 2012-12-18 18:24:35 +0100 | [diff] [blame] | 928 | /* |
| 929 | * Keep the tick alive to guarantee timekeeping progression |
| 930 | * if there are full dynticks CPUs around |
| 931 | */ |
| 932 | if (tick_do_timer_cpu == cpu) |
| 933 | return false; |
| 934 | /* |
| 935 | * Boot safety: make sure the timekeeping duty has been |
| 936 | * assigned before entering dyntick-idle mode, |
| 937 | */ |
| 938 | if (tick_do_timer_cpu == TICK_DO_TIMER_NONE) |
| 939 | return false; |
| 940 | } |
| 941 | |
Frederic Weisbecker | 5b39939 | 2011-08-01 00:06:10 +0200 | [diff] [blame] | 942 | return true; |
| 943 | } |
| 944 | |
Frederic Weisbecker | 19f5f73 | 2011-07-27 17:29:28 +0200 | [diff] [blame] | 945 | static void __tick_nohz_idle_enter(struct tick_sched *ts) |
| 946 | { |
Frederic Weisbecker | 84bf1bc | 2011-08-01 01:25:38 +0200 | [diff] [blame] | 947 | ktime_t now, expires; |
Frederic Weisbecker | 5b39939 | 2011-08-01 00:06:10 +0200 | [diff] [blame] | 948 | int cpu = smp_processor_id(); |
Frederic Weisbecker | 19f5f73 | 2011-07-27 17:29:28 +0200 | [diff] [blame] | 949 | |
Wanpeng Li | 08d07259 | 2016-09-02 14:38:23 +0800 | [diff] [blame] | 950 | now = tick_nohz_start_idle(ts); |
| 951 | |
Prasad Sodagudi | 602c4e2 | 2017-05-17 23:26:09 -0700 | [diff] [blame] | 952 | #ifdef CONFIG_SMP |
| 953 | if (check_pending_deferrable_timers(cpu)) |
| 954 | raise_softirq_irqoff(TIMER_SOFTIRQ); |
| 955 | #endif |
| 956 | |
Frederic Weisbecker | 5b39939 | 2011-08-01 00:06:10 +0200 | [diff] [blame] | 957 | if (can_stop_idle_tick(cpu, ts)) { |
| 958 | int was_stopped = ts->tick_stopped; |
| 959 | |
| 960 | ts->idle_calls++; |
Frederic Weisbecker | 84bf1bc | 2011-08-01 01:25:38 +0200 | [diff] [blame] | 961 | |
| 962 | expires = tick_nohz_stop_sched_tick(ts, now, cpu); |
| 963 | if (expires.tv64 > 0LL) { |
| 964 | ts->idle_sleeps++; |
| 965 | ts->idle_expires = expires; |
| 966 | } |
Frederic Weisbecker | 5b39939 | 2011-08-01 00:06:10 +0200 | [diff] [blame] | 967 | |
| 968 | if (!was_stopped && ts->tick_stopped) |
| 969 | ts->idle_jiffies = ts->last_jiffies; |
| 970 | } |
Frederic Weisbecker | 280f067 | 2011-10-07 18:22:06 +0200 | [diff] [blame] | 971 | } |
| 972 | |
| 973 | /** |
| 974 | * tick_nohz_idle_enter - stop the idle tick from the idle task |
| 975 | * |
| 976 | * When the next event is more than a tick into the future, stop the idle tick. |
| 977 | * Called when we start the idle loop. |
Frederic Weisbecker | 2bbb681 | 2011-10-08 16:01:00 +0200 | [diff] [blame] | 978 | * |
Frederic Weisbecker | 1268fbc | 2011-11-17 18:48:14 +0100 | [diff] [blame] | 979 | * The arch is responsible for calling, in the order sketched below: |
Frederic Weisbecker | 2bbb681 | 2011-10-08 16:01:00 +0200 | [diff] [blame] | 980 | * |
| 981 | * - rcu_idle_enter() after its last use of RCU before the CPU is put |
| 982 | * to sleep. |
| 983 | * - rcu_idle_exit() before the first use of RCU after the CPU is woken up. |
Frederic Weisbecker | 280f067 | 2011-10-07 18:22:06 +0200 | [diff] [blame] | 984 | */ |
Frederic Weisbecker | 1268fbc | 2011-11-17 18:48:14 +0100 | [diff] [blame] | 985 | void tick_nohz_idle_enter(void) |
Frederic Weisbecker | 280f067 | 2011-10-07 18:22:06 +0200 | [diff] [blame] | 986 | { |
| 987 | struct tick_sched *ts; |
| 988 | |
Frederic Weisbecker | 1268fbc | 2011-11-17 18:48:14 +0100 | [diff] [blame] | 989 | WARN_ON_ONCE(irqs_disabled()); |
| 990 | |
Linus Torvalds | 0db49b7 | 2012-01-06 08:33:28 -0800 | [diff] [blame] | 991 | /* |
Ingo Molnar | 0de7611 | 2016-07-01 12:42:35 +0200 | [diff] [blame] | 992 | * Update the idle state in the scheduler domain hierarchy |
| 993 | * when tick_nohz_stop_sched_tick() is called from the idle loop. |
| 994 | * State will be updated to busy during the first busy tick after |
| 995 | * exiting idle. |
| 996 | */ |
Linus Torvalds | 0db49b7 | 2012-01-06 08:33:28 -0800 | [diff] [blame] | 997 | set_cpu_sd_state_idle(); |
| 998 | |
Frederic Weisbecker | 1268fbc | 2011-11-17 18:48:14 +0100 | [diff] [blame] | 999 | local_irq_disable(); |
| 1000 | |
Christoph Lameter | 22127e9 | 2014-08-17 12:30:25 -0500 | [diff] [blame] | 1001 | ts = this_cpu_ptr(&tick_cpu_sched); |
Frederic Weisbecker | 280f067 | 2011-10-07 18:22:06 +0200 | [diff] [blame] | 1002 | ts->inidle = 1; |
Frederic Weisbecker | 19f5f73 | 2011-07-27 17:29:28 +0200 | [diff] [blame] | 1003 | __tick_nohz_idle_enter(ts); |
Frederic Weisbecker | 1268fbc | 2011-11-17 18:48:14 +0100 | [diff] [blame] | 1004 | |
| 1005 | local_irq_enable(); |
Frederic Weisbecker | 280f067 | 2011-10-07 18:22:06 +0200 | [diff] [blame] | 1006 | } |
| 1007 | |
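/*
 * Illustrative sketch, not part of the original file: the calling order an
 * arch idle loop is expected to follow around tick_nohz_idle_enter() and
 * tick_nohz_idle_exit(), as documented above. example_idle_loop() and
 * example_wait_for_irq() are hypothetical stand-ins for the arch specific
 * pieces; the block is compiled out because it only documents usage.
 */
#if 0
static void example_idle_loop(void)
{
	while (!need_resched()) {
		tick_nohz_idle_enter();		/* stop the tick if possible */
		rcu_idle_enter();		/* after the last use of RCU */
		example_wait_for_irq();		/* arch specific low power wait */
		rcu_idle_exit();		/* before the first use of RCU */
		tick_nohz_idle_exit();		/* restart the tick if stopped */
	}
}
#endif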
| 1008 | /** |
| 1009 | * tick_nohz_irq_exit - update next tick event from interrupt exit |
| 1010 | * |
| 1011 | * When an interrupt fires while we are idle and it doesn't cause |
| 1012 | * a reschedule, it may still add, modify or delete a timer, enqueue |
| 1013 | * an RCU callback, etc. |
| 1014 | * So we need to re-calculate and reprogram the next tick event. |
| 1015 | */ |
| 1016 | void tick_nohz_irq_exit(void) |
| 1017 | { |
Christoph Lameter | 22127e9 | 2014-08-17 12:30:25 -0500 | [diff] [blame] | 1018 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
Frederic Weisbecker | 280f067 | 2011-10-07 18:22:06 +0200 | [diff] [blame] | 1019 | |
Rafael J. Wysocki | 1485191 | 2013-07-27 01:41:34 +0200 | [diff] [blame] | 1020 | if (ts->inidle) |
Frederic Weisbecker | 5811d99 | 2013-04-20 16:40:31 +0200 | [diff] [blame] | 1021 | __tick_nohz_idle_enter(ts); |
Rafael J. Wysocki | 1485191 | 2013-07-27 01:41:34 +0200 | [diff] [blame] | 1022 | else |
Frederic Weisbecker | 73738a9 | 2015-05-27 19:22:08 +0200 | [diff] [blame] | 1023 | tick_nohz_full_update_tick(ts); |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1024 | } |
| 1025 | |
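/*
 * Illustrative sketch, not part of the original file: where
 * tick_nohz_irq_exit() sits on the interrupt return path. This is a
 * simplification of the real irq_exit()/tick_irq_exit() code in
 * kernel/softirq.c; example_irq_exit() is a hypothetical name.
 */
#if 0
static void example_irq_exit(void)
{
	/* Only the idle task needs its next tick event re-evaluated */
	if (!in_interrupt() && is_idle_task(current))
		tick_nohz_irq_exit();
}
#endif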
| 1026 | /** |
Len Brown | 4f86d3a | 2007-10-03 18:58:00 -0400 | [diff] [blame] | 1027 | * tick_nohz_get_sleep_length - return the length of the current sleep |
| 1028 | * |
| 1029 | * Called from power state control code with interrupts disabled |
| 1030 | */ |
| 1031 | ktime_t tick_nohz_get_sleep_length(void) |
| 1032 | { |
Christoph Lameter | 22127e9 | 2014-08-17 12:30:25 -0500 | [diff] [blame] | 1033 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
Len Brown | 4f86d3a | 2007-10-03 18:58:00 -0400 | [diff] [blame] | 1034 | |
| 1035 | return ts->sleep_length; |
| 1036 | } |
| 1037 | |
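/*
 * Illustrative sketch, not part of the original file: how an idle
 * governor might use the sleep length estimate when picking a C-state.
 * example_pick_state() is hypothetical and ignores the latency
 * constraints that a real cpuidle governor would also honour.
 */
#if 0
static int example_pick_state(struct cpuidle_driver *drv)
{
	s64 sleep_us = ktime_to_us(tick_nohz_get_sleep_length());
	int i, state = 0;

	/* Deepest state whose target residency still fits the sleep */
	for (i = 1; i < drv->state_count; i++)
		if (drv->states[i].target_residency <= sleep_us)
			state = i;
	return state;
}
#endif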
Chris Redpath | 595ae4a | 2017-05-25 15:24:58 +0100 | [diff] [blame] | 1038 | /** |
| 1039 | * tick_nohz_get_idle_calls - return the current idle calls counter value |
| 1040 | * |
| 1041 | * Called from the schedutil frequency scaling governor in scheduler context. |
| 1042 | */ |
| 1043 | unsigned long tick_nohz_get_idle_calls(void) |
| 1044 | { |
| 1045 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
| 1046 | |
| 1047 | return ts->idle_calls; |
| 1048 | } |
| 1049 | |
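/*
 * Illustrative sketch, not part of the original file: how schedutil can
 * compare snapshots of the idle calls counter to tell that a CPU has not
 * entered idle since the last frequency decision. example_cpu_is_busy()
 * and the caller-provided snapshot are hypothetical.
 */
#if 0
static bool example_cpu_is_busy(unsigned long *saved_idle_calls)
{
	unsigned long idle_calls = tick_nohz_get_idle_calls();
	bool busy = (idle_calls == *saved_idle_calls);

	*saved_idle_calls = idle_calls;
	return busy;	/* no new idle entry means the CPU stayed busy */
}
#endif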
Frederic Weisbecker | 2ac0d98 | 2011-07-28 04:00:47 +0200 | [diff] [blame] | 1050 | static void tick_nohz_account_idle_ticks(struct tick_sched *ts) |
| 1051 | { |
Frederic Weisbecker | 3f4724e | 2012-07-16 18:00:34 +0200 | [diff] [blame] | 1052 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE |
Frederic Weisbecker | 2ac0d98 | 2011-07-28 04:00:47 +0200 | [diff] [blame] | 1053 | unsigned long ticks; |
Frederic Weisbecker | 3f4724e | 2012-07-16 18:00:34 +0200 | [diff] [blame] | 1054 | |
Frederic Weisbecker | 55dbdcf | 2015-11-19 16:47:32 +0100 | [diff] [blame] | 1055 | if (vtime_accounting_cpu_enabled()) |
Frederic Weisbecker | 3f4724e | 2012-07-16 18:00:34 +0200 | [diff] [blame] | 1056 | return; |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1057 | /* |
| 1058 | * We stopped the tick in idle. update_process_times() would miss |
| 1059 | * the time we slept, as it does only a single tick of accounting. |
| 1060 | * Enforce that this time is accounted to idle! |
| 1061 | */ |
| 1062 | ticks = jiffies - ts->idle_jiffies; |
| 1063 | /* |
| 1064 | * We might be one off. Do not randomly account a huge number of ticks! |
| 1065 | */ |
Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 1066 | if (ticks && ticks < LONG_MAX) |
| 1067 | account_idle_ticks(ticks); |
| 1068 | #endif |
Frederic Weisbecker | 19f5f73 | 2011-07-27 17:29:28 +0200 | [diff] [blame] | 1069 | } |
| 1070 | |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1071 | /** |
| 1072 | * tick_nohz_idle_exit - restart the idle tick from the idle task |
| 1073 | * |
| 1074 | * Restart the idle tick when the CPU is woken up from idle |
| 1075 | * This also exits the RCU extended quiescent state. The CPU |
| 1076 | * can use RCU again after this function is called. |
| 1077 | */ |
| 1078 | void tick_nohz_idle_exit(void) |
| 1079 | { |
Christoph Lameter | 4a32fea | 2014-08-17 12:30:27 -0500 | [diff] [blame] | 1080 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1081 | ktime_t now; |
| 1082 | |
| 1083 | local_irq_disable(); |
| 1084 | |
| 1085 | WARN_ON_ONCE(!ts->inidle); |
| 1086 | |
| 1087 | ts->inidle = 0; |
| 1088 | |
| 1089 | if (ts->idle_active || ts->tick_stopped) |
| 1090 | now = ktime_get(); |
| 1091 | |
| 1092 | if (ts->idle_active) |
Frederic Weisbecker | e8fcaa5 | 2013-08-07 22:28:01 +0200 | [diff] [blame] | 1093 | tick_nohz_stop_idle(ts, now); |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1094 | |
Frederic Weisbecker | 2ac0d98 | 2011-07-28 04:00:47 +0200 | [diff] [blame] | 1095 | if (ts->tick_stopped) { |
Frederic Weisbecker | 1f41906 | 2016-04-13 15:56:51 +0200 | [diff] [blame] | 1096 | tick_nohz_restart_sched_tick(ts, now); |
Frederic Weisbecker | 2ac0d98 | 2011-07-28 04:00:47 +0200 | [diff] [blame] | 1097 | tick_nohz_account_idle_ticks(ts); |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1098 | } |
| 1099 | |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1100 | local_irq_enable(); |
| 1101 | } |
| 1102 | |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1103 | /* |
| 1104 | * The nohz low res interrupt handler |
| 1105 | */ |
| 1106 | static void tick_nohz_handler(struct clock_event_device *dev) |
| 1107 | { |
Christoph Lameter | 22127e9 | 2014-08-17 12:30:25 -0500 | [diff] [blame] | 1108 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1109 | struct pt_regs *regs = get_irq_regs(); |
| 1110 | ktime_t now = ktime_get(); |
| 1111 | |
| 1112 | dev->next_event.tv64 = KTIME_MAX; |
| 1113 | |
Frederic Weisbecker | 5bb9622 | 2012-10-15 02:03:27 +0200 | [diff] [blame] | 1114 | tick_sched_do_timer(now); |
Frederic Weisbecker | 9e8f559 | 2012-10-15 02:43:03 +0200 | [diff] [blame] | 1115 | tick_sched_handle(ts, regs); |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1116 | |
Viresh Kumar | b5e995e | 2014-06-12 16:24:41 +0530 | [diff] [blame] | 1117 | /* No need to reprogram if we are running tickless */ |
| 1118 | if (unlikely(ts->tick_stopped)) |
| 1119 | return; |
| 1120 | |
Thomas Gleixner | 0ff53d0 | 2015-04-14 21:08:54 +0000 | [diff] [blame] | 1121 | hrtimer_forward(&ts->sched_timer, now, tick_period); |
| 1122 | tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1123 | } |
| 1124 | |
Thomas Gleixner | bc7a34b | 2015-05-26 22:50:33 +0000 | [diff] [blame] | 1125 | static inline void tick_nohz_activate(struct tick_sched *ts, int mode) |
| 1126 | { |
| 1127 | if (!tick_nohz_enabled) |
| 1128 | return; |
| 1129 | ts->nohz_mode = mode; |
| 1130 | /* One update is enough */ |
| 1131 | if (!test_and_set_bit(0, &tick_nohz_active)) |
Thomas Gleixner | 683be13 | 2015-05-26 22:50:35 +0000 | [diff] [blame] | 1132 | timers_update_migration(true); |
Thomas Gleixner | bc7a34b | 2015-05-26 22:50:33 +0000 | [diff] [blame] | 1133 | } |
| 1134 | |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1135 | /** |
| 1136 | * tick_nohz_switch_to_nohz - switch to nohz mode |
| 1137 | */ |
| 1138 | static void tick_nohz_switch_to_nohz(void) |
| 1139 | { |
Christoph Lameter | 22127e9 | 2014-08-17 12:30:25 -0500 | [diff] [blame] | 1140 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1141 | ktime_t next; |
| 1142 | |
Viresh Kumar | 2763053 | 2014-04-15 10:54:41 +0530 | [diff] [blame] | 1143 | if (!tick_nohz_enabled) |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1144 | return; |
| 1145 | |
Thomas Gleixner | 6b442bc | 2015-05-07 14:35:59 +0200 | [diff] [blame] | 1146 | if (tick_switch_to_oneshot(tick_nohz_handler)) |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1147 | return; |
Thomas Gleixner | 6b442bc | 2015-05-07 14:35:59 +0200 | [diff] [blame] | 1148 | |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1149 | /* |
| 1150 | * Recycle the hrtimer in ts, so we can share the |
| 1151 | * hrtimer_forward with the highres code. |
| 1152 | */ |
| 1153 | hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); |
| 1154 | /* Get the next period */ |
| 1155 | next = tick_init_jiffy_update(); |
| 1156 | |
Thomas Gleixner | 0ff53d0 | 2015-04-14 21:08:54 +0000 | [diff] [blame] | 1157 | hrtimer_set_expires(&ts->sched_timer, next); |
Wanpeng Li | 1ca8ec5 | 2016-01-27 19:26:07 +0800 | [diff] [blame] | 1158 | hrtimer_forward_now(&ts->sched_timer, tick_period); |
| 1159 | tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); |
Thomas Gleixner | bc7a34b | 2015-05-26 22:50:33 +0000 | [diff] [blame] | 1160 | tick_nohz_activate(ts, NOHZ_MODE_LOWRES); |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1161 | } |
| 1162 | |
Frederic Weisbecker | 5acac1b | 2013-12-04 18:28:20 +0100 | [diff] [blame] | 1163 | static inline void tick_nohz_irq_enter(void) |
Martin Schwidefsky | eed3b9c | 2009-09-29 14:25:15 +0200 | [diff] [blame] | 1164 | { |
Christoph Lameter | 4a32fea | 2014-08-17 12:30:27 -0500 | [diff] [blame] | 1165 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
Martin Schwidefsky | eed3b9c | 2009-09-29 14:25:15 +0200 | [diff] [blame] | 1166 | ktime_t now; |
| 1167 | |
| 1168 | if (!ts->idle_active && !ts->tick_stopped) |
| 1169 | return; |
| 1170 | now = ktime_get(); |
| 1171 | if (ts->idle_active) |
Frederic Weisbecker | e8fcaa5 | 2013-08-07 22:28:01 +0200 | [diff] [blame] | 1172 | tick_nohz_stop_idle(ts, now); |
Thomas Gleixner | ff00673 | 2016-07-04 09:50:35 +0000 | [diff] [blame] | 1173 | if (ts->tick_stopped) |
Martin Schwidefsky | eed3b9c | 2009-09-29 14:25:15 +0200 | [diff] [blame] | 1174 | tick_nohz_update_jiffies(now); |
Martin Schwidefsky | eed3b9c | 2009-09-29 14:25:15 +0200 | [diff] [blame] | 1175 | } |
| 1176 | |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1177 | #else |
| 1178 | |
| 1179 | static inline void tick_nohz_switch_to_nohz(void) { } |
Frederic Weisbecker | 5acac1b | 2013-12-04 18:28:20 +0100 | [diff] [blame] | 1180 | static inline void tick_nohz_irq_enter(void) { } |
Thomas Gleixner | bc7a34b | 2015-05-26 22:50:33 +0000 | [diff] [blame] | 1181 | static inline void tick_nohz_activate(struct tick_sched *ts, int mode) { } |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1182 | |
Frederic Weisbecker | 3451d02 | 2011-08-10 23:21:01 +0200 | [diff] [blame] | 1183 | #endif /* CONFIG_NO_HZ_COMMON */ |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1184 | |
| 1185 | /* |
Thomas Gleixner | 719254f | 2008-10-17 09:59:47 +0200 | [diff] [blame] | 1186 | * Called from irq_enter to notify about the possible interruption of idle() |
| 1187 | */ |
Frederic Weisbecker | 5acac1b | 2013-12-04 18:28:20 +0100 | [diff] [blame] | 1188 | void tick_irq_enter(void) |
Thomas Gleixner | 719254f | 2008-10-17 09:59:47 +0200 | [diff] [blame] | 1189 | { |
Frederic Weisbecker | e8fcaa5 | 2013-08-07 22:28:01 +0200 | [diff] [blame] | 1190 | tick_check_oneshot_broadcast_this_cpu(); |
Frederic Weisbecker | 5acac1b | 2013-12-04 18:28:20 +0100 | [diff] [blame] | 1191 | tick_nohz_irq_enter(); |
Thomas Gleixner | 719254f | 2008-10-17 09:59:47 +0200 | [diff] [blame] | 1192 | } |
| 1193 | |
| 1194 | /* |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1195 | * High resolution timer specific code |
| 1196 | */ |
| 1197 | #ifdef CONFIG_HIGH_RES_TIMERS |
Kyle Yan | 36d7870 | 2016-08-23 16:07:11 -0700 | [diff] [blame] | 1198 | static void update_rq_stats(void) |
| 1199 | { |
| 1200 | unsigned long jiffy_gap = 0; |
Runmin Wang | 3ac7057 | 2017-04-12 17:26:50 -0700 | [diff] [blame] | 1201 | unsigned long long rq_avg = 0; |
Kyle Yan | 36d7870 | 2016-08-23 16:07:11 -0700 | [diff] [blame] | 1202 | unsigned long flags = 0; |
| 1203 | |
| 1204 | jiffy_gap = jiffies - rq_info.rq_poll_last_jiffy; |
| 1205 | if (jiffy_gap >= rq_info.rq_poll_jiffies) { |
| 1206 | spin_lock_irqsave(&rq_lock, flags); |
| 1207 | if (!rq_info.rq_avg) |
| 1208 | rq_info.rq_poll_total_jiffies = 0; |
| 1209 | rq_avg = nr_running() * 10; |
| 1210 | if (rq_info.rq_poll_total_jiffies) { |
| 1211 | rq_avg = (rq_avg * jiffy_gap) + |
| 1212 | (rq_info.rq_avg * |
| 1213 | rq_info.rq_poll_total_jiffies); |
| 1214 | do_div(rq_avg, |
| 1215 | rq_info.rq_poll_total_jiffies + jiffy_gap); |
| 1216 | } |
| 1217 | rq_info.rq_avg = rq_avg; |
| 1218 | rq_info.rq_poll_total_jiffies += jiffy_gap; |
| 1219 | rq_info.rq_poll_last_jiffy = jiffies; |
| 1220 | spin_unlock_irqrestore(&rq_lock, flags); |
| 1221 | } |
| 1222 | } |
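/*
 * Worked example, not part of the original file: update_rq_stats() keeps
 * rq_avg as a jiffy-weighted running average in fixed point with one
 * decimal digit (nr_running() * 10). With a previous rq_avg of 25
 * (2.5 runnable tasks) accumulated over rq_poll_total_jiffies = 10, and
 * 4 tasks runnable across a jiffy_gap of 5, the new average is
 * (4 * 10 * 5 + 25 * 10) / (10 + 5) = 450 / 15 = 30, i.e. 3.0 tasks.
 */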
| 1223 | static void wakeup_user(void) |
| 1224 | { |
| 1225 | unsigned long jiffy_gap; |
| 1226 | |
| 1227 | jiffy_gap = jiffies - rq_info.def_timer_last_jiffy; |
| 1228 | if (jiffy_gap >= rq_info.def_timer_jiffies) { |
| 1229 | rq_info.def_timer_last_jiffy = jiffies; |
| 1230 | queue_work(rq_wq, &rq_info.def_timer_work); |
| 1231 | } |
| 1232 | } |
| 1233 | |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1234 | /* |
Pavel Machek | 4c9dc64 | 2008-01-30 13:30:00 +0100 | [diff] [blame] | 1235 | * We rearm the timer until we get disabled by the idle code. |
Chuansheng Liu | 351f181 | 2012-10-25 01:07:35 +0800 | [diff] [blame] | 1236 | * Called with interrupts disabled. |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1237 | */ |
| 1238 | static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer) |
| 1239 | { |
| 1240 | struct tick_sched *ts = |
| 1241 | container_of(timer, struct tick_sched, sched_timer); |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1242 | struct pt_regs *regs = get_irq_regs(); |
| 1243 | ktime_t now = ktime_get(); |
Thomas Gleixner | d3ed782 | 2007-05-08 00:30:03 -0700 | [diff] [blame] | 1244 | |
Frederic Weisbecker | 5bb9622 | 2012-10-15 02:03:27 +0200 | [diff] [blame] | 1245 | tick_sched_do_timer(now); |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1246 | |
| 1247 | /* |
| 1248 | * Do not call tick_sched_handle() when we are not in IRQ |
| 1249 | * context and have no valid regs pointer. |
| 1250 | */ |
Kyle Yan | 36d7870 | 2016-08-23 16:07:11 -0700 | [diff] [blame] | 1251 | if (regs) { |
Frederic Weisbecker | 9e8f559 | 2012-10-15 02:43:03 +0200 | [diff] [blame] | 1252 | tick_sched_handle(ts, regs); |
Kyle Yan | 36d7870 | 2016-08-23 16:07:11 -0700 | [diff] [blame] | 1253 | if (rq_info.init == 1 && |
| 1254 | tick_do_timer_cpu == smp_processor_id()) { |
| 1255 | /* |
| 1256 | * Update run queue statistics. |
| 1257 | */ |
| 1258 | update_rq_stats(); |
| 1259 | /* |
| 1260 | * Wake up userspace if needed. |
| 1261 | */ |
| 1262 | wakeup_user(); |
| 1263 | } |
| 1264 | } |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1265 | |
Viresh Kumar | 2a16fc9 | 2014-06-12 16:24:41 +0530 | [diff] [blame] | 1266 | /* No need to reprogram if we are in idle or full dynticks mode */ |
| 1267 | if (unlikely(ts->tick_stopped)) |
| 1268 | return HRTIMER_NORESTART; |
| 1269 | |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1270 | hrtimer_forward(timer, now, tick_period); |
| 1271 | |
| 1272 | return HRTIMER_RESTART; |
| 1273 | } |
| 1274 | |
Mike Galbraith | 5307c95 | 2012-05-08 12:20:58 +0200 | [diff] [blame] | 1275 | static int sched_skew_tick; |
| 1276 | |
Thomas Gleixner | 62cf20b | 2012-05-25 14:08:57 +0200 | [diff] [blame] | 1277 | static int __init skew_tick(char *str) |
| 1278 | { |
| 1279 | get_option(&str, &sched_skew_tick); |
| 1280 | |
| 1281 | return 0; |
| 1282 | } |
| 1283 | early_param("skew_tick", skew_tick); |
| 1284 | |
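/*
 * Worked example, not part of the original file, for the skew applied in
 * tick_setup_sched_timer() below when booting with "skew_tick=1": with
 * HZ=250 the tick period is 4,000,000 ns, so half a period (2,000,000 ns)
 * spread over 4 possible CPUs gives a 500,000 ns step. CPU 0 keeps its
 * tick unskewed, CPU 1 fires 500,000 ns later, CPU 3 1,500,000 ns later,
 * staggering the jiffies_lock contention across the period.
 */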
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1285 | /** |
| 1286 | * tick_setup_sched_timer - setup the tick emulation timer |
| 1287 | */ |
| 1288 | void tick_setup_sched_timer(void) |
| 1289 | { |
Christoph Lameter | 22127e9 | 2014-08-17 12:30:25 -0500 | [diff] [blame] | 1290 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1291 | ktime_t now = ktime_get(); |
| 1292 | |
| 1293 | /* |
| 1294 | * Emulate tick processing via per-CPU hrtimers: |
| 1295 | */ |
| 1296 | hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); |
| 1297 | ts->sched_timer.function = tick_sched_timer; |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1298 | |
Ingo Molnar | 0de7611 | 2016-07-01 12:42:35 +0200 | [diff] [blame] | 1299 | /* Get the next period (per-CPU) */ |
Arjan van de Ven | cc584b2 | 2008-09-01 15:02:30 -0700 | [diff] [blame] | 1300 | hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1301 | |
Thomas Gleixner | 9c3f9e2 | 2012-11-21 20:31:52 +0100 | [diff] [blame] | 1302 | /* Offset the tick to avert jiffies_lock contention. */ |
Mike Galbraith | 5307c95 | 2012-05-08 12:20:58 +0200 | [diff] [blame] | 1303 | if (sched_skew_tick) { |
| 1304 | u64 offset = ktime_to_ns(tick_period) >> 1; |
| 1305 | do_div(offset, num_possible_cpus()); |
| 1306 | offset *= smp_processor_id(); |
| 1307 | hrtimer_add_expires_ns(&ts->sched_timer, offset); |
| 1308 | } |
| 1309 | |
Thomas Gleixner | afc08b1 | 2015-04-14 21:08:52 +0000 | [diff] [blame] | 1310 | hrtimer_forward(&ts->sched_timer, now, tick_period); |
| 1311 | hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED); |
Thomas Gleixner | bc7a34b | 2015-05-26 22:50:33 +0000 | [diff] [blame] | 1312 | tick_nohz_activate(ts, NOHZ_MODE_HIGHRES); |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1313 | } |
Miao Xie | 3c4fbe5 | 2008-08-20 16:37:38 -0700 | [diff] [blame] | 1314 | #endif /* HIGH_RES_TIMERS */ |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1315 | |
Frederic Weisbecker | 3451d02 | 2011-08-10 23:21:01 +0200 | [diff] [blame] | 1316 | #if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1317 | void tick_cancel_sched_timer(int cpu) |
| 1318 | { |
| 1319 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); |
| 1320 | |
Miao Xie | 3c4fbe5 | 2008-08-20 16:37:38 -0700 | [diff] [blame] | 1321 | # ifdef CONFIG_HIGH_RES_TIMERS |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1322 | if (ts->sched_timer.base) |
| 1323 | hrtimer_cancel(&ts->sched_timer); |
Miao Xie | 3c4fbe5 | 2008-08-20 16:37:38 -0700 | [diff] [blame] | 1324 | # endif |
Karsten Wiese | a790176 | 2008-03-04 14:59:55 -0800 | [diff] [blame] | 1325 | |
Thomas Gleixner | 4b0c0f2 | 2013-05-03 15:02:50 +0200 | [diff] [blame] | 1326 | memset(ts, 0, sizeof(*ts)); |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1327 | } |
Miao Xie | 3c4fbe5 | 2008-08-20 16:37:38 -0700 | [diff] [blame] | 1328 | #endif |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1329 | |
| 1330 | /** |
| 1331 | * Async notification about clocksource changes |
| 1332 | */ |
| 1333 | void tick_clock_notify(void) |
| 1334 | { |
| 1335 | int cpu; |
| 1336 | |
| 1337 | for_each_possible_cpu(cpu) |
| 1338 | set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks); |
| 1339 | } |
| 1340 | |
| 1341 | /* |
| 1342 | * Async notification about clock event changes |
| 1343 | */ |
| 1344 | void tick_oneshot_notify(void) |
| 1345 | { |
Christoph Lameter | 22127e9 | 2014-08-17 12:30:25 -0500 | [diff] [blame] | 1346 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1347 | |
| 1348 | set_bit(0, &ts->check_clocks); |
| 1349 | } |
| 1350 | |
| 1351 | /** |
| 1352 | * Check if a change happened which makes oneshot possible. |
| 1353 | * |
| 1354 | * Called cyclically from the hrtimer softirq (driven by the timer |
| 1355 | * softirq). allow_nohz signals that we can switch into low-res nohz |
| 1356 | * mode, because high resolution timers are disabled (either at compile |
Thomas Gleixner | 6b442bc | 2015-05-07 14:35:59 +0200 | [diff] [blame] | 1357 | * time or at runtime). Called with interrupts disabled. |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1358 | */ |
| 1359 | int tick_check_oneshot_change(int allow_nohz) |
| 1360 | { |
Christoph Lameter | 22127e9 | 2014-08-17 12:30:25 -0500 | [diff] [blame] | 1361 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1362 | |
| 1363 | if (!test_and_clear_bit(0, &ts->check_clocks)) |
| 1364 | return 0; |
| 1365 | |
| 1366 | if (ts->nohz_mode != NOHZ_MODE_INACTIVE) |
| 1367 | return 0; |
| 1368 | |
Li Zefan | cf4fc6c | 2008-02-08 04:19:24 -0800 | [diff] [blame] | 1369 | if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available()) |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1370 | return 0; |
| 1371 | |
| 1372 | if (!allow_nohz) |
| 1373 | return 1; |
| 1374 | |
| 1375 | tick_nohz_switch_to_nohz(); |
| 1376 | return 0; |
| 1377 | } |
Mahesh Sivasubramanian | 930e235 | 2017-01-26 16:37:08 -0700 | [diff] [blame] | 1378 | |
| 1379 | ktime_t *get_next_event_cpu(unsigned int cpu) |
| 1380 | { |
| 1381 | return &(per_cpu(tick_cpu_device, cpu).evtdev->next_event); |
| 1382 | } |