/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 *
 * Window Assisted Load Tracking (WALT) implementation credits:
 * Srivatsa Vaddagiri, Steve Muckle, Syed Rameez Mustafa, Joonwoo Park,
 * Pavan Kumar Kondeti, Olav Haugan
 *
 * 2016-03-06: Integration with EAS/refactoring by Vikram Mulukutla
 *             and Todd Kjos
 */

#include <linux/acpi.h>
#include <linux/syscore_ops.h>
#include <trace/events/sched.h>
#include "sched.h"
#include "walt.h"

#define WINDOW_STATS_RECENT		0
#define WINDOW_STATS_MAX		1
#define WINDOW_STATS_MAX_RECENT_AVG	2
#define WINDOW_STATS_AVG		3
#define WINDOW_STATS_INVALID_POLICY	4

#define EXITING_TASK_MARKER	0xdeaddead

static __read_mostly unsigned int walt_ravg_hist_size = 5;
static __read_mostly unsigned int walt_window_stats_policy =
	WINDOW_STATS_MAX_RECENT_AVG;
static __read_mostly unsigned int walt_account_wait_time = 1;
static __read_mostly unsigned int walt_freq_account_wait_time;
static __read_mostly unsigned int walt_io_is_busy;

unsigned int sysctl_sched_walt_init_task_load_pct = 15;

/* true -> use PELT-based load stats, false -> use window-based load stats */
bool __read_mostly walt_disabled = false;

/*
 * Window size (in ns). Adjust for the tick size so that the window
 * rollover occurs just before the tick boundary.
 */
__read_mostly unsigned int walt_ravg_window =
					(20000000 / TICK_NSEC) * TICK_NSEC;
#define MIN_SCHED_RAVG_WINDOW ((10000000 / TICK_NSEC) * TICK_NSEC)
#define MAX_SCHED_RAVG_WINDOW ((1000000000 / TICK_NSEC) * TICK_NSEC)

static unsigned int sync_cpu;
static ktime_t ktime_last;
static __read_mostly bool walt_ktime_suspended;

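/* Most recent demand estimate for @p, in capacity-scaled ns. */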
static unsigned int task_load(struct task_struct *p)
{
	return p->ravg.demand;
}

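/*
 * Apply a signed adjustment to the rq's cumulative window demand,
 * clamping at zero so that over-subtraction cannot wrap the unsigned
 * counter around to a huge value.
 */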
static inline void fixup_cum_window_demand(struct rq *rq, s64 delta)
{
	rq->cum_window_demand += delta;
	if (unlikely((s64)rq->cum_window_demand < 0))
		rq->cum_window_demand = 0;
}

void
walt_inc_cumulative_runnable_avg(struct rq *rq,
				 struct task_struct *p)
{
	rq->cumulative_runnable_avg += p->ravg.demand;

	/*
	 * Add a task's contribution to the cumulative window demand when
	 *
	 * (1) task is enqueued with on_rq = 1 i.e. migration,
	 *     prio/cgroup/class change.
	 * (2) task is waking for the first time in this window.
	 */
	if (p->on_rq || (p->last_sleep_ts < rq->window_start))
		fixup_cum_window_demand(rq, p->ravg.demand);
}

void
walt_dec_cumulative_runnable_avg(struct rq *rq,
				 struct task_struct *p)
{
	rq->cumulative_runnable_avg -= p->ravg.demand;
	BUG_ON((s64)rq->cumulative_runnable_avg < 0);

	/*
	 * on_rq will be 1 for sleeping tasks. So check if the task
	 * is migrating or dequeuing in RUNNING state to change the
	 * prio/cgroup/class.
	 */
	if (task_on_rq_migrating(p) || p->state == TASK_RUNNING)
		fixup_cum_window_demand(rq, -(s64)p->ravg.demand);
}

void
walt_fixup_cumulative_runnable_avg(struct rq *rq,
				   struct task_struct *p, u64 new_task_load)
{
	s64 task_load_delta = (s64)new_task_load - task_load(p);

	rq->cumulative_runnable_avg += task_load_delta;
	if ((s64)rq->cumulative_runnable_avg < 0)
		panic("cra less than zero: tld: %lld, task_load(p) = %u\n",
			task_load_delta, task_load(p));

	fixup_cum_window_demand(rq, task_load_delta);
}

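/*
 * WALT's wallclock. It follows ktime, but is frozen across suspend:
 * while suspended, the timestamp captured in walt_suspend() is returned
 * so that suspend time is never accounted as busy time.
 */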
u64 walt_ktime_clock(void)
{
	if (unlikely(walt_ktime_suspended))
		return ktime_to_ns(ktime_last);
	return ktime_get_ns();
}

static void walt_resume(void)
{
	walt_ktime_suspended = false;
}

static int walt_suspend(void)
{
	ktime_last = ktime_get();
	walt_ktime_suspended = true;
	return 0;
}

static struct syscore_ops walt_syscore_ops = {
	.resume = walt_resume,
	.suspend = walt_suspend
};

static int __init walt_init_ops(void)
{
	register_syscore_ops(&walt_syscore_ops);
	return 0;
}
late_initcall(walt_init_ops);

#ifdef CONFIG_CFS_BANDWIDTH
void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *cfs_rq,
		struct task_struct *p)
{
	cfs_rq->cumulative_runnable_avg += p->ravg.demand;
}

void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *cfs_rq,
		struct task_struct *p)
{
	cfs_rq->cumulative_runnable_avg -= p->ravg.demand;
}
#endif

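/*
 * Exiting tasks are dropped from load tracking. Writing
 * EXITING_TASK_MARKER into sum_history[0] tags the task so that
 * subsequent updates can detect and skip it cheaply.
 */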
static int exiting_task(struct task_struct *p)
{
	if (p->flags & PF_EXITING) {
		if (p->ravg.sum_history[0] != EXITING_TASK_MARKER)
			p->ravg.sum_history[0] = EXITING_TASK_MARKER;
		return 1;
	}
	return 0;
}

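/*
 * Parse the "walt_ravg_window" boot parameter, e.g.
 * "walt_ravg_window=20000000" for a 20ms window. The requested size is
 * rounded down to a whole number of ticks and must lie within
 * [MIN_SCHED_RAVG_WINDOW, MAX_SCHED_RAVG_WINDOW] or WALT is disabled.
 */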
static int __init set_walt_ravg_window(char *str)
{
	unsigned int adj_window;
	bool no_walt = walt_disabled;

	get_option(&str, &walt_ravg_window);

	/* Adjust for CONFIG_HZ */
	adj_window = (walt_ravg_window / TICK_NSEC) * TICK_NSEC;

	/* Warn if we're a bit too far away from the expected window size */
	WARN(adj_window < walt_ravg_window - NSEC_PER_MSEC,
	     "tick-adjusted window size %u, original was %u\n", adj_window,
	     walt_ravg_window);

	walt_ravg_window = adj_window;

	walt_disabled = walt_disabled ||
			(walt_ravg_window < MIN_SCHED_RAVG_WINDOW ||
			 walt_ravg_window > MAX_SCHED_RAVG_WINDOW);

	WARN(!no_walt && walt_disabled,
	     "invalid window size, disabling WALT\n");

	return 0;
}

early_param("walt_ravg_window", set_walt_ravg_window);

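/*
 * Advance rq->window_start to the start of the window containing
 * @wallclock, stepping over any fully elapsed windows in between. For
 * example, with a 20ms window, window_start = 100ms and wallclock =
 * 151ms, two full windows have elapsed and window_start becomes 140ms.
 * On rollover, the cumulative window demand restarts from the demand of
 * the tasks currently on the runqueue.
 */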
static void
update_window_start(struct rq *rq, u64 wallclock)
{
	s64 delta;
	int nr_windows;

	delta = wallclock - rq->window_start;
	/* If the MPM global timer is cleared, set delta to 0 to avoid a kernel BUG */
	if (delta < 0) {
		delta = 0;
		WARN_ONCE(1, "WALT wallclock appears to have gone backwards or reset\n");
	}

	if (delta < walt_ravg_window)
		return;

	nr_windows = div64_u64(delta, walt_ravg_window);
	rq->window_start += (u64)nr_windows * (u64)walt_ravg_window;

	rq->cum_window_demand = rq->cumulative_runnable_avg;
}

/*
 * Translate absolute delta time accounted on a CPU to a scale where
 * 1024 is the capacity of the most capable CPU running at FMAX. E.g.
 * time spent on a CPU currently at half of that capacity counts for
 * half of its wallclock duration.
 */
static u64 scale_exec_time(u64 delta, struct rq *rq)
{
	unsigned long capcurr = capacity_curr_of(cpu_of(rq));

	return (delta * capcurr) >> SCHED_CAPACITY_SHIFT;
}

static int cpu_is_waiting_on_io(struct rq *rq)
{
	if (!walt_io_is_busy)
		return 0;

	return atomic_read(&rq->nr_iowait);
}

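/*
 * Fold @delta ns of irq time into the CPU's irqload, timestamped in
 * jiffies. On the first update after one or more jiffies without irq
 * activity, the average is decayed (and zeroed outright after 10 or
 * more) before the newly accumulated irq time is added in.
 */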
void walt_account_irqtime(int cpu, struct task_struct *curr,
			  u64 delta, u64 wallclock)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags, nr_windows;
	u64 cur_jiffies_ts;

	raw_spin_lock_irqsave(&rq->lock, flags);

	/*
	 * cputime (wallclock) uses sched_clock so use the same here for
	 * consistency.
	 */
	delta += sched_clock() - wallclock;
	cur_jiffies_ts = get_jiffies_64();

	if (is_idle_task(curr))
		walt_update_task_ravg(curr, rq, IRQ_UPDATE, walt_ktime_clock(),
				      delta);

	nr_windows = cur_jiffies_ts - rq->irqload_ts;

	if (nr_windows) {
		if (nr_windows < 10) {
			/*
			 * Decay CPU's irqload by 3/4. Note that
			 * 3*nr_windows / (4*nr_windows) reduces to a
			 * single 3/4 factor however many windows have
			 * elapsed.
			 */
			rq->avg_irqload *= (3 * nr_windows);
			rq->avg_irqload = div64_u64(rq->avg_irqload,
						    4 * nr_windows);
		} else {
			rq->avg_irqload = 0;
		}
		rq->avg_irqload += rq->cur_irqload;
		rq->cur_irqload = 0;
	}

	rq->cur_irqload += delta;
	rq->irqload_ts = cur_jiffies_ts;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

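/*
 * A CPU's irqload is reported as zero once this many jiffies have
 * passed since it was last updated.
 */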
#define WALT_HIGH_IRQ_TIMEOUT 3

u64 walt_irqload(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	s64 delta;

	delta = get_jiffies_64() - rq->irqload_ts;

	/*
	 * The current context can be preempted by an irq, and
	 * rq->irqload_ts can be updated from irq context, so delta can
	 * be negative. This is okay: a negative delta means there was a
	 * recent irq occurrence, so returning the average irqload is
	 * still the right thing to do.
	 */
	if (delta < WALT_HIGH_IRQ_TIMEOUT)
		return rq->avg_irqload;
	else
		return 0;
}

int walt_cpu_high_irqload(int cpu)
{
	return walt_irqload(cpu) >= sysctl_sched_walt_cpu_high_irqload;
}

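/*
 * Decide whether the time since the last update event counts as busy
 * time for CPU frequency guidance. Wait time only counts when
 * walt_freq_account_wait_time is set; idle time only counts when it was
 * actually spent handling irqs or waiting on IO.
 */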
static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p,
				     u64 irqtime, int event)
{
	if (is_idle_task(p)) {
		/* TASK_WAKE and TASK_MIGRATE are not possible on the idle task! */
		if (event == PICK_NEXT_TASK)
			return 0;

		/* That leaves PUT_PREV_TASK, TASK_UPDATE and IRQ_UPDATE */
		return irqtime || cpu_is_waiting_on_io(rq);
	}

	if (event == TASK_WAKE)
		return 0;

	if (event == PUT_PREV_TASK || event == IRQ_UPDATE ||
					event == TASK_UPDATE)
		return 1;

	/* Only TASK_MIGRATE and PICK_NEXT_TASK are left */
	return walt_freq_account_wait_time;
}

/*
 * Account cpu activity in its busy time counters (rq->curr/prev_runnable_sum)
 */
static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
				 int event, u64 wallclock, u64 irqtime)
{
	int new_window, nr_full_windows = 0;
	int p_is_curr_task = (p == rq->curr);
	u64 mark_start = p->ravg.mark_start;
	u64 window_start = rq->window_start;
	u32 window_size = walt_ravg_window;
	u64 delta;

	new_window = mark_start < window_start;
	if (new_window) {
		nr_full_windows = div64_u64((window_start - mark_start),
						window_size);
		if (p->ravg.active_windows < USHRT_MAX)
			p->ravg.active_windows++;
	}

	/* Handle per-task window rollover. We don't care about the idle
	 * task or exiting tasks. */
	if (new_window && !is_idle_task(p) && !exiting_task(p)) {
		u32 curr_window = 0;

		if (!nr_full_windows)
			curr_window = p->ravg.curr_window;

		p->ravg.prev_window = curr_window;
		p->ravg.curr_window = 0;
	}

	if (!account_busy_for_cpu_time(rq, p, irqtime, event)) {
		/* account_busy_for_cpu_time() = 0, so no update to the
		 * task's current window needs to be made. This could be
		 * for example
		 *
		 * - a wakeup event on a task within the current
		 *   window (!new_window below, no action required),
		 * - switching to a new task from idle (PICK_NEXT_TASK)
		 *   in a new window where irqtime is 0 and we aren't
		 *   waiting on IO */

		if (!new_window)
			return;

		/* A new window has started. The RQ demand must be rolled
		 * over if p is the current task. */
		if (p_is_curr_task) {
			u64 prev_sum = 0;

			/* p is either idle task or an exiting task */
			if (!nr_full_windows)
				prev_sum = rq->curr_runnable_sum;

			rq->prev_runnable_sum = prev_sum;
			rq->curr_runnable_sum = 0;
		}

		return;
	}

	if (!new_window) {
		/* account_busy_for_cpu_time() = 1 so busy time needs
		 * to be accounted to the current window. No rollover
		 * since we didn't start a new window. An example of this is
		 * when a task starts execution and then sleeps within the
		 * same window. */

		if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq))
			delta = wallclock - mark_start;
		else
			delta = irqtime;
		delta = scale_exec_time(delta, rq);
		rq->curr_runnable_sum += delta;
		if (!is_idle_task(p) && !exiting_task(p))
			p->ravg.curr_window += delta;

		return;
	}

	if (!p_is_curr_task) {
		/* account_busy_for_cpu_time() = 1 so busy time needs
		 * to be accounted to the current window. A new window
		 * has also started, but p is not the current task, so the
		 * window is not rolled over - just split up and account
		 * as necessary into curr and prev. The window is only
		 * rolled over when a new window is processed for the current
		 * task.
		 *
		 * Irqtime can't be accounted by a task that isn't the
		 * currently running task. */

		if (!nr_full_windows) {
			/* A full window hasn't elapsed, account partial
			 * contribution to previous completed window. */
			delta = scale_exec_time(window_start - mark_start, rq);
			if (!exiting_task(p))
				p->ravg.prev_window += delta;
		} else {
			/* Since at least one full window has elapsed,
			 * the contribution to the previous window is the
			 * full window (window_size). */
			delta = scale_exec_time(window_size, rq);
			if (!exiting_task(p))
				p->ravg.prev_window = delta;
		}
		rq->prev_runnable_sum += delta;

		/* Account piece of busy time in the current window. */
		delta = scale_exec_time(wallclock - window_start, rq);
		rq->curr_runnable_sum += delta;
		if (!exiting_task(p))
			p->ravg.curr_window = delta;

		return;
	}

	if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq)) {
		/* account_busy_for_cpu_time() = 1 so busy time needs
		 * to be accounted to the current window. A new window
		 * has started and p is the current task so rollover is
		 * needed. If any of these three above conditions are true
		 * then this busy time can't be accounted as irqtime.
		 *
		 * Busy time for the idle task or exiting tasks need not
		 * be accounted.
		 *
		 * An example of this would be a task that starts execution
		 * and then sleeps once a new window has begun. */

		if (!nr_full_windows) {
			/* A full window hasn't elapsed, account partial
			 * contribution to previous completed window. */
			delta = scale_exec_time(window_start - mark_start, rq);
			if (!is_idle_task(p) && !exiting_task(p))
				p->ravg.prev_window += delta;

			delta += rq->curr_runnable_sum;
		} else {
			/* Since at least one full window has elapsed,
			 * the contribution to the previous window is the
			 * full window (window_size). */
			delta = scale_exec_time(window_size, rq);
			if (!is_idle_task(p) && !exiting_task(p))
				p->ravg.prev_window = delta;
		}
		/*
		 * Rollover for normal runnable sum is done here by overwriting
		 * the values in prev_runnable_sum and curr_runnable_sum.
		 * Rollover for new task runnable sum has completed by the
		 * previous if-else statement.
		 */
		rq->prev_runnable_sum = delta;

		/* Account piece of busy time in the current window. */
		delta = scale_exec_time(wallclock - window_start, rq);
		rq->curr_runnable_sum = delta;
		if (!is_idle_task(p) && !exiting_task(p))
			p->ravg.curr_window = delta;

		return;
	}

	if (irqtime) {
		/* account_busy_for_cpu_time() = 1 so busy time needs
		 * to be accounted to the current window. A new window
		 * has started and p is the current task so rollover is
		 * needed. The current task must be the idle task because
		 * irqtime is not accounted for any other task.
		 *
		 * Irqtime will be accounted each time we process IRQ activity
		 * after a period of idleness, so we know the IRQ busy time
		 * started at wallclock - irqtime. */

		BUG_ON(!is_idle_task(p));
		mark_start = wallclock - irqtime;

		/* Roll window over. If IRQ busy time was just in the current
		 * window then that is all that need be accounted. */
		rq->prev_runnable_sum = rq->curr_runnable_sum;
		if (mark_start > window_start) {
			rq->curr_runnable_sum = scale_exec_time(irqtime, rq);
			return;
		}

		/* The IRQ busy time spanned multiple windows. Process the
		 * busy time preceding the current window start first. */
		delta = window_start - mark_start;
		if (delta > window_size)
			delta = window_size;
		delta = scale_exec_time(delta, rq);
		rq->prev_runnable_sum += delta;

		/* Process the remaining IRQ busy time in the current window. */
		delta = wallclock - window_start;
		rq->curr_runnable_sum = scale_exec_time(delta, rq);

		return;
	}

	BUG();
}

static int account_busy_for_task_demand(struct task_struct *p, int event)
{
	/* No need to bother updating task demand for exiting tasks
	 * or the idle task. */
	if (exiting_task(p) || is_idle_task(p))
		return 0;

	/* When a task is waking up it is completing a segment of non-busy
	 * time. Likewise, if wait time is not treated as busy time, then
	 * when a task begins to run or is migrated, it is not running and
	 * is completing a segment of non-busy time. */
	if (event == TASK_WAKE || (!walt_account_wait_time &&
			 (event == PICK_NEXT_TASK || event == TASK_MIGRATE)))
		return 0;

	return 1;
}

/*
 * Called when a new window is starting for a task, to record cpu usage over
 * recently concluded window(s). Normally 'samples' should be 1. It can be > 1
 * when, say, a real-time task runs without preemption for several windows at a
 * stretch.
 */
static void update_history(struct rq *rq, struct task_struct *p,
			 u32 runtime, int samples, int event)
{
	u32 *hist = &p->ravg.sum_history[0];
	int ridx, widx;
	u32 max = 0, avg, demand;
	u64 sum = 0;

	/* Ignore windows where task had no activity */
	if (!runtime || is_idle_task(p) || exiting_task(p) || !samples)
		goto done;

	/* Push new 'runtime' value onto stack */
	widx = walt_ravg_hist_size - 1;
	ridx = widx - samples;
	for (; ridx >= 0; --widx, --ridx) {
		hist[widx] = hist[ridx];
		sum += hist[widx];
		if (hist[widx] > max)
			max = hist[widx];
	}

	for (widx = 0; widx < samples && widx < walt_ravg_hist_size; widx++) {
		hist[widx] = runtime;
		sum += hist[widx];
		if (hist[widx] > max)
			max = hist[widx];
	}

	p->ravg.sum = 0;

	if (walt_window_stats_policy == WINDOW_STATS_RECENT) {
		demand = runtime;
	} else if (walt_window_stats_policy == WINDOW_STATS_MAX) {
		demand = max;
	} else {
		avg = div64_u64(sum, walt_ravg_hist_size);
		if (walt_window_stats_policy == WINDOW_STATS_AVG)
			demand = avg;
		else
			demand = max(avg, runtime);
	}

	/*
	 * A throttled deadline sched class task gets dequeued without
	 * changing p->on_rq. Since the dequeue decrements hmp stats,
	 * avoid decrementing it here again.
	 *
	 * When the window is rolled over, the cumulative window demand
	 * is reset to the cumulative runnable average (contribution from
	 * the tasks on the runqueue). If the current task is dequeued
	 * already, its demand is not included in the cumulative runnable
	 * average. So add the task demand separately to cumulative window
	 * demand.
	 */
	if (!task_has_dl_policy(p) || !p->dl.dl_throttled) {
		if (task_on_rq_queued(p))
			p->sched_class->fixup_cumulative_runnable_avg(rq, p,
								      demand);
		else if (rq->curr == p)
			fixup_cum_window_demand(rq, demand);
	}

	p->ravg.demand = demand;

done:
	trace_walt_update_history(rq, p, runtime, samples, event);
}

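/*
 * Accumulate @delta ns of busy time into the task's window sum, scaled
 * by current CPU capacity and clamped to one full window.
 */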
static void add_to_task_demand(struct rq *rq, struct task_struct *p,
				u64 delta)
{
	delta = scale_exec_time(delta, rq);
	p->ravg.sum += delta;
	if (unlikely(p->ravg.sum > walt_ravg_window))
		p->ravg.sum = walt_ravg_window;
}

/*
 * Account cpu demand of task and/or update task's cpu demand history
 *
 * ms = p->ravg.mark_start;
 * wc = wallclock
 * ws = rq->window_start
 *
 * Three possibilities:
 *
 *	a) Task event is contained within one window.
 *		window_start < mark_start < wallclock
 *
 *		ws   ms  wc
 *		|    |   |
 *		V    V   V
 *		|---------------|
 *
 *	In this case, p->ravg.sum is updated *iff* event is appropriate
 *	(ex: event == PUT_PREV_TASK)
 *
 *	b) Task event spans two windows.
 *		mark_start < window_start < wallclock
 *
 *		ms   ws   wc
 *		|    |    |
 *		V    V    V
 *		-----|-------------------
 *
 *	In this case, p->ravg.sum is updated with (ws - ms) *iff* event
 *	is appropriate, then a new window sample is recorded followed
 *	by p->ravg.sum being set to (wc - ws) *iff* event is appropriate.
 *
 *	c) Task event spans more than two windows.
 *
 *		ms ws_tmp                          ws  wc
 *		|  |                               |   |
 *		V  V                               V   V
 *		---|-------|-------|-------|-------|------
 *		   |                               |
 *		   |<------ nr_full_windows ------>|
 *
 *	In this case, p->ravg.sum is updated with (ws_tmp - ms) first *iff*
 *	event is appropriate, window sample of p->ravg.sum is recorded,
 *	'nr_full_windows' samples of window_size are also recorded *iff*
 *	event is appropriate and finally p->ravg.sum is set to (wc - ws)
 *	*iff* event is appropriate.
 *
 * IMPORTANT : Leave p->ravg.mark_start unchanged, as update_cpu_busy_time()
 * depends on it!
 */
static void update_task_demand(struct task_struct *p, struct rq *rq,
	     int event, u64 wallclock)
{
	u64 mark_start = p->ravg.mark_start;
	u64 delta, window_start = rq->window_start;
	int new_window, nr_full_windows;
	u32 window_size = walt_ravg_window;

	new_window = mark_start < window_start;
	if (!account_busy_for_task_demand(p, event)) {
		if (new_window)
			/* If the time accounted isn't being accounted as
			 * busy time, and a new window started, only the
			 * previous window need be closed out with the
			 * pre-existing demand. Multiple windows may have
			 * elapsed, but since empty windows are dropped,
			 * it is not necessary to account those. */
			update_history(rq, p, p->ravg.sum, 1, event);
		return;
	}

	if (!new_window) {
		/* The simple case - busy time contained within the existing
		 * window. */
		add_to_task_demand(rq, p, wallclock - mark_start);
		return;
	}

	/* Busy time spans at least two windows. Temporarily rewind
	 * window_start to first window boundary after mark_start. */
	delta = window_start - mark_start;
	nr_full_windows = div64_u64(delta, window_size);
	window_start -= (u64)nr_full_windows * (u64)window_size;

	/* Process (window_start - mark_start) first */
	add_to_task_demand(rq, p, window_start - mark_start);

	/* Push new sample(s) into task's demand history */
	update_history(rq, p, p->ravg.sum, 1, event);
	if (nr_full_windows)
		update_history(rq, p, scale_exec_time(window_size, rq),
			       nr_full_windows, event);

	/* Roll window_start back to current to process any remainder
	 * in current window. */
	window_start += (u64)nr_full_windows * (u64)window_size;

	/* Process (wallclock - window_start) next */
	mark_start = window_start;
	add_to_task_demand(rq, p, wallclock - mark_start);
}

/* Reflect task activity on its demand and cpu's busy time statistics */
void walt_update_task_ravg(struct task_struct *p, struct rq *rq,
	     int event, u64 wallclock, u64 irqtime)
{
	if (walt_disabled || !rq->window_start)
		return;

	lockdep_assert_held(&rq->lock);

	update_window_start(rq, wallclock);

	if (!p->ravg.mark_start)
		goto done;

	update_task_demand(p, rq, event, wallclock);
	update_cpu_busy_time(p, rq, event, wallclock, irqtime);

done:
	trace_walt_update_task_ravg(p, rq, event, wallclock, irqtime);

	p->ravg.mark_start = wallclock;
}

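/*
 * Wipe a task's window statistics, preserving the EXITING_TASK_MARKER
 * in sum_history[0] if the task was already flagged as exiting.
 */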
static void reset_task_stats(struct task_struct *p)
{
	u32 sum = 0;

	if (exiting_task(p))
		sum = EXITING_TASK_MARKER;

	memset(&p->ravg, 0, sizeof(struct ravg));
	/* Retain EXITING_TASK marker */
	p->ravg.sum_history[0] = sum;
}

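/*
 * Stamp a new task's mark_start so that its first window of accounting
 * begins now rather than from a zero (unset) timestamp.
 */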
void walt_mark_task_starting(struct task_struct *p)
{
	u64 wallclock;
	struct rq *rq = task_rq(p);

	if (!rq->window_start) {
		reset_task_stats(p);
		return;
	}

	wallclock = walt_ktime_clock();
	p->ravg.mark_start = wallclock;
}

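/*
 * Set up a CPU's window_start the first time the CPU comes up. Every
 * CPU copies the sync_cpu's window_start, under double_rq_lock, so that
 * window boundaries line up system-wide; the sync_cpu itself seeds its
 * window_start with a non-zero sentinel.
 */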
void walt_set_window_start(struct rq *rq)
{
	int cpu = cpu_of(rq);
	struct rq *sync_rq = cpu_rq(sync_cpu);

	if (likely(rq->window_start))
		return;

	if (cpu == sync_cpu) {
		rq->window_start = 1;
	} else {
		raw_spin_unlock(&rq->lock);
		double_rq_lock(rq, sync_rq);
		rq->window_start = cpu_rq(sync_cpu)->window_start;
		rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
		raw_spin_unlock(&sync_rq->lock);
	}

	rq->curr->ravg.mark_start = rq->window_start;
}

void walt_migrate_sync_cpu(int cpu)
{
	if (cpu == sync_cpu)
		sync_cpu = smp_processor_id();
}

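/*
 * Move a migrating task's current- and previous-window contributions
 * from its old runqueue to the destination runqueue so that per-cpu
 * busy time stays consistent across the migration.
 */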
void walt_fixup_busy_time(struct task_struct *p, int new_cpu)
{
	struct rq *src_rq = task_rq(p);
	struct rq *dest_rq = cpu_rq(new_cpu);
	u64 wallclock;

	if (!p->on_rq && p->state != TASK_WAKING)
		return;

	if (exiting_task(p))
		return;

	if (p->state == TASK_WAKING)
		double_rq_lock(src_rq, dest_rq);

	wallclock = walt_ktime_clock();

	walt_update_task_ravg(task_rq(p)->curr, task_rq(p),
			TASK_UPDATE, wallclock, 0);
	walt_update_task_ravg(dest_rq->curr, dest_rq,
			TASK_UPDATE, wallclock, 0);

	walt_update_task_ravg(p, task_rq(p), TASK_MIGRATE, wallclock, 0);

	/*
	 * When a task is migrating during the wakeup, adjust
	 * the task's contribution towards cumulative window
	 * demand.
	 */
	if (p->state == TASK_WAKING &&
	    p->last_sleep_ts >= src_rq->window_start) {
		fixup_cum_window_demand(src_rq, -(s64)p->ravg.demand);
		fixup_cum_window_demand(dest_rq, p->ravg.demand);
	}

	if (p->ravg.curr_window) {
		src_rq->curr_runnable_sum -= p->ravg.curr_window;
		dest_rq->curr_runnable_sum += p->ravg.curr_window;
	}

	if (p->ravg.prev_window) {
		src_rq->prev_runnable_sum -= p->ravg.prev_window;
		dest_rq->prev_runnable_sum += p->ravg.prev_window;
	}

	if ((s64)src_rq->prev_runnable_sum < 0) {
		src_rq->prev_runnable_sum = 0;
		WARN_ON(1);
	}
	if ((s64)src_rq->curr_runnable_sum < 0) {
		src_rq->curr_runnable_sum = 0;
		WARN_ON(1);
	}

	trace_walt_migration_update_sum(src_rq, p);
	trace_walt_migration_update_sum(dest_rq, p);

	if (p->state == TASK_WAKING)
		double_rq_unlock(src_rq, dest_rq);
}

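/*
 * Seed a new task's demand and demand history with an initial load so
 * its first windows are not treated as zero demand, e.g. with the
 * default 15% and a 20ms window the task starts out with a 3ms demand.
 * The parent may override the percentage via its init_load_pct.
 */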
void walt_init_new_task_load(struct task_struct *p)
{
	int i;
	u32 init_load_windows =
			div64_u64((u64)sysctl_sched_walt_init_task_load_pct *
				  (u64)walt_ravg_window, 100);
	u32 init_load_pct = current->init_load_pct;

	p->init_load_pct = 0;
	memset(&p->ravg, 0, sizeof(struct ravg));

	if (init_load_pct) {
		init_load_windows = div64_u64((u64)init_load_pct *
					      (u64)walt_ravg_window, 100);
	}

	p->ravg.demand = init_load_windows;
	for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
		p->ravg.sum_history[i] = init_load_windows;
}