/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 *
 * Window Assisted Load Tracking (WALT) implementation credits:
 * Srivatsa Vaddagiri, Steve Muckle, Syed Rameez Mustafa, Joonwoo Park,
 * Pavan Kumar Kondeti, Olav Haugan
 *
 * 2016-03-06: Integration with EAS/refactoring by Vikram Mulukutla
 *             and Todd Kjos
 */

#include <linux/acpi.h>
#include <linux/syscore_ops.h>
#include <trace/events/sched.h>
#include "sched.h"
#include "walt.h"

#define WINDOW_STATS_RECENT		0
#define WINDOW_STATS_MAX		1
#define WINDOW_STATS_MAX_RECENT_AVG	2
#define WINDOW_STATS_AVG		3
#define WINDOW_STATS_INVALID_POLICY	4

#define EXITING_TASK_MARKER	0xdeaddead

static __read_mostly unsigned int walt_ravg_hist_size = 5;
static __read_mostly unsigned int walt_window_stats_policy =
	WINDOW_STATS_MAX_RECENT_AVG;
static __read_mostly unsigned int walt_account_wait_time = 1;
static __read_mostly unsigned int walt_freq_account_wait_time = 0;
static __read_mostly unsigned int walt_io_is_busy = 0;

unsigned int sysctl_sched_walt_init_task_load_pct = 15;

/* true -> use PELT based load stats, false -> use window-based load stats */
bool __read_mostly walt_disabled = false;

/*
 * Window size (in ns). Adjust for the tick size so that the window
 * rollover occurs just before the tick boundary.
 */
__read_mostly unsigned int walt_ravg_window =
			    (20000000 / TICK_NSEC) * TICK_NSEC;
#define MIN_SCHED_RAVG_WINDOW ((10000000 / TICK_NSEC) * TICK_NSEC)
#define MAX_SCHED_RAVG_WINDOW ((1000000000 / TICK_NSEC) * TICK_NSEC)
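
/*
 * For illustration: with HZ=300 (TICK_NSEC ~= 3,333,333 ns) the nominal
 * 20 ms window becomes (20000000 / 3333333) * 3333333 = 19,999,998 ns,
 * i.e. the largest multiple of the tick period that fits in 20 ms, so
 * that the window rolls over just before a tick boundary.
 */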

static unsigned int sync_cpu;
static ktime_t ktime_last;
static __read_mostly bool walt_ktime_suspended;

static unsigned int task_load(struct task_struct *p)
{
	return p->ravg.demand;
}

static inline void fixup_cum_window_demand(struct rq *rq, s64 delta)
{
	rq->cum_window_demand += delta;
	if (unlikely((s64)rq->cum_window_demand < 0))
		rq->cum_window_demand = 0;
}

void
walt_inc_cumulative_runnable_avg(struct rq *rq,
				 struct task_struct *p)
{
	rq->cumulative_runnable_avg += p->ravg.demand;

	/*
	 * Add a task's contribution to the cumulative window demand when
	 *
	 * (1) task is enqueued with on_rq = 1 i.e migration,
	 *     prio/cgroup/class change.
	 * (2) task is waking for the first time in this window.
	 */
	if (p->on_rq || (p->last_sleep_ts < rq->window_start))
		fixup_cum_window_demand(rq, p->ravg.demand);
}

void
walt_dec_cumulative_runnable_avg(struct rq *rq,
				 struct task_struct *p)
{
	rq->cumulative_runnable_avg -= p->ravg.demand;
	BUG_ON((s64)rq->cumulative_runnable_avg < 0);

	/*
	 * on_rq will be 1 for sleeping tasks. So check if the task
	 * is migrating or dequeuing in RUNNING state to change the
	 * prio/cgroup/class.
	 */
	if (task_on_rq_migrating(p) || p->state == TASK_RUNNING)
		fixup_cum_window_demand(rq, -(s64)p->ravg.demand);
}

static void
fixup_cumulative_runnable_avg(struct rq *rq,
			      struct task_struct *p, u64 new_task_load)
{
	s64 task_load_delta = (s64)new_task_load - task_load(p);

	rq->cumulative_runnable_avg += task_load_delta;
	if ((s64)rq->cumulative_runnable_avg < 0)
		panic("cra less than zero: tld: %lld, task_load(p) = %u\n",
			task_load_delta, task_load(p));

	fixup_cum_window_demand(rq, task_load_delta);
}

u64 walt_ktime_clock(void)
{
	if (unlikely(walt_ktime_suspended))
		return ktime_to_ns(ktime_last);
	return ktime_get_ns();
}

static void walt_resume(void)
{
	walt_ktime_suspended = false;
}

static int walt_suspend(void)
{
	ktime_last = ktime_get();
	walt_ktime_suspended = true;
	return 0;
}

static struct syscore_ops walt_syscore_ops = {
	.resume = walt_resume,
	.suspend = walt_suspend
};

static int __init walt_init_ops(void)
{
	register_syscore_ops(&walt_syscore_ops);
	return 0;
}
late_initcall(walt_init_ops);

#ifdef CONFIG_CFS_BANDWIDTH
void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *cfs_rq,
		struct task_struct *p)
{
	cfs_rq->cumulative_runnable_avg += p->ravg.demand;
}

void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *cfs_rq,
		struct task_struct *p)
{
	cfs_rq->cumulative_runnable_avg -= p->ravg.demand;
}
#endif

static int exiting_task(struct task_struct *p)
{
	if (p->flags & PF_EXITING) {
		if (p->ravg.sum_history[0] != EXITING_TASK_MARKER) {
			p->ravg.sum_history[0] = EXITING_TASK_MARKER;
		}
		return 1;
	}
	return 0;
}

static int __init set_walt_ravg_window(char *str)
{
	unsigned int adj_window;
	bool no_walt = walt_disabled;

	get_option(&str, &walt_ravg_window);

	/* Adjust for CONFIG_HZ */
	adj_window = (walt_ravg_window / TICK_NSEC) * TICK_NSEC;

	/* Warn if we're a bit too far away from the expected window size */
	WARN(adj_window < walt_ravg_window - NSEC_PER_MSEC,
	     "tick-adjusted window size %u, original was %u\n", adj_window,
	     walt_ravg_window);

	walt_ravg_window = adj_window;

	walt_disabled = walt_disabled ||
			(walt_ravg_window < MIN_SCHED_RAVG_WINDOW ||
			 walt_ravg_window > MAX_SCHED_RAVG_WINDOW);

	WARN(!no_walt && walt_disabled,
	     "invalid window size, disabling WALT\n");

	return 0;
}

early_param("walt_ravg_window", set_walt_ravg_window);

static void
update_window_start(struct rq *rq, u64 wallclock)
{
	s64 delta;
	int nr_windows;

	delta = wallclock - rq->window_start;
	/* If the MPM global timer is cleared, set delta to 0 to avoid a kernel BUG */
	if (delta < 0) {
		delta = 0;
		WARN_ONCE(1, "WALT wallclock appears to have gone backwards or reset\n");
	}

	if (delta < walt_ravg_window)
		return;

	nr_windows = div64_u64(delta, walt_ravg_window);
	rq->window_start += (u64)nr_windows * (u64)walt_ravg_window;

	rq->cum_window_demand = rq->cumulative_runnable_avg;
}
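
/*
 * For illustration: with a 20 ms window, if 45 ms have elapsed since
 * rq->window_start then nr_windows = 2 and window_start advances by
 * 40 ms; the remaining 5 ms fall into the new, still-open window, and
 * cum_window_demand is re-seeded from cumulative_runnable_avg.
 */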

/*
 * Translate absolute delta time accounted on a CPU
 * to a scale where 1024 is the capacity of the most
 * capable CPU running at FMAX
 */
static u64 scale_exec_time(u64 delta, struct rq *rq)
{
	unsigned long capcurr = capacity_curr_of(cpu_of(rq));

	return (delta * capcurr) >> SCHED_CAPACITY_SHIFT;
}
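
/*
 * For illustration: if the CPU currently provides half of the peak
 * capacity * frequency product in the system (capcurr = 512), then
 * 2,000,000 ns of raw busy time scales to (2000000 * 512) >> 10 =
 * 1,000,000 ns of capacity-invariant busy time.
 */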

static int cpu_is_waiting_on_io(struct rq *rq)
{
	if (!walt_io_is_busy)
		return 0;

	return atomic_read(&rq->nr_iowait);
}

void walt_account_irqtime(int cpu, struct task_struct *curr,
				 u64 delta, u64 wallclock)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags, nr_windows;
	u64 cur_jiffies_ts;

	raw_spin_lock_irqsave(&rq->lock, flags);

	/*
	 * cputime (wallclock) uses sched_clock so use the same here for
	 * consistency.
	 */
	delta += sched_clock() - wallclock;
	cur_jiffies_ts = get_jiffies_64();

	if (is_idle_task(curr))
		walt_update_task_ravg(curr, rq, IRQ_UPDATE, walt_ktime_clock(),
				      delta);

	nr_windows = cur_jiffies_ts - rq->irqload_ts;

	if (nr_windows) {
		if (nr_windows < 10) {
			/* Decay CPU's irqload by 3/4 for each window. */
			rq->avg_irqload *= (3 * nr_windows);
			rq->avg_irqload = div64_u64(rq->avg_irqload,
						    4 * nr_windows);
		} else {
			rq->avg_irqload = 0;
		}
		rq->avg_irqload += rq->cur_irqload;
		rq->cur_irqload = 0;
	}

	rq->cur_irqload += delta;
	rq->irqload_ts = cur_jiffies_ts;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}
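
/*
 * For illustration: with avg_irqload = 4000 and nr_windows = 2, the
 * decay above computes 4000 * 6 / 8 = 3000 before cur_irqload is folded
 * in; once 10 or more jiffies have passed, the history is dropped
 * entirely.
 */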


#define WALT_HIGH_IRQ_TIMEOUT 3

u64 walt_irqload(int cpu) {
	struct rq *rq = cpu_rq(cpu);
	s64 delta;
	delta = get_jiffies_64() - rq->irqload_ts;

	/*
	 * Current context can be preempted by irq and rq->irqload_ts can be
	 * updated by irq context so that delta can be negative.
	 * But this is okay and we can safely return as this means there
	 * was a recent irq occurrence.
	 */

	if (delta < WALT_HIGH_IRQ_TIMEOUT)
		return rq->avg_irqload;
	else
		return 0;
}

int walt_cpu_high_irqload(int cpu) {
	return walt_irqload(cpu) >= sysctl_sched_walt_cpu_high_irqload;
}
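
/*
 * Note: WALT_HIGH_IRQ_TIMEOUT is in jiffies, so the "recent irq" horizon
 * depends on CONFIG_HZ - roughly 30 ms at HZ=100, 12 ms at HZ=250 and
 * 3 ms at HZ=1000.
 */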

static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p,
				     u64 irqtime, int event)
{
	if (is_idle_task(p)) {
		/* TASK_WAKE && TASK_MIGRATE is not possible on idle task! */
		if (event == PICK_NEXT_TASK)
			return 0;

		/* PUT_PREV_TASK, TASK_UPDATE && IRQ_UPDATE are left */
		return irqtime || cpu_is_waiting_on_io(rq);
	}

	if (event == TASK_WAKE)
		return 0;

	if (event == PUT_PREV_TASK || event == IRQ_UPDATE ||
					event == TASK_UPDATE)
		return 1;

	/* Only TASK_MIGRATE && PICK_NEXT_TASK left */
	return walt_freq_account_wait_time;
}

/*
 * Account cpu activity in its busy time counters (rq->curr/prev_runnable_sum)
 */
static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
				 int event, u64 wallclock, u64 irqtime)
{
	int new_window, nr_full_windows = 0;
	int p_is_curr_task = (p == rq->curr);
	u64 mark_start = p->ravg.mark_start;
	u64 window_start = rq->window_start;
	u32 window_size = walt_ravg_window;
	u64 delta;

	new_window = mark_start < window_start;
	if (new_window) {
		nr_full_windows = div64_u64((window_start - mark_start),
						window_size);
		if (p->ravg.active_windows < USHRT_MAX)
			p->ravg.active_windows++;
	}

	/* Handle per-task window rollover. We don't care about the idle
	 * task or exiting tasks. */
	if (new_window && !is_idle_task(p) && !exiting_task(p)) {
		u32 curr_window = 0;

		if (!nr_full_windows)
			curr_window = p->ravg.curr_window;

		p->ravg.prev_window = curr_window;
		p->ravg.curr_window = 0;
	}

	if (!account_busy_for_cpu_time(rq, p, irqtime, event)) {
		/* account_busy_for_cpu_time() = 0, so no update to the
		 * task's current window needs to be made. This could be
		 * for example
		 *
		 *   - a wakeup event on a task within the current
		 *     window (!new_window below, no action required),
		 *   - switching to a new task from idle (PICK_NEXT_TASK)
		 *     in a new window where irqtime is 0 and we aren't
		 *     waiting on IO */

		if (!new_window)
			return;

		/* A new window has started. The RQ demand must be rolled
		 * over if p is the current task. */
		if (p_is_curr_task) {
			u64 prev_sum = 0;

			/* p is either idle task or an exiting task */
			if (!nr_full_windows) {
				prev_sum = rq->curr_runnable_sum;
			}

			rq->prev_runnable_sum = prev_sum;
			rq->curr_runnable_sum = 0;
		}

		return;
	}

	if (!new_window) {
		/* account_busy_for_cpu_time() = 1 so busy time needs
		 * to be accounted to the current window. No rollover
		 * since we didn't start a new window. An example of this is
		 * when a task starts execution and then sleeps within the
		 * same window. */

		if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq))
			delta = wallclock - mark_start;
		else
			delta = irqtime;
		delta = scale_exec_time(delta, rq);
		rq->curr_runnable_sum += delta;
		if (!is_idle_task(p) && !exiting_task(p))
			p->ravg.curr_window += delta;

		return;
	}

	if (!p_is_curr_task) {
		/* account_busy_for_cpu_time() = 1 so busy time needs
		 * to be accounted to the current window. A new window
		 * has also started, but p is not the current task, so the
		 * window is not rolled over - just split up and account
		 * as necessary into curr and prev. The window is only
		 * rolled over when a new window is processed for the current
		 * task.
		 *
		 * Irqtime can't be accounted by a task that isn't the
		 * currently running task. */

		if (!nr_full_windows) {
			/* A full window hasn't elapsed, account partial
			 * contribution to previous completed window. */
			delta = scale_exec_time(window_start - mark_start, rq);
			if (!exiting_task(p))
				p->ravg.prev_window += delta;
		} else {
			/* Since at least one full window has elapsed,
			 * the contribution to the previous window is the
			 * full window (window_size). */
			delta = scale_exec_time(window_size, rq);
			if (!exiting_task(p))
				p->ravg.prev_window = delta;
		}
		rq->prev_runnable_sum += delta;

		/* Account piece of busy time in the current window. */
		delta = scale_exec_time(wallclock - window_start, rq);
		rq->curr_runnable_sum += delta;
		if (!exiting_task(p))
			p->ravg.curr_window = delta;

		return;
	}

	if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq)) {
		/* account_busy_for_cpu_time() = 1 so busy time needs
		 * to be accounted to the current window. A new window
		 * has started and p is the current task so rollover is
		 * needed. If any of these three above conditions are true
		 * then this busy time can't be accounted as irqtime.
		 *
		 * Busy time for the idle task or exiting tasks need not
		 * be accounted.
		 *
		 * An example of this would be a task that starts execution
		 * and then sleeps once a new window has begun. */

		if (!nr_full_windows) {
			/* A full window hasn't elapsed, account partial
			 * contribution to previous completed window. */
			delta = scale_exec_time(window_start - mark_start, rq);
			if (!is_idle_task(p) && !exiting_task(p))
				p->ravg.prev_window += delta;

			delta += rq->curr_runnable_sum;
		} else {
			/* Since at least one full window has elapsed,
			 * the contribution to the previous window is the
			 * full window (window_size). */
			delta = scale_exec_time(window_size, rq);
			if (!is_idle_task(p) && !exiting_task(p))
				p->ravg.prev_window = delta;

		}
		/*
		 * Rollover for normal runnable sum is done here by overwriting
		 * the values in prev_runnable_sum and curr_runnable_sum.
		 * Rollover for new task runnable sum has completed by previous
		 * if-else statement.
		 */
		rq->prev_runnable_sum = delta;

		/* Account piece of busy time in the current window. */
		delta = scale_exec_time(wallclock - window_start, rq);
		rq->curr_runnable_sum = delta;
		if (!is_idle_task(p) && !exiting_task(p))
			p->ravg.curr_window = delta;

		return;
	}

	if (irqtime) {
		/* account_busy_for_cpu_time() = 1 so busy time needs
		 * to be accounted to the current window. A new window
		 * has started and p is the current task so rollover is
		 * needed. The current task must be the idle task because
		 * irqtime is not accounted for any other task.
		 *
		 * Irqtime will be accounted each time we process IRQ activity
		 * after a period of idleness, so we know the IRQ busy time
		 * started at wallclock - irqtime. */

		BUG_ON(!is_idle_task(p));
		mark_start = wallclock - irqtime;

		/* Roll window over. If IRQ busy time was just in the current
		 * window then that is all that need be accounted. */
		rq->prev_runnable_sum = rq->curr_runnable_sum;
		if (mark_start > window_start) {
			rq->curr_runnable_sum = scale_exec_time(irqtime, rq);
			return;
		}

		/* The IRQ busy time spanned multiple windows. Process the
		 * busy time preceding the current window start first. */
		delta = window_start - mark_start;
		if (delta > window_size)
			delta = window_size;
		delta = scale_exec_time(delta, rq);
		rq->prev_runnable_sum += delta;

		/* Process the remaining IRQ busy time in the current window. */
		delta = wallclock - window_start;
		rq->curr_runnable_sum = scale_exec_time(delta, rq);

		return;
	}

	BUG();
}
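
/*
 * For illustration: a task that is not rq->curr, whose mark_start lies
 * 3 ms before the window boundary, updated 4 ms after it with no full
 * windows in between, adds scale_exec_time(3 ms) to its prev_window and
 * to rq->prev_runnable_sum, and records scale_exec_time(4 ms) in its
 * curr_window and rq->curr_runnable_sum, without rolling the rq windows
 * over.
 */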

static int account_busy_for_task_demand(struct task_struct *p, int event)
{
	/* No need to bother updating task demand for exiting tasks
	 * or the idle task. */
	if (exiting_task(p) || is_idle_task(p))
		return 0;

	/* When a task is waking up it is completing a segment of non-busy
	 * time. Likewise, if wait time is not treated as busy time, then
	 * when a task begins to run or is migrated, it is not running and
	 * is completing a segment of non-busy time. */
	if (event == TASK_WAKE || (!walt_account_wait_time &&
		 (event == PICK_NEXT_TASK || event == TASK_MIGRATE)))
		return 0;

	return 1;
}

/*
 * Called when new window is starting for a task, to record cpu usage over
 * recently concluded window(s). Normally 'samples' should be 1. It can be > 1
 * when, say, a real-time task runs without preemption for several windows at a
 * stretch.
 */
static void update_history(struct rq *rq, struct task_struct *p,
			   u32 runtime, int samples, int event)
{
	u32 *hist = &p->ravg.sum_history[0];
	int ridx, widx;
	u32 max = 0, avg, demand;
	u64 sum = 0;

	/* Ignore windows where task had no activity */
	if (!runtime || is_idle_task(p) || exiting_task(p) || !samples)
		goto done;

	/* Push new 'runtime' value onto stack */
	widx = walt_ravg_hist_size - 1;
	ridx = widx - samples;
	for (; ridx >= 0; --widx, --ridx) {
		hist[widx] = hist[ridx];
		sum += hist[widx];
		if (hist[widx] > max)
			max = hist[widx];
	}

	for (widx = 0; widx < samples && widx < walt_ravg_hist_size; widx++) {
		hist[widx] = runtime;
		sum += hist[widx];
		if (hist[widx] > max)
			max = hist[widx];
	}

	p->ravg.sum = 0;

	if (walt_window_stats_policy == WINDOW_STATS_RECENT) {
		demand = runtime;
	} else if (walt_window_stats_policy == WINDOW_STATS_MAX) {
		demand = max;
	} else {
		avg = div64_u64(sum, walt_ravg_hist_size);
		if (walt_window_stats_policy == WINDOW_STATS_AVG)
			demand = avg;
		else
			demand = max(avg, runtime);
	}

	/*
	 * A throttled deadline sched class task gets dequeued without
	 * changing p->on_rq. Since the dequeue decrements hmp stats
	 * avoid decrementing it here again.
	 *
	 * When window is rolled over, the cumulative window demand
	 * is reset to the cumulative runnable average (contribution from
	 * the tasks on the runqueue). If the current task is dequeued
	 * already, its demand is not included in the cumulative runnable
	 * average. So add the task demand separately to cumulative window
	 * demand.
	 */
	if (!task_has_dl_policy(p) || !p->dl.dl_throttled) {
		if (task_on_rq_queued(p))
			fixup_cumulative_runnable_avg(rq, p, demand);
		else if (rq->curr == p)
			fixup_cum_window_demand(rq, demand);
	}

	p->ravg.demand = demand;

done:
	trace_walt_update_history(rq, p, runtime, samples, event);
	return;
}
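
/*
 * For illustration: with a history of {6, 4, 8, 5, 3} (scaled ns, most
 * recent first) and a new sample of 7, the history becomes
 * {7, 6, 4, 8, 5}. The resulting demand depends on the policy:
 * WINDOW_STATS_RECENT -> 7, WINDOW_STATS_MAX -> 8, WINDOW_STATS_AVG -> 6
 * and the default WINDOW_STATS_MAX_RECENT_AVG -> max(6, 7) = 7.
 */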

static void add_to_task_demand(struct rq *rq, struct task_struct *p,
				u64 delta)
{
	delta = scale_exec_time(delta, rq);
	p->ravg.sum += delta;
	if (unlikely(p->ravg.sum > walt_ravg_window))
		p->ravg.sum = walt_ravg_window;
}

/*
 * Account cpu demand of task and/or update task's cpu demand history
 *
 * ms = p->ravg.mark_start;
 * wc = wallclock
 * ws = rq->window_start
 *
 * Three possibilities:
 *
 *	a) Task event is contained within one window.
 *		window_start < mark_start < wallclock
 *
 *		ws   ms  wc
 *		|    |   |
 *		V    V   V
 *		|---------------|
 *
 *	In this case, p->ravg.sum is updated *iff* event is appropriate
 *	(ex: event == PUT_PREV_TASK)
 *
 *	b) Task event spans two windows.
 *		mark_start < window_start < wallclock
 *
 *		ms   ws   wc
 *		|    |    |
 *		V    V    V
 *		-----|-------------------
 *
 *	In this case, p->ravg.sum is updated with (ws - ms) *iff* event
 *	is appropriate, then a new window sample is recorded followed
 *	by p->ravg.sum being set to (wc - ws) *iff* event is appropriate.
 *
 *	c) Task event spans more than two windows.
 *
 *		ms ws_tmp                          ws  wc
 *		|  |                               |   |
 *		V  V                               V   V
 *		---|-------|-------|-------|-------|------
 *		   |                               |
 *		   |<------ nr_full_windows ------>|
 *
 *	In this case, p->ravg.sum is updated with (ws_tmp - ms) first *iff*
 *	event is appropriate, window sample of p->ravg.sum is recorded,
 *	'nr_full_window' samples of window_size is also recorded *iff*
 *	event is appropriate and finally p->ravg.sum is set to (wc - ws)
 *	*iff* event is appropriate.
 *
 * IMPORTANT : Leave p->ravg.mark_start unchanged, as update_cpu_busy_time()
 * depends on it!
 */
static void update_task_demand(struct task_struct *p, struct rq *rq,
			       int event, u64 wallclock)
{
	u64 mark_start = p->ravg.mark_start;
	u64 delta, window_start = rq->window_start;
	int new_window, nr_full_windows;
	u32 window_size = walt_ravg_window;

	new_window = mark_start < window_start;
	if (!account_busy_for_task_demand(p, event)) {
		if (new_window)
			/* If the time accounted isn't being accounted as
			 * busy time, and a new window started, only the
			 * previous window need be closed out with the
			 * pre-existing demand. Multiple windows may have
			 * elapsed, but since empty windows are dropped,
			 * it is not necessary to account those. */
			update_history(rq, p, p->ravg.sum, 1, event);
		return;
	}

	if (!new_window) {
		/* The simple case - busy time contained within the existing
		 * window. */
		add_to_task_demand(rq, p, wallclock - mark_start);
		return;
	}

	/* Busy time spans at least two windows. Temporarily rewind
	 * window_start to first window boundary after mark_start. */
	delta = window_start - mark_start;
	nr_full_windows = div64_u64(delta, window_size);
	window_start -= (u64)nr_full_windows * (u64)window_size;

	/* Process (window_start - mark_start) first */
	add_to_task_demand(rq, p, window_start - mark_start);

	/* Push new sample(s) into task's demand history */
	update_history(rq, p, p->ravg.sum, 1, event);
	if (nr_full_windows)
		update_history(rq, p, scale_exec_time(window_size, rq),
			       nr_full_windows, event);

	/* Roll window_start back to current to process any remainder
	 * in current window. */
	window_start += (u64)nr_full_windows * (u64)window_size;

	/* Process (wallclock - window_start) next */
	mark_start = window_start;
	add_to_task_demand(rq, p, wallclock - mark_start);
}
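
/*
 * For illustration of case (c): with a 20 ms window, mark_start at
 * t = 15 ms and wallclock at t = 87 ms, window_start is 80 ms, so
 * nr_full_windows = 3 and ws_tmp = 20 ms. Assuming nothing was
 * accumulated earlier in the first window, 5 ms is added to p->ravg.sum
 * and recorded as one history sample, three full 20 ms samples follow,
 * and the final 7 ms seeds p->ravg.sum for the still-open window.
 */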

/* Reflect task activity on its demand and cpu's busy time statistics */
void walt_update_task_ravg(struct task_struct *p, struct rq *rq,
			   int event, u64 wallclock, u64 irqtime)
{
	if (walt_disabled || !rq->window_start)
		return;

	lockdep_assert_held(&rq->lock);

	update_window_start(rq, wallclock);

	if (!p->ravg.mark_start)
		goto done;

	update_task_demand(p, rq, event, wallclock);
	update_cpu_busy_time(p, rq, event, wallclock, irqtime);

done:
	trace_walt_update_task_ravg(p, rq, event, wallclock, irqtime);

	p->ravg.mark_start = wallclock;
}

static void reset_task_stats(struct task_struct *p)
{
	u32 sum = 0;

	if (exiting_task(p))
		sum = EXITING_TASK_MARKER;

	memset(&p->ravg, 0, sizeof(struct ravg));
	/* Retain EXITING_TASK marker */
	p->ravg.sum_history[0] = sum;
}

void walt_mark_task_starting(struct task_struct *p)
{
	u64 wallclock;
	struct rq *rq = task_rq(p);

	if (!rq->window_start) {
		reset_task_stats(p);
		return;
	}

	wallclock = walt_ktime_clock();
	p->ravg.mark_start = wallclock;
}

void walt_set_window_start(struct rq *rq)
{
	int cpu = cpu_of(rq);
	struct rq *sync_rq = cpu_rq(sync_cpu);

	if (likely(rq->window_start))
		return;

	if (cpu == sync_cpu) {
		rq->window_start = 1;
	} else {
		raw_spin_unlock(&rq->lock);
		double_rq_lock(rq, sync_rq);
		rq->window_start = cpu_rq(sync_cpu)->window_start;
		rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
		raw_spin_unlock(&sync_rq->lock);
	}

	rq->curr->ravg.mark_start = rq->window_start;
}

void walt_migrate_sync_cpu(int cpu)
{
	if (cpu == sync_cpu)
		sync_cpu = smp_processor_id();
}

void walt_fixup_busy_time(struct task_struct *p, int new_cpu)
{
	struct rq *src_rq = task_rq(p);
	struct rq *dest_rq = cpu_rq(new_cpu);
	u64 wallclock;

	if (!p->on_rq && p->state != TASK_WAKING)
		return;

	if (exiting_task(p)) {
		return;
	}

	if (p->state == TASK_WAKING)
		double_rq_lock(src_rq, dest_rq);

	wallclock = walt_ktime_clock();

	walt_update_task_ravg(task_rq(p)->curr, task_rq(p),
			      TASK_UPDATE, wallclock, 0);
	walt_update_task_ravg(dest_rq->curr, dest_rq,
			      TASK_UPDATE, wallclock, 0);

	walt_update_task_ravg(p, task_rq(p), TASK_MIGRATE, wallclock, 0);

	/*
	 * When a task is migrating during the wakeup, adjust
	 * the task's contribution towards cumulative window
	 * demand.
	 */
	if (p->state == TASK_WAKING &&
	    p->last_sleep_ts >= src_rq->window_start) {
		fixup_cum_window_demand(src_rq, -(s64)p->ravg.demand);
		fixup_cum_window_demand(dest_rq, p->ravg.demand);
	}

	if (p->ravg.curr_window) {
		src_rq->curr_runnable_sum -= p->ravg.curr_window;
		dest_rq->curr_runnable_sum += p->ravg.curr_window;
	}

	if (p->ravg.prev_window) {
		src_rq->prev_runnable_sum -= p->ravg.prev_window;
		dest_rq->prev_runnable_sum += p->ravg.prev_window;
	}

	if ((s64)src_rq->prev_runnable_sum < 0) {
		src_rq->prev_runnable_sum = 0;
		WARN_ON(1);
	}
	if ((s64)src_rq->curr_runnable_sum < 0) {
		src_rq->curr_runnable_sum = 0;
		WARN_ON(1);
	}

	trace_walt_migration_update_sum(src_rq, p);
	trace_walt_migration_update_sum(dest_rq, p);

	if (p->state == TASK_WAKING)
		double_rq_unlock(src_rq, dest_rq);
}
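
/*
 * For illustration: a waking task with 3 ms of scaled busy time in its
 * curr_window and 1 ms in its prev_window takes those contributions with
 * it - they are subtracted from the source rq's curr/prev_runnable_sum
 * and added to the destination rq's, keeping per-CPU busy time
 * consistent across the migration.
 */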

void walt_init_new_task_load(struct task_struct *p)
{
	int i;
	u32 init_load_windows =
			div64_u64((u64)sysctl_sched_walt_init_task_load_pct *
                          (u64)walt_ravg_window, 100);
	u32 init_load_pct = current->init_load_pct;

	p->init_load_pct = 0;
	memset(&p->ravg, 0, sizeof(struct ravg));

	if (init_load_pct) {
		init_load_windows = div64_u64((u64)init_load_pct *
			     (u64)walt_ravg_window, 100);
	}

	p->ravg.demand = init_load_windows;
	for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
		p->ravg.sum_history[i] = init_load_windows;
903}