/* Copyright (c) 2012, 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Scheduler hook for average runqueue determination
 */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/sched.h>
#include <linux/math64.h>

#include "sched.h"
#include "walt.h"
#include <trace/events/sched.h>

static DEFINE_PER_CPU(u64, nr_prod_sum);
static DEFINE_PER_CPU(u64, last_time);
static DEFINE_PER_CPU(u64, nr_big_prod_sum);
static DEFINE_PER_CPU(u64, nr);
static DEFINE_PER_CPU(u64, nr_max);

static DEFINE_PER_CPU(unsigned long, iowait_prod_sum);
static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock);
static s64 last_get_time;

static DEFINE_PER_CPU(atomic64_t, last_busy_time) = ATOMIC64_INIT(0);

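/* Unsigned 64-bit division that rounds the quotient up instead of truncating. */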
#define DIV64_U64_ROUNDUP(X, Y) div64_u64((X) + (Y - 1), Y)
/**
 * sched_get_nr_running_avg
 * @return: Average nr_running, iowait and nr_big_tasks value since last poll.
 *          The averages are rounded up to the nearest integer.
 *
 * Obtains the average nr_running value since the last poll.
 * This function may not be called concurrently with itself.
 */
void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg,
                              unsigned int *max_nr, unsigned int *big_max_nr)
{
        int cpu;
        u64 curr_time = sched_clock();
        u64 diff = curr_time - last_get_time;
        u64 tmp_avg = 0, tmp_iowait = 0, tmp_big_avg = 0;

        *avg = 0;
        *iowait_avg = 0;
        *big_avg = 0;
        *max_nr = 0;
        *big_max_nr = 0;

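        /* No time has elapsed since the last poll; leave all outputs at zero. */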
        if (!diff)
                return;

        /* read and reset nr_running counts */
        for_each_possible_cpu(cpu) {
                unsigned long flags;

                spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
                curr_time = sched_clock();
                diff = curr_time - per_cpu(last_time, cpu);
                BUG_ON((s64)diff < 0);

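                /*
                 * Fold in the accumulated time-weighted sums plus the
                 * contribution of the interval since each CPU's last update.
                 */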
                tmp_avg += per_cpu(nr_prod_sum, cpu);
                tmp_avg += per_cpu(nr, cpu) * diff;

                tmp_big_avg += per_cpu(nr_big_prod_sum, cpu);
                tmp_big_avg += nr_eligible_big_tasks(cpu) * diff;

                tmp_iowait += per_cpu(iowait_prod_sum, cpu);
                tmp_iowait += nr_iowait_cpu(cpu) * diff;

                per_cpu(last_time, cpu) = curr_time;

                per_cpu(nr_prod_sum, cpu) = 0;
                per_cpu(nr_big_prod_sum, cpu) = 0;
                per_cpu(iowait_prod_sum, cpu) = 0;

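                /*
                 * Report the peak nr_running seen on any CPU, and separately
                 * on max-capacity CPUs, since the previous poll.
                 */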
                if (*max_nr < per_cpu(nr_max, cpu))
                        *max_nr = per_cpu(nr_max, cpu);

                if (is_max_capacity_cpu(cpu)) {
                        if (*big_max_nr < per_cpu(nr_max, cpu))
                                *big_max_nr = per_cpu(nr_max, cpu);
                }

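                /* Restart max tracking from the current occupancy. */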
                per_cpu(nr_max, cpu) = per_cpu(nr, cpu);
                spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
        }

        diff = curr_time - last_get_time;
        last_get_time = curr_time;

        /*
         * Any task running on the BIG cluster, and BIG tasks running on the
         * little cluster, contribute to big_avg. Small or medium tasks can
         * also run on the BIG cluster when the co-location and scheduler
         * boost features are active. We don't want these tasks to downmigrate
         * to the little cluster when BIG CPUs are available but isolated.
         * Round up the average values so that core_ctl aggressively
         * unisolates BIG CPUs.
         */
        *avg = (int)DIV64_U64_ROUNDUP(tmp_avg, diff);
        *big_avg = (int)DIV64_U64_ROUNDUP(tmp_big_avg, diff);
        *iowait_avg = (int)DIV64_U64_ROUNDUP(tmp_iowait, diff);

        trace_sched_get_nr_running_avg(*avg, *big_avg, *iowait_avg,
                                       *max_nr, *big_max_nr);

        BUG_ON(*avg < 0 || *big_avg < 0 || *iowait_avg < 0);
        pr_debug("%s - avg:%d big_avg:%d iowait_avg:%d\n",
                 __func__, *avg, *big_avg, *iowait_avg);
}
EXPORT_SYMBOL(sched_get_nr_running_avg);

#define BUSY_NR_RUN             3
#define BUSY_LOAD_FACTOR        10
static inline void update_last_busy_time(int cpu, bool dequeue,
                                unsigned long prev_nr_run, u64 curr_time)
{
        bool nr_run_trigger = false, load_trigger = false;

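        /*
         * Only non-minimum-capacity CPUs on HMP-capable systems are tracked.
         * A CPU is stamped busy when its runqueue just drained below
         * BUSY_NR_RUN tasks, or when a dequeue still leaves more than
         * 1/BUSY_LOAD_FACTOR of the CPU's original capacity utilized.
         */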
        if (!hmp_capable() || is_min_capacity_cpu(cpu))
                return;

        if (prev_nr_run >= BUSY_NR_RUN && per_cpu(nr, cpu) < BUSY_NR_RUN)
                nr_run_trigger = true;

        if (dequeue && (cpu_util(cpu) * BUSY_LOAD_FACTOR) >
                        capacity_orig_of(cpu))
                load_trigger = true;

        if (nr_run_trigger || load_trigger)
                atomic64_set(&per_cpu(last_busy_time, cpu), curr_time);
}

/**
 * sched_update_nr_prod
 * @cpu: The CPU whose nr_running count is being updated.
 * @delta: Adjust nr by 'delta' amount.
 * @inc: Whether we are increasing or decreasing the count.
 * @return: N/A
 *
 * Update the averages with the latest nr_running value for the CPU.
 */
void sched_update_nr_prod(int cpu, long delta, bool inc)
{
        u64 diff;
        u64 curr_time;
        unsigned long flags, nr_running;

        spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
        nr_running = per_cpu(nr, cpu);
        curr_time = sched_clock();
        diff = curr_time - per_cpu(last_time, cpu);
        BUG_ON((s64)diff < 0);
        per_cpu(last_time, cpu) = curr_time;
        per_cpu(nr, cpu) = nr_running + (inc ? delta : -delta);

        BUG_ON((s64)per_cpu(nr, cpu) < 0);

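        /* Track the peak nr_running seen between polls. */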
        if (per_cpu(nr, cpu) > per_cpu(nr_max, cpu))
                per_cpu(nr_max, cpu) = per_cpu(nr, cpu);

        update_last_busy_time(cpu, !inc, nr_running, curr_time);

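        /*
         * Weight the counts by the time elapsed since the last update;
         * sched_get_nr_running_avg() turns these sums into averages.
         */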
        per_cpu(nr_prod_sum, cpu) += nr_running * diff;
        per_cpu(nr_big_prod_sum, cpu) += nr_eligible_big_tasks(cpu) * diff;
        per_cpu(iowait_prod_sum, cpu) += nr_iowait_cpu(cpu) * diff;
        spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
}
EXPORT_SYMBOL(sched_update_nr_prod);

/*
 * Returns the CPU utilization % in the last window.
 */
unsigned int sched_get_cpu_util(int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        u64 util;
        unsigned long capacity, flags;
        unsigned int busy;

        raw_spin_lock_irqsave(&rq->lock, flags);

        util = rq->cfs.avg.util_avg;
        capacity = capacity_orig_of(cpu);

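        /*
         * With WALT enabled, use the previous window's busy time (including
         * rq->grp_time's contribution) scaled to capacity units by the
         * window size, instead of PELT's util_avg.
         */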
#ifdef CONFIG_SCHED_WALT
        if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
                util = rq->prev_runnable_sum + rq->grp_time.prev_runnable_sum;
                util = div64_u64(util,
                                 sched_ravg_window >> SCHED_CAPACITY_SHIFT);
        }
#endif
        raw_spin_unlock_irqrestore(&rq->lock, flags);

        util = (util >= capacity) ? capacity : util;
        busy = div64_ul((util * 100), capacity);
        return busy;
}

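/*
 * Returns the sched_clock() timestamp recorded by update_last_busy_time()
 * for this CPU.
 */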
u64 sched_get_cpu_last_busy_time(int cpu)
{
        return atomic64_read(&per_cpu(last_busy_time, cpu));
}