/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpufreq.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <trace/events/power.h>
#include <linux/sched/sysctl.h>
#include "sched.h"
#include "tune.h"

#define SUGOV_KTHREAD_PRIORITY	50

struct sugov_tunables {
	struct gov_attr_set attr_set;
	unsigned int up_rate_limit_us;
	unsigned int down_rate_limit_us;
	unsigned int hispeed_load;
	unsigned int hispeed_freq;
	bool pl;
};

struct sugov_policy {
	struct cpufreq_policy *policy;

	struct sugov_tunables *tunables;
	struct list_head tunables_hook;

	raw_spinlock_t update_lock;  /* For shared policies */
	u64 last_freq_update_time;
	s64 min_rate_limit_ns;
	s64 up_rate_delay_ns;
	s64 down_rate_delay_ns;
	u64 last_ws;
	u64 curr_cycles;
	u64 last_cyc_update_time;
	unsigned long avg_cap;
	unsigned int next_freq;
	unsigned int cached_raw_freq;
	unsigned long hispeed_util;
	unsigned long max;

	/* The next fields are only needed if fast switch cannot be used. */
	struct irq_work irq_work;
	struct kthread_work work;
	struct mutex work_lock;
	struct kthread_worker worker;
	struct task_struct *thread;
	bool work_in_progress;

	bool need_freq_update;
};

struct sugov_cpu {
	struct update_util_data update_util;
	struct sugov_policy *sg_policy;

	unsigned long iowait_boost;
	unsigned long iowait_boost_max;
	u64 last_update;

	struct sched_walt_cpu_load walt_load;

	/* The fields below are only needed when sharing a policy. */
	unsigned long util;
	unsigned long max;
	unsigned int flags;
	unsigned int cpu;

	/* The field below is for single-CPU policies only. */
#ifdef CONFIG_NO_HZ_COMMON
	unsigned long saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
static unsigned int stale_ns;
static DEFINE_PER_CPU(struct sugov_tunables *, cached_tunables);

/************************ Governor internals ***********************/

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
	s64 delta_ns;

	if (unlikely(sg_policy->need_freq_update)) {
		sg_policy->need_freq_update = false;
		/*
		 * This happens when limits change, so forget the previous
		 * next_freq value and force an update.
		 */
		sg_policy->next_freq = UINT_MAX;
		return true;
	}

	/*
	 * No need to recalculate the next frequency until at least
	 * min_rate_limit_us has elapsed. We may still rate limit further
	 * once the direction of the frequency change is known, according
	 * to the separate up/down rate limits.
	 */

	delta_ns = time - sg_policy->last_freq_update_time;
	return delta_ns >= sg_policy->min_rate_limit_ns;
}

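/*
 * Return true if the proposed switch to next_freq should be skipped because
 * the rate limit for its direction (up_rate_delay_ns for increases,
 * down_rate_delay_ns for decreases) has not elapsed yet.
 */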
static bool sugov_up_down_rate_limit(struct sugov_policy *sg_policy, u64 time,
				     unsigned int next_freq)
{
	s64 delta_ns;

	delta_ns = time - sg_policy->last_freq_update_time;

	if (next_freq > sg_policy->next_freq &&
	    delta_ns < sg_policy->up_rate_delay_ns)
		return true;

	if (next_freq < sg_policy->next_freq &&
	    delta_ns < sg_policy->down_rate_delay_ns)
		return true;

	return false;
}

static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
				unsigned int next_freq)
{
	struct cpufreq_policy *policy = sg_policy->policy;

	if (sg_policy->next_freq == next_freq)
		return;

	if (sugov_up_down_rate_limit(sg_policy, time, next_freq))
		return;

	sg_policy->next_freq = next_freq;
	sg_policy->last_freq_update_time = time;

	if (policy->fast_switch_enabled) {
		next_freq = cpufreq_driver_fast_switch(policy, next_freq);
		if (next_freq == CPUFREQ_ENTRY_INVALID)
			return;

		policy->cur = next_freq;
		trace_cpu_frequency(next_freq, smp_processor_id());
	} else {
		sg_policy->work_in_progress = true;
		sched_irq_work_queue(&sg_policy->irq_work);
	}
}

#define TARGET_LOAD 80
/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal or greater than the raw
 * next_freq (as calculated above) is returned, subject to policy min/max and
 * cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
				  unsigned long util, unsigned long max)
{
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned int freq = arch_scale_freq_invariant() ?
				policy->cpuinfo.max_freq : policy->cur;

	freq = (freq + (freq >> 2)) * util / max;
	trace_sugov_next_freq(policy->cpu, util, max, freq);

	if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
		return sg_policy->next_freq;
	sg_policy->cached_raw_freq = freq;
	return cpufreq_driver_resolve_freq(policy, freq);
}

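/*
 * Report the CPU's capacity in *max and its current utilization in *util.
 * The PELT-based estimate is computed first and then overridden with the
 * value from boosted_cpu_util(), which is passed the per-CPU WALT load
 * structure used by the rest of this governor.
 */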
static void sugov_get_util(unsigned long *util, unsigned long *max, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long cfs_max;
	struct sugov_cpu *loadcpu = &per_cpu(sugov_cpu, cpu);

	cfs_max = arch_scale_cpu_capacity(NULL, cpu);

	*util = min(rq->cfs.avg.util_avg, cfs_max);
	*max = cfs_max;

	*util = boosted_cpu_util(cpu, &loadcpu->walt_load);
}

static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
				   unsigned int flags)
{
	if (flags & SCHED_CPUFREQ_IOWAIT) {
		sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
	} else if (sg_cpu->iowait_boost) {
		s64 delta_ns = time - sg_cpu->last_update;

		/* Clear iowait_boost if the CPU appears to have been idle. */
		if (delta_ns > TICK_NSEC)
			sg_cpu->iowait_boost = 0;
	}
}

static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
			       unsigned long *max)
{
	unsigned long boost_util = sg_cpu->iowait_boost;
	unsigned long boost_max = sg_cpu->iowait_boost_max;

	if (!boost_util)
		return;

	if (*util * boost_max < *max * boost_util) {
		*util = boost_util;
		*max = boost_max;
	}
	sg_cpu->iowait_boost >>= 1;
}

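/*
 * Convert a frequency in kHz into the equivalent capacity/utilization value
 * on this policy by scaling the policy's max capacity by freq / max_freq.
 */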
static unsigned long freq_to_util(struct sugov_policy *sg_policy,
				  unsigned int freq)
{
	return mult_frac(sg_policy->max, freq,
			 sg_policy->policy->cpuinfo.max_freq);
}

#define KHZ 1000
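/*
 * Accumulate the number of cycles run at prev_freq since the last update,
 * up to the timestamp @upto, into curr_cycles for the current WALT window.
 */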
static void sugov_track_cycles(struct sugov_policy *sg_policy,
			       unsigned int prev_freq,
			       u64 upto)
{
	u64 delta_ns, cycles;

	if (unlikely(!sysctl_sched_use_walt_cpu_util))
		return;

	/* Track cycles in current window */
	delta_ns = upto - sg_policy->last_cyc_update_time;
	delta_ns *= prev_freq;
	do_div(delta_ns, (NSEC_PER_SEC / KHZ));
	cycles = delta_ns;
	sg_policy->curr_cycles += cycles;
	sg_policy->last_cyc_update_time = upto;
}

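/*
 * When a WALT window boundary (curr_ws) has been crossed, convert the cycles
 * accumulated over the last window into an average frequency and record the
 * corresponding capacity in avg_cap, which sugov_walt_adjust() compares
 * against the hispeed_load threshold.
 */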
static void sugov_calc_avg_cap(struct sugov_policy *sg_policy, u64 curr_ws,
			       unsigned int prev_freq)
{
	u64 last_ws = sg_policy->last_ws;
	unsigned int avg_freq;

	if (unlikely(!sysctl_sched_use_walt_cpu_util))
		return;

	BUG_ON(curr_ws < last_ws);
	if (curr_ws <= last_ws)
		return;

	/* If we skipped some windows */
	if (curr_ws > (last_ws + sched_ravg_window)) {
		avg_freq = prev_freq;
		/* Reset tracking history */
		sg_policy->last_cyc_update_time = curr_ws;
	} else {
		sugov_track_cycles(sg_policy, prev_freq, curr_ws);
		avg_freq = sg_policy->curr_cycles;
		avg_freq /= sched_ravg_window / (NSEC_PER_SEC / KHZ);
	}
	sg_policy->avg_cap = freq_to_util(sg_policy, avg_freq);
	sg_policy->curr_cycles = 0;
	sg_policy->last_ws = curr_ws;
}

#define NL_RATIO 75
#define DEFAULT_HISPEED_LOAD 90
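/*
 * WALT-specific adjustments to the requested utilization: raise it to at
 * least hispeed_util when the average load over the last window crossed
 * hispeed_load (unless this update was triggered by an inter-cluster
 * migration), go straight to max capacity when the nl (new-task) component
 * dominates the CPU's load, and honor the predicted load when the "pl"
 * tunable is enabled.
 */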
static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util,
			      unsigned long *max)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	bool is_migration = sg_cpu->flags & SCHED_CPUFREQ_INTERCLUSTER_MIG;
	unsigned long nl = sg_cpu->walt_load.nl;
	unsigned long cpu_util = sg_cpu->util;
	bool is_hiload;

	if (unlikely(!sysctl_sched_use_walt_cpu_util))
		return;

	is_hiload = (cpu_util >= mult_frac(sg_policy->avg_cap,
					   sg_policy->tunables->hispeed_load,
					   100));

	if (is_hiload && !is_migration)
		*util = max(*util, sg_policy->hispeed_util);

	if (is_hiload && nl >= mult_frac(cpu_util, NL_RATIO, 100))
		*util = *max;

	if (sg_policy->tunables->pl)
		*util = max(*util, sg_cpu->walt_load.pl);
}

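/*
 * Consider the CPU busy if it has not entered idle since the last frequency
 * evaluation, i.e. the NOHZ idle-calls counter has not changed.
 */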
#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
	unsigned long idle_calls = tick_nohz_get_idle_calls();
	bool ret = idle_calls == sg_cpu->saved_idle_calls;

	sg_cpu->saved_idle_calls = idle_calls;
	return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */

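/*
 * Frequency update handler for single-CPU policies. The policy update_lock
 * is taken here as well, since the WALT window statistics updated below
 * (max, hispeed_util, avg_cap) are shared with the slow path and the sysfs
 * handlers.
 */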
static void sugov_update_single(struct update_util_data *hook, u64 time,
				unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util, max, hs_util;
	unsigned int next_f;
	bool busy;

	flags &= ~SCHED_CPUFREQ_RT_DL;

	if (!sg_policy->tunables->pl && flags & SCHED_CPUFREQ_PL)
		return;

	sugov_set_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	if (!sugov_should_update_freq(sg_policy, time))
		return;

	busy = sugov_cpu_is_busy(sg_cpu);

	raw_spin_lock(&sg_policy->update_lock);
	if (flags & SCHED_CPUFREQ_RT_DL) {
		next_f = policy->cpuinfo.max_freq;
	} else {
		sugov_get_util(&util, &max, sg_cpu->cpu);
		if (sg_policy->max != max) {
			sg_policy->max = max;
			hs_util = freq_to_util(sg_policy,
					       sg_policy->tunables->hispeed_freq);
			hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
			sg_policy->hispeed_util = hs_util;
		}

		sg_cpu->util = util;
		sg_cpu->max = max;
		sg_cpu->flags = flags;
		sugov_calc_avg_cap(sg_policy, sg_cpu->walt_load.ws,
				   sg_policy->policy->cur);
		trace_sugov_util_update(sg_cpu->cpu, sg_cpu->util,
					sg_policy->avg_cap,
					max, sg_cpu->walt_load.nl,
					sg_cpu->walt_load.pl, flags);
		sugov_iowait_boost(sg_cpu, &util, &max);
		sugov_walt_adjust(sg_cpu, &util, &max);
		next_f = get_next_freq(sg_policy, util, max);
		/*
		 * Do not reduce the frequency if the CPU has not been idle
		 * recently, as the reduction is likely to be premature then.
		 */
		if (busy && next_f < sg_policy->next_freq)
			next_f = sg_policy->next_freq;
	}

	sugov_update_commit(sg_policy, time, next_f);
	raw_spin_unlock(&sg_policy->update_lock);
}

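/*
 * Pick the frequency for a shared policy: walk every CPU in the policy,
 * ignore CPUs whose data is older than stale_ns, and use the utilization of
 * the CPU with the highest relative load, going straight to the maximum
 * frequency if any CPU is running RT/DL work.
 */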
static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util = 0, max = 1;
	unsigned int j;

	for_each_cpu(j, policy->cpus) {
		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
		unsigned long j_util, j_max;
		s64 delta_ns;

		/*
		 * If the CPU utilization was last updated before the previous
		 * frequency update and the time elapsed between the last update
		 * of the CPU utilization and the last frequency update is long
		 * enough, don't take the CPU into account as it probably is
		 * idle now (and clear iowait_boost for it).
		 */
		delta_ns = time - j_sg_cpu->last_update;
		if (delta_ns > stale_ns) {
			j_sg_cpu->iowait_boost = 0;
			continue;
		}
		if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL)
			return policy->cpuinfo.max_freq;

		j_util = j_sg_cpu->util;
		j_max = j_sg_cpu->max;
		if (j_util * max >= j_max * util) {
			util = j_util;
			max = j_max;
		}

		sugov_iowait_boost(j_sg_cpu, &util, &max);
		sugov_walt_adjust(j_sg_cpu, &util, &max);
	}

	return get_next_freq(sg_policy, util, max);
}

static void sugov_update_shared(struct update_util_data *hook, u64 time,
				unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned long util, max, hs_util;
	unsigned int next_f;

	if (!sg_policy->tunables->pl && flags & SCHED_CPUFREQ_PL)
		return;

	sugov_get_util(&util, &max, sg_cpu->cpu);

	flags &= ~SCHED_CPUFREQ_RT_DL;

	raw_spin_lock(&sg_policy->update_lock);

	if (sg_policy->max != max) {
		sg_policy->max = max;
		hs_util = freq_to_util(sg_policy,
				       sg_policy->tunables->hispeed_freq);
		hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
		sg_policy->hispeed_util = hs_util;
	}

	sg_cpu->util = util;
	sg_cpu->max = max;
	sg_cpu->flags = flags;

	sugov_set_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	sugov_calc_avg_cap(sg_policy, sg_cpu->walt_load.ws,
			   sg_policy->policy->cur);

	trace_sugov_util_update(sg_cpu->cpu, sg_cpu->util, sg_policy->avg_cap,
				max, sg_cpu->walt_load.nl,
				sg_cpu->walt_load.pl, flags);

	if (sugov_should_update_freq(sg_policy, time)) {
		if (flags & SCHED_CPUFREQ_RT_DL)
			next_f = sg_policy->policy->cpuinfo.max_freq;
		else
			next_f = sugov_next_freq_shared(sg_cpu, time);

		sugov_update_commit(sg_policy, time, next_f);
	}

	raw_spin_unlock(&sg_policy->update_lock);
}

static void sugov_work(struct kthread_work *work)
{
	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
	unsigned long flags;

	mutex_lock(&sg_policy->work_lock);
	raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
	sugov_track_cycles(sg_policy, sg_policy->policy->cur,
			   sched_ktime_clock());
	raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
	__cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
				CPUFREQ_RELATION_L);
	mutex_unlock(&sg_policy->work_lock);

	sg_policy->work_in_progress = false;
}

static void sugov_irq_work(struct irq_work *irq_work)
{
	struct sugov_policy *sg_policy;

	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

	/*
	 * For RT and deadline tasks, the schedutil governor shoots the
	 * frequency to maximum. Special care must be taken to ensure that this
	 * kthread doesn't result in the same behavior.
	 *
	 * This is (mostly) guaranteed by the work_in_progress flag. The flag is
	 * updated only at the end of the sugov_work() function and before that
	 * the schedutil governor rejects all other frequency scaling requests.
	 *
	 * There is a very rare case though, where the RT thread yields right
	 * after the work_in_progress flag is cleared. The effects of that are
	 * neglected for now.
	 */
	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}

/************************** sysfs interface ************************/

static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct sugov_tunables, attr_set);
}

static DEFINE_MUTEX(min_rate_lock);

static void update_min_rate_limit_ns(struct sugov_policy *sg_policy)
{
	mutex_lock(&min_rate_lock);
	sg_policy->min_rate_limit_ns = min(sg_policy->up_rate_delay_ns,
					   sg_policy->down_rate_delay_ns);
	mutex_unlock(&min_rate_lock);
}

static ssize_t up_rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return sprintf(buf, "%u\n", tunables->up_rate_limit_us);
}

static ssize_t down_rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return sprintf(buf, "%u\n", tunables->down_rate_limit_us);
}

static ssize_t up_rate_limit_us_store(struct gov_attr_set *attr_set,
				      const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned int rate_limit_us;

	if (kstrtouint(buf, 10, &rate_limit_us))
		return -EINVAL;

	tunables->up_rate_limit_us = rate_limit_us;

	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
		sg_policy->up_rate_delay_ns = rate_limit_us * NSEC_PER_USEC;
		update_min_rate_limit_ns(sg_policy);
	}

	return count;
}

static ssize_t down_rate_limit_us_store(struct gov_attr_set *attr_set,
					const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned int rate_limit_us;

	if (kstrtouint(buf, 10, &rate_limit_us))
		return -EINVAL;

	tunables->down_rate_limit_us = rate_limit_us;

	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
		sg_policy->down_rate_delay_ns = rate_limit_us * NSEC_PER_USEC;
		update_min_rate_limit_ns(sg_policy);
	}

	return count;
}

static ssize_t hispeed_load_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->hispeed_load);
}

static ssize_t hispeed_load_store(struct gov_attr_set *attr_set,
				  const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	if (kstrtouint(buf, 10, &tunables->hispeed_load))
		return -EINVAL;

	tunables->hispeed_load = min(100U, tunables->hispeed_load);

	return count;
}

static ssize_t hispeed_freq_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->hispeed_freq);
}

static ssize_t hispeed_freq_store(struct gov_attr_set *attr_set,
				  const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	unsigned int val;
	struct sugov_policy *sg_policy;
	unsigned long hs_util;
	unsigned long flags;

	if (kstrtouint(buf, 10, &val))
		return -EINVAL;

	tunables->hispeed_freq = val;
	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
		raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
		hs_util = freq_to_util(sg_policy,
				       sg_policy->tunables->hispeed_freq);
		hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
		sg_policy->hispeed_util = hs_util;
		raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
	}

	return count;
}

static ssize_t pl_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->pl);
}

static ssize_t pl_store(struct gov_attr_set *attr_set, const char *buf,
			size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	if (kstrtobool(buf, &tunables->pl))
		return -EINVAL;

	return count;
}

static struct governor_attr up_rate_limit_us = __ATTR_RW(up_rate_limit_us);
static struct governor_attr down_rate_limit_us = __ATTR_RW(down_rate_limit_us);
static struct governor_attr hispeed_load = __ATTR_RW(hispeed_load);
static struct governor_attr hispeed_freq = __ATTR_RW(hispeed_freq);
static struct governor_attr pl = __ATTR_RW(pl);

static struct attribute *sugov_attributes[] = {
	&up_rate_limit_us.attr,
	&down_rate_limit_us.attr,
	&hispeed_load.attr,
	&hispeed_freq.attr,
	&pl.attr,
	NULL
};

static struct kobj_type sugov_tunables_ktype = {
	.default_attrs = sugov_attributes,
	.sysfs_ops = &governor_sysfs_ops,
};

/********************** cpufreq governor interface *********************/

static struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;

	sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
	if (!sg_policy)
		return NULL;

	sg_policy->policy = policy;
	raw_spin_lock_init(&sg_policy->update_lock);
	return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
	kfree(sg_policy);
}

static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
	struct task_struct *thread;
	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
	struct cpufreq_policy *policy = sg_policy->policy;
	int ret;

	/* kthread only required for slow path */
	if (policy->fast_switch_enabled)
		return 0;

	kthread_init_work(&sg_policy->work, sugov_work);
	kthread_init_worker(&sg_policy->worker);
	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
				"sugov:%d",
				cpumask_first(policy->related_cpus));
	if (IS_ERR(thread)) {
		pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
		return PTR_ERR(thread);
	}

	ret = sched_setscheduler_nocheck(thread, SCHED_FIFO, &param);
	if (ret) {
		kthread_stop(thread);
		pr_warn("%s: failed to set SCHED_FIFO\n", __func__);
		return ret;
	}

	sg_policy->thread = thread;
	kthread_bind_mask(thread, policy->related_cpus);
	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
	mutex_init(&sg_policy->work_lock);

	wake_up_process(thread);

	return 0;
}

static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
	/* kthread only required for slow path */
	if (sg_policy->policy->fast_switch_enabled)
		return;

	kthread_flush_worker(&sg_policy->worker);
	kthread_stop(sg_policy->thread);
	mutex_destroy(&sg_policy->work_lock);
}

static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
	struct sugov_tunables *tunables;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (tunables) {
		gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
		if (!have_governor_per_policy())
			global_tunables = tunables;
	}
	return tunables;
}

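/*
 * Save a copy of the current tunables when the last policy using them goes
 * away, so that sugov_tunables_restore() can bring the values back the next
 * time the governor is started on these CPUs.
 */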
static void sugov_tunables_save(struct cpufreq_policy *policy,
				struct sugov_tunables *tunables)
{
	int cpu;
	struct sugov_tunables *cached = per_cpu(cached_tunables, policy->cpu);

	if (!have_governor_per_policy())
		return;

	if (!cached) {
		cached = kzalloc(sizeof(*tunables), GFP_KERNEL);
		if (!cached) {
			pr_warn("Couldn't allocate tunables for caching\n");
			return;
		}
		for_each_cpu(cpu, policy->related_cpus)
			per_cpu(cached_tunables, cpu) = cached;
	}

	cached->pl = tunables->pl;
	cached->hispeed_load = tunables->hispeed_load;
	cached->hispeed_freq = tunables->hispeed_freq;
	cached->up_rate_limit_us = tunables->up_rate_limit_us;
	cached->down_rate_limit_us = tunables->down_rate_limit_us;
}

static void sugov_tunables_free(struct sugov_tunables *tunables)
{
	if (!have_governor_per_policy())
		global_tunables = NULL;

	kfree(tunables);
}

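/*
 * Restore the tunable values cached by sugov_tunables_save(), if any, when
 * the governor is started again on this policy.
 */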
static void sugov_tunables_restore(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	struct sugov_tunables *tunables = sg_policy->tunables;
	struct sugov_tunables *cached = per_cpu(cached_tunables, policy->cpu);

	if (!cached)
		return;

	tunables->pl = cached->pl;
	tunables->hispeed_load = cached->hispeed_load;
	tunables->hispeed_freq = cached->hispeed_freq;
	tunables->up_rate_limit_us = cached->up_rate_limit_us;
	tunables->down_rate_limit_us = cached->down_rate_limit_us;
}

static int sugov_init(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;
	struct sugov_tunables *tunables;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	cpufreq_enable_fast_switch(policy);

	sg_policy = sugov_policy_alloc(policy);
	if (!sg_policy) {
		ret = -ENOMEM;
		goto disable_fast_switch;
	}

	ret = sugov_kthread_create(sg_policy);
	if (ret)
		goto free_sg_policy;

	mutex_lock(&global_tunables_lock);

	if (global_tunables) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto stop_kthread;
		}
		policy->governor_data = sg_policy;
		sg_policy->tunables = global_tunables;

		gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
		goto out;
	}

	tunables = sugov_tunables_alloc(sg_policy);
	if (!tunables) {
		ret = -ENOMEM;
		goto stop_kthread;
	}

	/*
	 * NOTE:
	 * Initialize up_rate_limit_us/down_rate_limit_us to 0 explicitly,
	 * since WALT expects them to be 0 by default.
	 */
	tunables->up_rate_limit_us = 0;
	tunables->down_rate_limit_us = 0;
	tunables->hispeed_load = DEFAULT_HISPEED_LOAD;
	tunables->hispeed_freq = 0;

	policy->governor_data = sg_policy;
	sg_policy->tunables = tunables;
	stale_ns = sched_ravg_window + (sched_ravg_window >> 3);

	sugov_tunables_restore(policy);

	ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
				   get_governor_parent_kobj(policy), "%s",
				   schedutil_gov.name);
	if (ret)
		goto fail;

out:
	mutex_unlock(&global_tunables_lock);
	return 0;

fail:
	policy->governor_data = NULL;
	sugov_tunables_free(tunables);

stop_kthread:
	sugov_kthread_stop(sg_policy);

free_sg_policy:
	mutex_unlock(&global_tunables_lock);

	sugov_policy_free(sg_policy);

disable_fast_switch:
	cpufreq_disable_fast_switch(policy);

	pr_err("initialization failed (error %d)\n", ret);
	return ret;
}

static void sugov_exit(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	struct sugov_tunables *tunables = sg_policy->tunables;
	unsigned int count;

	mutex_lock(&global_tunables_lock);

	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
	policy->governor_data = NULL;
	if (!count) {
		sugov_tunables_save(policy, tunables);
		sugov_tunables_free(tunables);
	}

	mutex_unlock(&global_tunables_lock);

	sugov_kthread_stop(sg_policy);
	sugov_policy_free(sg_policy);
	cpufreq_disable_fast_switch(policy);
}

static int sugov_start(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	sg_policy->up_rate_delay_ns =
		sg_policy->tunables->up_rate_limit_us * NSEC_PER_USEC;
	sg_policy->down_rate_delay_ns =
		sg_policy->tunables->down_rate_limit_us * NSEC_PER_USEC;
	update_min_rate_limit_ns(sg_policy);
	sg_policy->last_freq_update_time = 0;
	sg_policy->next_freq = UINT_MAX;
	sg_policy->work_in_progress = false;
	sg_policy->need_freq_update = false;
	sg_policy->cached_raw_freq = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		memset(sg_cpu, 0, sizeof(*sg_cpu));
		sg_cpu->sg_policy = sg_policy;
		sg_cpu->cpu = cpu;
		sg_cpu->flags = SCHED_CPUFREQ_RT;
		sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
	}

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
					     policy_is_shared(policy) ?
							sugov_update_shared :
							sugov_update_single);
	}
	return 0;
}

static void sugov_stop(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	for_each_cpu(cpu, policy->cpus)
		cpufreq_remove_update_util_hook(cpu);

	synchronize_sched();

	if (!policy->fast_switch_enabled) {
		irq_work_sync(&sg_policy->irq_work);
		kthread_cancel_work_sync(&sg_policy->work);
	}
}

static void sugov_limits(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned long flags;

	if (!policy->fast_switch_enabled) {
		mutex_lock(&sg_policy->work_lock);
		raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
		sugov_track_cycles(sg_policy, sg_policy->policy->cur,
				   sched_ktime_clock());
		raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
		cpufreq_policy_apply_limits(policy);
		mutex_unlock(&sg_policy->work_lock);
	}

	sg_policy->need_freq_update = true;
}

static struct cpufreq_governor schedutil_gov = {
	.name = "schedutil",
	.owner = THIS_MODULE,
	.init = sugov_init,
	.exit = sugov_exit,
	.start = sugov_start,
	.stop = sugov_stop,
	.limits = sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &schedutil_gov;
}
#endif

static int __init sugov_register(void)
{
	return cpufreq_register_governor(&schedutil_gov);
}
fs_initcall(sugov_register);