/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpufreq.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <trace/events/power.h>

#include "sched.h"
#include "tune.h"

unsigned long boosted_cpu_util(int cpu);

/*
 * Stub out fast switch routines present on mainline to reduce the backport
 * overhead.
 */
#define cpufreq_driver_fast_switch(x, y) 0
#define cpufreq_enable_fast_switch(x)
#define cpufreq_disable_fast_switch(x)
#define LATENCY_MULTIPLIER (1000)
#define SUGOV_KTHREAD_PRIORITY 50

struct sugov_tunables {
	struct gov_attr_set attr_set;
	unsigned int up_rate_limit_us;
	unsigned int down_rate_limit_us;
};

struct sugov_policy {
	struct cpufreq_policy *policy;

	struct sugov_tunables *tunables;
	struct list_head tunables_hook;

	raw_spinlock_t update_lock;  /* For shared policies */
	u64 last_freq_update_time;
	s64 min_rate_limit_ns;
	s64 up_rate_delay_ns;
	s64 down_rate_delay_ns;
	unsigned int next_freq;
	unsigned int cached_raw_freq;

	/* The next fields are only needed if fast switch cannot be used. */
	struct irq_work irq_work;
	struct kthread_work work;
	struct mutex work_lock;
	struct kthread_worker worker;
	struct task_struct *thread;
	bool work_in_progress;

	bool need_freq_update;
};

struct sugov_cpu {
	struct update_util_data update_util;
	struct sugov_policy *sg_policy;

	unsigned long iowait_boost;
	unsigned long iowait_boost_max;
	u64 last_update;

	/* The fields below are only needed when sharing a policy. */
	unsigned long util;
	unsigned long max;
	unsigned int flags;

	/* The field below is for single-CPU policies only. */
#ifdef CONFIG_NO_HZ_COMMON
	unsigned long saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);

/************************ Governor internals ***********************/

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
	s64 delta_ns;

	if (sg_policy->work_in_progress)
		return false;

	if (unlikely(sg_policy->need_freq_update)) {
		sg_policy->need_freq_update = false;
		/*
		 * This happens when limits change, so forget the previous
		 * next_freq value and force an update.
		 */
		sg_policy->next_freq = UINT_MAX;
		return true;
	}

	delta_ns = time - sg_policy->last_freq_update_time;

	/* No need to recalculate the next frequency for at least min_rate_limit_us. */
	return delta_ns >= sg_policy->min_rate_limit_ns;
}

static bool sugov_up_down_rate_limit(struct sugov_policy *sg_policy, u64 time,
				     unsigned int next_freq)
{
	s64 delta_ns;

	delta_ns = time - sg_policy->last_freq_update_time;

	if (next_freq > sg_policy->next_freq &&
	    delta_ns < sg_policy->up_rate_delay_ns)
		return true;

	if (next_freq < sg_policy->next_freq &&
	    delta_ns < sg_policy->down_rate_delay_ns)
		return true;

	return false;
}

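/*
 * Illustrative example (not part of the original code): with
 * up_rate_limit_us = 500 and down_rate_limit_us = 20000, a request to raise
 * the frequency is dropped if it arrives less than 0.5 ms after the last
 * frequency update, while a request to lower it is dropped for 20 ms, so
 * ramping up can be made much more responsive than ramping down.
 */
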
static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
				unsigned int next_freq)
{
	struct cpufreq_policy *policy = sg_policy->policy;

	if (sugov_up_down_rate_limit(sg_policy, time, next_freq)) {
		/* Reset cached freq as next_freq isn't changed */
		sg_policy->cached_raw_freq = 0;
		return;
	}

	if (sg_policy->next_freq == next_freq)
		return;

	sg_policy->next_freq = next_freq;
	sg_policy->last_freq_update_time = time;

	if (policy->fast_switch_enabled) {
		next_freq = cpufreq_driver_fast_switch(policy, next_freq);
		if (next_freq == CPUFREQ_ENTRY_INVALID)
			return;

		policy->cur = next_freq;
		trace_cpu_frequency(next_freq, smp_processor_id());
	} else {
		sg_policy->work_in_progress = true;
		irq_work_queue(&sg_policy->irq_work);
	}
}

/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal to or greater than the
 * raw next_freq (as calculated above) is returned, subject to policy min/max
 * and cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
				  unsigned long util, unsigned long max)
{
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned int freq = arch_scale_freq_invariant() ?
				policy->cpuinfo.max_freq : policy->cur;

	freq = (freq + (freq >> 2)) * util / max;

	if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
		return sg_policy->next_freq;
	sg_policy->cached_raw_freq = freq;
	return cpufreq_driver_resolve_freq(policy, freq);
}

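/*
 * Worked example (illustrative only, not part of the original code): on a
 * frequency-invariant system with cpuinfo.max_freq = 2000000 kHz, util = 614
 * and max = 1024, the raw frequency is
 *
 *   (2000000 + 2000000/4) * 614 / 1024 = 1499023 kHz
 *
 * i.e. 1.25 * max_freq * (util / max). (freq + (freq >> 2)) is exactly
 * freq * 1.25, and cpufreq_driver_resolve_freq() then maps the result onto
 * the lowest driver-supported frequency at or above it (e.g. 1500000 kHz on
 * a hypothetical OPP table).
 */
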
static inline bool use_pelt(void)
{
#ifdef CONFIG_SCHED_WALT
	return (!sysctl_sched_use_walt_cpu_util || walt_disabled);
#else
	return true;
#endif
}

static void sugov_get_util(unsigned long *util, unsigned long *max, u64 time)
{
	int cpu = smp_processor_id();
	struct rq *rq = cpu_rq(cpu);
	unsigned long max_cap, rt;
	s64 delta;

	max_cap = arch_scale_cpu_capacity(NULL, cpu);

	sched_avg_update(rq);
	delta = time - rq->age_stamp;
	if (unlikely(delta < 0))
		delta = 0;
	rt = div64_u64(rq->rt_avg, sched_avg_period() + delta);
	rt = (rt * max_cap) >> SCHED_CAPACITY_SHIFT;

	*util = boosted_cpu_util(cpu);
	if (likely(use_pelt()))
		*util = min((*util + rt), max_cap);

	*max = max_cap;
}

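/*
 * Note (added for clarity; a sketch assuming the 4.9-era rt_avg accounting):
 * rq->rt_avg accumulates RT runtime pre-scaled by SCHED_CAPACITY_SCALE, so
 * dividing it by sched_avg_period() + delta yields the RT fraction of the
 * averaging period in 1024-based units. E.g. an RT fraction of 1/4 on a CPU
 * with max_cap = 1024 contributes rt = 256 capacity units, which is added on
 * top of the boosted CFS utilization and clamped to max_cap when PELT is in
 * use.
 */
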
static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
				   unsigned int flags)
{
	if (flags & SCHED_CPUFREQ_IOWAIT) {
		sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
	} else if (sg_cpu->iowait_boost) {
		s64 delta_ns = time - sg_cpu->last_update;

		/* Clear iowait_boost if the CPU appears to have been idle. */
		if (delta_ns > TICK_NSEC)
			sg_cpu->iowait_boost = 0;
	}
}

static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
			       unsigned long *max)
{
	unsigned long boost_util = sg_cpu->iowait_boost;
	unsigned long boost_max = sg_cpu->iowait_boost_max;

	if (!boost_util)
		return;

	if (*util * boost_max < *max * boost_util) {
		*util = boost_util;
		*max = boost_max;
	}
	sg_cpu->iowait_boost >>= 1;
}

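/*
 * Illustrative decay (not part of the original code): an IOWAIT wakeup sets
 * iowait_boost to iowait_boost_max; each subsequent update then halves it
 * (max, max/2, max/4, ...), so the boost only overrides the (util, max) pair
 * while boost_util/boost_max still exceeds the measured utilization ratio,
 * and it is cleared outright once the CPU has been idle for more than a
 * tick.
 */
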
#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
	unsigned long idle_calls = tick_nohz_get_idle_calls();
	bool ret = idle_calls == sg_cpu->saved_idle_calls;

	sg_cpu->saved_idle_calls = idle_calls;
	return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */

static void sugov_update_single(struct update_util_data *hook, u64 time,
				unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util, max;
	unsigned int next_f;
	bool busy;

	sugov_set_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	if (!sugov_should_update_freq(sg_policy, time))
		return;

	busy = sugov_cpu_is_busy(sg_cpu);

	if (flags & SCHED_CPUFREQ_DL) {
		next_f = policy->cpuinfo.max_freq;
	} else {
		sugov_get_util(&util, &max, time);
		sugov_iowait_boost(sg_cpu, &util, &max);
		next_f = get_next_freq(sg_policy, util, max);
		/*
		 * Do not reduce the frequency if the CPU has not been idle
		 * recently, as the reduction is likely to be premature then.
		 */
		if (busy && next_f < sg_policy->next_freq) {
			next_f = sg_policy->next_freq;

			/* Reset cached freq as next_freq has changed */
			sg_policy->cached_raw_freq = 0;
		}
	}
	sugov_update_commit(sg_policy, time, next_f);
}

static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	u64 last_freq_update_time = sg_policy->last_freq_update_time;
	unsigned long util = 0, max = 1;
	unsigned int j;

	for_each_cpu(j, policy->cpus) {
		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
		unsigned long j_util, j_max;
		s64 delta_ns;

		/*
		 * If the CPU utilization was last updated before the previous
		 * frequency update and the time elapsed between the last update
		 * of the CPU utilization and the last frequency update is long
		 * enough, don't take the CPU into account as it probably is
		 * idle now (and clear iowait_boost for it).
		 */
		delta_ns = last_freq_update_time - j_sg_cpu->last_update;
		if (delta_ns > TICK_NSEC) {
			j_sg_cpu->iowait_boost = 0;
			continue;
		}
		if (j_sg_cpu->flags & SCHED_CPUFREQ_DL)
			return policy->cpuinfo.max_freq;

		j_util = j_sg_cpu->util;
		j_max = j_sg_cpu->max;
		if (j_util * max > j_max * util) {
			util = j_util;
			max = j_max;
		}

		sugov_iowait_boost(j_sg_cpu, &util, &max);
	}

	return get_next_freq(sg_policy, util, max);
}

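/*
 * Illustrative aggregation (not part of the original code): the loop above
 * keeps the (util, max) pair with the highest util/max ratio, compared
 * without division as j_util * max > j_max * util. E.g. CPU0 with
 * util = 300, max = 1024 loses to CPU1 with util = 500, max = 512, since
 * 500 * 1024 > 512 * 300; the shared frequency then follows CPU1's ~98%
 * utilization rather than CPU0's ~29%.
 */
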
static void sugov_update_shared(struct update_util_data *hook, u64 time,
				unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned long util, max;
	unsigned int next_f;

	sugov_get_util(&util, &max, time);

	raw_spin_lock(&sg_policy->update_lock);

	sg_cpu->util = util;
	sg_cpu->max = max;
	sg_cpu->flags = flags;

	sugov_set_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	if (sugov_should_update_freq(sg_policy, time)) {
		if (flags & SCHED_CPUFREQ_DL)
			next_f = sg_policy->policy->cpuinfo.max_freq;
		else
			next_f = sugov_next_freq_shared(sg_cpu);

		sugov_update_commit(sg_policy, time, next_f);
	}

	raw_spin_unlock(&sg_policy->update_lock);
}

static void sugov_work(struct kthread_work *work)
{
	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);

	mutex_lock(&sg_policy->work_lock);
	__cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
				CPUFREQ_RELATION_L);
	mutex_unlock(&sg_policy->work_lock);

	sg_policy->work_in_progress = false;
}

static void sugov_irq_work(struct irq_work *irq_work)
{
	struct sugov_policy *sg_policy;

	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

	/*
	 * For RT and deadline tasks, the schedutil governor shoots the
	 * frequency to maximum. Special care must be taken to ensure that this
	 * kthread doesn't result in the same behavior.
	 *
	 * This is (mostly) guaranteed by the work_in_progress flag. The flag is
	 * updated only at the end of the sugov_work() function and before that
	 * the schedutil governor rejects all other frequency scaling requests.
	 *
	 * There is a very rare case though, where the RT thread yields right
	 * after the work_in_progress flag is cleared. The effects of that are
	 * neglected for now.
	 */
	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}

/************************** sysfs interface ************************/

static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct sugov_tunables, attr_set);
}

static DEFINE_MUTEX(min_rate_lock);

static void update_min_rate_limit_us(struct sugov_policy *sg_policy)
{
	mutex_lock(&min_rate_lock);
	sg_policy->min_rate_limit_ns = min(sg_policy->up_rate_delay_ns,
					   sg_policy->down_rate_delay_ns);
	mutex_unlock(&min_rate_lock);
}

static ssize_t up_rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return sprintf(buf, "%u\n", tunables->up_rate_limit_us);
}

static ssize_t down_rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return sprintf(buf, "%u\n", tunables->down_rate_limit_us);
}

static ssize_t up_rate_limit_us_store(struct gov_attr_set *attr_set,
				      const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned int rate_limit_us;

	if (kstrtouint(buf, 10, &rate_limit_us))
		return -EINVAL;

	tunables->up_rate_limit_us = rate_limit_us;

	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
		sg_policy->up_rate_delay_ns = rate_limit_us * NSEC_PER_USEC;
		update_min_rate_limit_us(sg_policy);
	}

	return count;
}

static ssize_t down_rate_limit_us_store(struct gov_attr_set *attr_set,
					const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned int rate_limit_us;

	if (kstrtouint(buf, 10, &rate_limit_us))
		return -EINVAL;

	tunables->down_rate_limit_us = rate_limit_us;

	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
		sg_policy->down_rate_delay_ns = rate_limit_us * NSEC_PER_USEC;
		update_min_rate_limit_us(sg_policy);
	}

	return count;
}

static struct governor_attr up_rate_limit_us = __ATTR_RW(up_rate_limit_us);
static struct governor_attr down_rate_limit_us = __ATTR_RW(down_rate_limit_us);

static struct attribute *sugov_attributes[] = {
	&up_rate_limit_us.attr,
	&down_rate_limit_us.attr,
	NULL
};

static struct kobj_type sugov_tunables_ktype = {
	.default_attrs = sugov_attributes,
	.sysfs_ops = &governor_sysfs_ops,
};

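/*
 * Usage sketch (illustrative; the exact path depends on the platform and on
 * whether per-policy tunables are enabled): the attributes above can be
 * tuned at run time from a shell, e.g.
 *
 *   echo 1000  > /sys/devices/system/cpu/cpufreq/policy0/schedutil/up_rate_limit_us
 *   echo 20000 > /sys/devices/system/cpu/cpufreq/policy0/schedutil/down_rate_limit_us
 *
 * which allows frequency increases after at most 1 ms while holding
 * decreases back for 20 ms.
 */
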
/********************** cpufreq governor interface *********************/

static struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;

	sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
	if (!sg_policy)
		return NULL;

	sg_policy->policy = policy;
	raw_spin_lock_init(&sg_policy->update_lock);
	return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
	kfree(sg_policy);
}

static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
	struct task_struct *thread;
	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
	struct cpufreq_policy *policy = sg_policy->policy;
	int ret;

	/* kthread only required for slow path */
	if (policy->fast_switch_enabled)
		return 0;

	kthread_init_work(&sg_policy->work, sugov_work);
	kthread_init_worker(&sg_policy->worker);
	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
				"sugov:%d",
				cpumask_first(policy->related_cpus));
	if (IS_ERR(thread)) {
		pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
		return PTR_ERR(thread);
	}

	ret = sched_setscheduler_nocheck(thread, SCHED_FIFO, &param);
	if (ret) {
		kthread_stop(thread);
		pr_warn("%s: failed to set SCHED_FIFO\n", __func__);
		return ret;
	}

	sg_policy->thread = thread;
	kthread_bind_mask(thread, policy->related_cpus);
	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
	mutex_init(&sg_policy->work_lock);

	wake_up_process(thread);

	return 0;
}

static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
	/* kthread only required for slow path */
	if (sg_policy->policy->fast_switch_enabled)
		return;

	kthread_flush_worker(&sg_policy->worker);
	kthread_stop(sg_policy->thread);
	mutex_destroy(&sg_policy->work_lock);
}

static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
	struct sugov_tunables *tunables;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (tunables) {
		gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
		if (!have_governor_per_policy())
			global_tunables = tunables;
	}
	return tunables;
}

static void sugov_tunables_free(struct sugov_tunables *tunables)
{
	if (!have_governor_per_policy())
		global_tunables = NULL;

	kfree(tunables);
}

static int sugov_init(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;
	struct sugov_tunables *tunables;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	cpufreq_enable_fast_switch(policy);

	sg_policy = sugov_policy_alloc(policy);
	if (!sg_policy) {
		ret = -ENOMEM;
		goto disable_fast_switch;
	}

	ret = sugov_kthread_create(sg_policy);
	if (ret)
		goto free_sg_policy;

	mutex_lock(&global_tunables_lock);

	if (global_tunables) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto stop_kthread;
		}
		policy->governor_data = sg_policy;
		sg_policy->tunables = global_tunables;

		gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
		goto out;
	}

	tunables = sugov_tunables_alloc(sg_policy);
	if (!tunables) {
		ret = -ENOMEM;
		goto stop_kthread;
	}

	if (policy->up_transition_delay_us && policy->down_transition_delay_us) {
		tunables->up_rate_limit_us = policy->up_transition_delay_us;
		tunables->down_rate_limit_us = policy->down_transition_delay_us;
	} else {
		unsigned int lat;

		tunables->up_rate_limit_us = LATENCY_MULTIPLIER;
		tunables->down_rate_limit_us = LATENCY_MULTIPLIER;
		lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
		if (lat) {
			tunables->up_rate_limit_us *= lat;
			tunables->down_rate_limit_us *= lat;
		}
	}

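	/*
	 * Illustrative default (added for clarity, assuming no
	 * platform-provided transition delays): a driver reporting
	 * cpuinfo.transition_latency = 10000 ns gives lat = 10, so both
	 * rate limits default to 10 * LATENCY_MULTIPLIER = 10000 us,
	 * i.e. at most one frequency change every 10 ms in each direction.
	 */
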
	policy->governor_data = sg_policy;
	sg_policy->tunables = tunables;

	ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
				   get_governor_parent_kobj(policy), "%s",
				   schedutil_gov.name);
	if (ret)
		goto fail;

out:
	mutex_unlock(&global_tunables_lock);
	return 0;

fail:
	policy->governor_data = NULL;
	sugov_tunables_free(tunables);

stop_kthread:
	sugov_kthread_stop(sg_policy);

free_sg_policy:
	mutex_unlock(&global_tunables_lock);

	sugov_policy_free(sg_policy);

disable_fast_switch:
	cpufreq_disable_fast_switch(policy);

	pr_err("initialization failed (error %d)\n", ret);
	return ret;
}

static void sugov_exit(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	struct sugov_tunables *tunables = sg_policy->tunables;
	unsigned int count;

	mutex_lock(&global_tunables_lock);

	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
	policy->governor_data = NULL;
	if (!count)
		sugov_tunables_free(tunables);

	mutex_unlock(&global_tunables_lock);

	sugov_kthread_stop(sg_policy);
	sugov_policy_free(sg_policy);

	cpufreq_disable_fast_switch(policy);
}

static int sugov_start(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	sg_policy->up_rate_delay_ns =
		sg_policy->tunables->up_rate_limit_us * NSEC_PER_USEC;
	sg_policy->down_rate_delay_ns =
		sg_policy->tunables->down_rate_limit_us * NSEC_PER_USEC;
	update_min_rate_limit_us(sg_policy);
	sg_policy->last_freq_update_time = 0;
	sg_policy->next_freq = UINT_MAX;
	sg_policy->work_in_progress = false;
	sg_policy->need_freq_update = false;
	sg_policy->cached_raw_freq = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		memset(sg_cpu, 0, sizeof(*sg_cpu));
		sg_cpu->sg_policy = sg_policy;
		sg_cpu->flags = SCHED_CPUFREQ_DL;
		sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
					     policy_is_shared(policy) ?
							sugov_update_shared :
							sugov_update_single);
	}
	return 0;
}

static void sugov_stop(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	for_each_cpu(cpu, policy->cpus)
		cpufreq_remove_update_util_hook(cpu);

	synchronize_sched();

	if (!policy->fast_switch_enabled) {
		irq_work_sync(&sg_policy->irq_work);
		kthread_cancel_work_sync(&sg_policy->work);
	}
}

static void sugov_limits(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;

	if (!policy->fast_switch_enabled) {
		mutex_lock(&sg_policy->work_lock);
		cpufreq_policy_apply_limits(policy);
		mutex_unlock(&sg_policy->work_lock);
	}

	sg_policy->need_freq_update = true;
}

static struct cpufreq_governor schedutil_gov = {
	.name = "schedutil",
	.owner = THIS_MODULE,
	.init = sugov_init,
	.exit = sugov_exit,
	.start = sugov_start,
	.stop = sugov_stop,
	.limits = sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &schedutil_gov;
}
#endif

static int __init sugov_register(void)
{
	return cpufreq_register_governor(&schedutil_gov);
}
fs_initcall(sugov_register);