/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static DEFINE_PER_CPU(struct update_util_data, update_util);

struct cpufreq_interactive_policyinfo {
	bool work_in_progress;
	struct irq_work irq_work;
	spinlock_t irq_work_lock; /* protects work_in_progress */
	struct timer_list policy_slack_timer;
	struct hrtimer notif_timer;
	spinlock_t load_lock; /* protects load tracking stat */
	u64 last_evaluated_jiffy;
	struct cpufreq_policy *policy;
	struct cpufreq_policy p_nolim; /* policy copy with no limits */
	struct cpufreq_frequency_table *freq_table;
	spinlock_t target_freq_lock; /* protects target_freq */
	unsigned int target_freq;
	unsigned int floor_freq;
	unsigned int min_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	u64 max_freq_hyst_start_time;
	struct rw_semaphore enable_sem;
	bool reject_notification;
	bool notif_pending;
	unsigned long notif_cpu;
	int governor_enabled;
	struct cpufreq_interactive_tunables *cached_tunables;
	struct sched_load *sl;
};

/* Protected by per-policy load_lock */
struct cpufreq_interactive_cpuinfo {
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	unsigned int loadadjfreq;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_policyinfo *, polinfo);
static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

static int set_window_count;
static int migration_register_count;
static struct mutex sched_lock;
static cpumask_t controlled_cpus;

/* Target load. Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
	DEFAULT_ABOVE_HISPEED_DELAY };

struct cpufreq_interactive_tunables {
	int usage_count;
	/* Hi speed to bump to from lo speed when load burst (default max) */
	unsigned int hispeed_freq;
	/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
	unsigned long go_hispeed_load;
	/* Target load. Lower values result in higher CPU speeds. */
	spinlock_t target_loads_lock;
	unsigned int *target_loads;
	int ntarget_loads;
	/*
	 * The minimum amount of time to spend at a frequency before we can
	 * ramp down.
	 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
	unsigned long min_sample_time;
	/*
	 * The sample rate of the timer used to increase frequency
	 */
	unsigned long timer_rate;
	/*
	 * Wait this long before raising speed above hispeed, by default a
	 * single timer interval.
	 */
	spinlock_t above_hispeed_delay_lock;
	unsigned int *above_hispeed_delay;
	int nabove_hispeed_delay;
	/* Non-zero means indefinite speed boost active */
	int boost_val;
	/* Duration of a boost pulse in usecs */
	int boostpulse_duration_val;
	/* End time of boost pulse in ktime converted to usecs */
	u64 boostpulse_endtime;
	bool boosted;
	/*
	 * Max additional time to wait in idle, beyond timer_rate, at speeds
	 * above minimum before wakeup to reduce speed, or -1 if unnecessary.
	 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
	int timer_slack_val;
	bool io_is_busy;

	/* scheduler input related flags */
	bool use_sched_load;
	bool use_migration_notif;

	/*
	 * Whether to align timer windows across all CPUs. When
	 * use_sched_load is true, this flag is ignored and windows
	 * will always be aligned.
	 */
	bool align_windows;

	/*
	 * Stay at max freq for at least max_freq_hysteresis before dropping
	 * frequency.
	 */
	unsigned int max_freq_hysteresis;

	/* Ignore hispeed_freq and above_hispeed_delay for notification */
	bool ignore_hispeed_on_notif;

	/* Ignore min_sample_time for notification */
	bool fast_ramp_down;

	/* Whether to enable prediction or not */
	bool enable_prediction;
};

/* For cases where we have single governor instance for system */
static struct cpufreq_interactive_tunables *common_tunables;
static struct cpufreq_interactive_tunables *cached_common_tunables;

static struct attribute_group *get_sysfs_attr(void);

/* Round to starting jiffy of next evaluation window */
static u64 round_to_nw_start(u64 jif,
			     struct cpufreq_interactive_tunables *tunables)
{
	unsigned long step = usecs_to_jiffies(tunables->timer_rate);
	u64 ret;

	if (tunables->use_sched_load || tunables->align_windows) {
		do_div(jif, step);
		ret = (jif + 1) * step;
	} else {
		ret = jiffies + usecs_to_jiffies(tunables->timer_rate);
	}

	return ret;
}
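
/*
 * Example (illustrative, assumes HZ = 100 so one jiffy is 10ms): with the
 * default timer_rate of 20000us the window step is 2 jiffies. When windows
 * are aligned, a jiffy count of 1003 rounds up to 1004, so every policy
 * evaluates on the same jiffy boundary; otherwise the next window simply
 * starts timer_rate microseconds from now.
 */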

static inline int set_window_helper(
			struct cpufreq_interactive_tunables *tunables)
{
	return sched_set_window(round_to_nw_start(get_jiffies_64(), tunables),
			usecs_to_jiffies(tunables->timer_rate));
}

static void cpufreq_interactive_timer_resched(unsigned long cpu,
					      bool slack_only)
{
	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_interactive_tunables *tunables =
		ppol->policy->governor_data;
	u64 expires;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ppol->load_lock, flags);
	expires = round_to_nw_start(ppol->last_evaluated_jiffy, tunables);
	if (!slack_only) {
		for_each_cpu(i, ppol->policy->cpus) {
			pcpu = &per_cpu(cpuinfo, i);
			pcpu->time_in_idle = get_cpu_idle_time(i,
						&pcpu->time_in_idle_timestamp,
						tunables->io_is_busy);
			pcpu->cputime_speedadj = 0;
			pcpu->cputime_speedadj_timestamp =
						pcpu->time_in_idle_timestamp;
		}
	}

	if (tunables->timer_slack_val >= 0 &&
	    ppol->target_freq > ppol->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		del_timer(&ppol->policy_slack_timer);
		ppol->policy_slack_timer.expires = expires;
		add_timer(&ppol->policy_slack_timer);
	}

	spin_unlock_irqrestore(&ppol->load_lock, flags);
}

static void update_util_handler(struct update_util_data *data, u64 time,
				unsigned int sched_flags)
{
	struct cpufreq_interactive_policyinfo *ppol;
	unsigned long flags;

	ppol = *this_cpu_ptr(&polinfo);
	spin_lock_irqsave(&ppol->irq_work_lock, flags);
	/*
	 * The irq-work may not be allowed to be queued up right now
	 * because work has already been queued up or is in progress.
	 */
	if (ppol->work_in_progress ||
	    sched_flags & SCHED_CPUFREQ_INTERCLUSTER_MIG)
		goto out;

	ppol->work_in_progress = true;
	irq_work_queue(&ppol->irq_work);
out:
	spin_unlock_irqrestore(&ppol->irq_work_lock, flags);
}

static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_remove_update_util_hook(i);

	synchronize_sched();
}

static void gov_set_update_util(struct cpufreq_policy *policy)
{
	struct update_util_data *util;
	int cpu;

	for_each_cpu(cpu, policy->cpus) {
		util = &per_cpu(update_util, cpu);
		cpufreq_add_update_util_hook(cpu, util, update_util_handler);
	}
}

/* The caller shall take enable_sem write semaphore to avoid any timer race.
 * The policy_slack_timer must be deactivated when calling this function.
 */
static void cpufreq_interactive_timer_start(
	struct cpufreq_interactive_tunables *tunables, int cpu)
{
	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
	struct cpufreq_interactive_cpuinfo *pcpu;
	u64 expires = round_to_nw_start(ppol->last_evaluated_jiffy, tunables);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ppol->load_lock, flags);
	gov_set_update_util(ppol->policy);
	if (tunables->timer_slack_val >= 0 &&
	    ppol->target_freq > ppol->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		ppol->policy_slack_timer.expires = expires;
		add_timer(&ppol->policy_slack_timer);
	}

	for_each_cpu(i, ppol->policy->cpus) {
		pcpu = &per_cpu(cpuinfo, i);
		pcpu->time_in_idle =
			get_cpu_idle_time(i, &pcpu->time_in_idle_timestamp,
					  tunables->io_is_busy);
		pcpu->cputime_speedadj = 0;
		pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	}
	spin_unlock_irqrestore(&ppol->load_lock, flags);
}

static unsigned int freq_to_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
			freq >= tunables->above_hispeed_delay[i+1]; i += 2)
		;

	ret = tunables->above_hispeed_delay[i];
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}
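
/*
 * Illustrative layout (example values, not a driver default):
 * above_hispeed_delay[] and target_loads[] are stored as flattened
 * "value freq:value ..." pairs with frequencies at odd indices, e.g.
 * {20000, 1400000, 40000} means wait 20000us below 1.4GHz and 40000us at
 * or above it. The lookup loop above and the one in freq_to_targetload()
 * below step by two over these pairs.
 */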

static unsigned int freq_to_targetload(
	struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads - 1 &&
		    freq >= tunables->target_loads[i+1]; i += 2)
		;

	ret = tunables->target_loads[i];
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

#define DEFAULT_MAX_LOAD 100
u32 get_freq_max_load(int cpu, unsigned int freq)
{
	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);

	if (!cpumask_test_cpu(cpu, &controlled_cpus))
		return DEFAULT_MAX_LOAD;

	if (have_governor_per_policy()) {
		if (!ppol || !ppol->cached_tunables)
			return DEFAULT_MAX_LOAD;
		return freq_to_targetload(ppol->cached_tunables, freq);
	}

	if (!cached_common_tunables)
		return DEFAULT_MAX_LOAD;
	return freq_to_targetload(cached_common_tunables, freq);
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
static unsigned int choose_freq(struct cpufreq_interactive_policyinfo *pcpu,
		unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(pcpu->policy->governor_data, freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		index = cpufreq_frequency_table_target(&pcpu->p_nolim,
						       loadadjfreq / tl,
						       CPUFREQ_RELATION_L);
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				index = cpufreq_frequency_table_target(
					    &pcpu->p_nolim,
					    freqmax - 1, CPUFREQ_RELATION_H);
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low. freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				index = cpufreq_frequency_table_target(
					    &pcpu->p_nolim,
					    freqmin + 1, CPUFREQ_RELATION_L);
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}
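
/*
 * Worked example (illustrative numbers): with a target load of 90 and
 * loadadjfreq == 76800000 (80% load at 960MHz), the first pass asks the
 * frequency table for the lowest frequency >= 76800000 / 90 ~= 853MHz.
 * The loop then re-evaluates the target load at the newly chosen step and
 * stops once the same frequency is picked twice in a row.
 */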

static u64 update_load(int cpu)
{
	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	struct cpufreq_interactive_tunables *tunables =
		ppol->policy->governor_data;
	u64 now_idle, now, active_time, delta_idle, delta_time;

	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
	delta_idle = (now_idle - pcpu->time_in_idle);
	delta_time = (now - pcpu->time_in_idle_timestamp);

	if (delta_time <= delta_idle)
		active_time = 0;
	else
		active_time = delta_time - delta_idle;

	pcpu->cputime_speedadj += active_time * ppol->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}
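
/*
 * Note (added for clarity): cputime_speedadj accumulates active_time
 * multiplied by the current frequency. cpufreq_interactive_timer()
 * divides it by elapsed wall time and multiplies by 100 to obtain the
 * "load adjusted frequency" that is compared against target_freq.
 */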

static unsigned int sl_busy_to_laf(struct cpufreq_interactive_policyinfo *ppol,
				   unsigned long busy)
{
	int prev_load;
	struct cpufreq_interactive_tunables *tunables =
		ppol->policy->governor_data;

	prev_load = mult_frac(ppol->policy->cpuinfo.max_freq * 100,
				busy, tunables->timer_rate);
	return prev_load;
}
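
/*
 * Example (illustrative numbers): the conversion is
 * laf = max_freq * 100 * busy / timer_rate. With max_freq = 1800000kHz,
 * busy = 10000us and timer_rate = 20000us this yields 90000000, which
 * corresponds to 50% load when target_freq equals max_freq.
 */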

#define NEW_TASK_RATIO 75
#define PRED_TOLERANCE_PCT 10
static void cpufreq_interactive_timer(int data)
{
	s64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	int pol_load = 0;
	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, data);
	struct cpufreq_interactive_tunables *tunables =
		ppol->policy->governor_data;
	struct sched_load *sl = ppol->sl;
	struct cpufreq_interactive_cpuinfo *pcpu;
	unsigned int new_freq;
	unsigned int prev_laf = 0, t_prevlaf;
	unsigned int pred_laf = 0, t_predlaf = 0;
	unsigned int prev_chfreq, pred_chfreq, chosen_freq;
	unsigned int index;
	unsigned long flags;
	unsigned long max_cpu;
	int i, cpu;
	int new_load_pct = 0;
	int prev_l, pred_l = 0;
	struct cpufreq_govinfo govinfo;
	bool skip_hispeed_logic, skip_min_sample_time;
	bool jump_to_max_no_ts = false;
	bool jump_to_max = false;

	if (!down_read_trylock(&ppol->enable_sem))
		return;
	if (!ppol->governor_enabled)
		goto exit;

	now = ktime_to_us(ktime_get());

	spin_lock_irqsave(&ppol->target_freq_lock, flags);
	spin_lock(&ppol->load_lock);

	skip_hispeed_logic =
		tunables->ignore_hispeed_on_notif && ppol->notif_pending;
	skip_min_sample_time = tunables->fast_ramp_down && ppol->notif_pending;
	ppol->notif_pending = false;
	now = ktime_to_us(ktime_get());
	ppol->last_evaluated_jiffy = get_jiffies_64();

	if (tunables->use_sched_load)
		sched_get_cpus_busy(sl, ppol->policy->cpus);
	max_cpu = cpumask_first(ppol->policy->cpus);
	i = 0;
	for_each_cpu(cpu, ppol->policy->cpus) {
		pcpu = &per_cpu(cpuinfo, cpu);
		if (tunables->use_sched_load) {
			t_prevlaf = sl_busy_to_laf(ppol, sl[i].prev_load);
			prev_l = t_prevlaf / ppol->target_freq;
			if (tunables->enable_prediction) {
				t_predlaf = sl_busy_to_laf(ppol,
						sl[i].predicted_load);
				pred_l = t_predlaf / ppol->target_freq;
			}
			if (sl[i].prev_load)
				new_load_pct = sl[i].new_task_load * 100 /
							sl[i].prev_load;
			else
				new_load_pct = 0;
		} else {
			now = update_load(cpu);
			delta_time = (unsigned int)
				(now - pcpu->cputime_speedadj_timestamp);
			if (WARN_ON_ONCE(!delta_time))
				continue;
			cputime_speedadj = pcpu->cputime_speedadj;
			do_div(cputime_speedadj, delta_time);
			t_prevlaf = (unsigned int)cputime_speedadj * 100;
			prev_l = t_prevlaf / ppol->target_freq;
		}

		/* find max of loadadjfreq inside policy */
		if (t_prevlaf > prev_laf) {
			prev_laf = t_prevlaf;
			max_cpu = cpu;
		}
		pred_laf = max(t_predlaf, pred_laf);

		cpu_load = max(prev_l, pred_l);
		pol_load = max(pol_load, cpu_load);
		trace_cpufreq_interactive_cpuload(cpu, cpu_load, new_load_pct,
						  prev_l, pred_l);

		/* save loadadjfreq for notification */
		pcpu->loadadjfreq = max(t_prevlaf, t_predlaf);

		/* detect heavy new task and jump to policy->max */
		if (prev_l >= tunables->go_hispeed_load &&
		    new_load_pct >= NEW_TASK_RATIO) {
			skip_hispeed_logic = true;
			jump_to_max = true;
		}
		i++;
	}
	spin_unlock(&ppol->load_lock);

	tunables->boosted = tunables->boost_val ||
			    now < tunables->boostpulse_endtime;

	prev_chfreq = choose_freq(ppol, prev_laf);
	pred_chfreq = choose_freq(ppol, pred_laf);
	chosen_freq = max(prev_chfreq, pred_chfreq);

	if (prev_chfreq < ppol->policy->max && pred_chfreq >= ppol->policy->max)
		if (!jump_to_max)
			jump_to_max_no_ts = true;

	if (now - ppol->max_freq_hyst_start_time <
	    tunables->max_freq_hysteresis &&
	    pol_load >= tunables->go_hispeed_load &&
	    ppol->target_freq < ppol->policy->max) {
		skip_hispeed_logic = true;
		skip_min_sample_time = true;
		if (!jump_to_max)
			jump_to_max_no_ts = true;
	}

	new_freq = chosen_freq;
	if (jump_to_max_no_ts || jump_to_max) {
		new_freq = ppol->policy->cpuinfo.max_freq;
	} else if (!skip_hispeed_logic) {
		if (pol_load >= tunables->go_hispeed_load ||
		    tunables->boosted) {
			if (ppol->target_freq < tunables->hispeed_freq)
				new_freq = tunables->hispeed_freq;
			else
				new_freq = max(new_freq,
					       tunables->hispeed_freq);
		}
	}

	if (now - ppol->max_freq_hyst_start_time <
	    tunables->max_freq_hysteresis)
		new_freq = max(tunables->hispeed_freq, new_freq);

	if (!skip_hispeed_logic &&
	    ppol->target_freq >= tunables->hispeed_freq &&
	    new_freq > ppol->target_freq &&
	    now - ppol->hispeed_validate_time <
	    freq_to_above_hispeed_delay(tunables, ppol->target_freq)) {
		trace_cpufreq_interactive_notyet(
			max_cpu, pol_load, ppol->target_freq,
			ppol->policy->cur, new_freq);
		spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
		goto rearm;
	}

	ppol->hispeed_validate_time = now;

	index = cpufreq_frequency_table_target(&ppol->p_nolim, new_freq,
					       CPUFREQ_RELATION_L);
	new_freq = ppol->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (!skip_min_sample_time && new_freq < ppol->floor_freq) {
		if (now - ppol->floor_validate_time <
				tunables->min_sample_time) {
			trace_cpufreq_interactive_notyet(
				max_cpu, pol_load, ppol->target_freq,
				ppol->policy->cur, new_freq);
			spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq. If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off). If policy->max is restored
	 * for max_freq_hysteresis, don't extend the timestamp. Otherwise, it
	 * could incorrectly extend the duration of max_freq_hysteresis by
	 * min_sample_time.
	 */

	if ((!tunables->boosted || new_freq > tunables->hispeed_freq)
	    && !jump_to_max_no_ts) {
		ppol->floor_freq = new_freq;
		ppol->floor_validate_time = now;
	}

	if (new_freq >= ppol->policy->max && !jump_to_max_no_ts)
		ppol->max_freq_hyst_start_time = now;

	if (ppol->target_freq == new_freq &&
			ppol->target_freq <= ppol->policy->cur) {
		trace_cpufreq_interactive_already(
			max_cpu, pol_load, ppol->target_freq,
			ppol->policy->cur, new_freq);
		spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
		goto rearm;
	}

	trace_cpufreq_interactive_target(max_cpu, pol_load, ppol->target_freq,
					 ppol->policy->cur, new_freq);

	ppol->target_freq = new_freq;
	spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(max_cpu, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	wake_up_process(speedchange_task);

rearm:
	cpufreq_interactive_timer_resched(data, false);

	/*
	 * Send govinfo notification.
	 * Govinfo notification could potentially wake up another thread
	 * managed by its clients. Thread wakeups might trigger a load
	 * change callback that executes this function again. Therefore
	 * no spinlock could be held when sending the notification.
	 */
	for_each_cpu(i, ppol->policy->cpus) {
		pcpu = &per_cpu(cpuinfo, i);
		govinfo.cpu = i;
		govinfo.load = pcpu->loadadjfreq / ppol->policy->max;
		govinfo.sampling_rate_us = tunables->timer_rate;
		atomic_notifier_call_chain(&cpufreq_govinfo_notifier_list,
					   CPUFREQ_LOAD_CHANGE, &govinfo);
	}

exit:
	up_read(&ppol->enable_sem);
	return;
}

static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_policyinfo *ppol;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			ppol = per_cpu(polinfo, cpu);
			if (!down_read_trylock(&ppol->enable_sem))
				continue;
			if (!ppol->governor_enabled) {
				up_read(&ppol->enable_sem);
				continue;
			}

			if (ppol->target_freq != ppol->policy->cur)
				__cpufreq_driver_target(ppol->policy,
							ppol->target_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
						     ppol->target_freq,
						     ppol->policy->cur);
			up_read(&ppol->enable_sem);
		}
	}

	return 0;
}

static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunables)
{
	int i;
	int anyboost = 0;
	unsigned long flags[2];
	struct cpufreq_interactive_policyinfo *ppol;

	tunables->boosted = true;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);

	for_each_online_cpu(i) {
		ppol = per_cpu(polinfo, i);
		if (!ppol || tunables != ppol->policy->governor_data)
			continue;

		spin_lock_irqsave(&ppol->target_freq_lock, flags[1]);
		if (ppol->target_freq < tunables->hispeed_freq) {
			ppol->target_freq = tunables->hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			ppol->hispeed_validate_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */

		ppol->floor_freq = tunables->hispeed_freq;
		ppol->floor_validate_time = ktime_to_us(ktime_get());
		spin_unlock_irqrestore(&ppol->target_freq_lock, flags[1]);
		break;
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);

	if (anyboost)
		wake_up_process(speedchange_task);
}

static int load_change_callback(struct notifier_block *nb, unsigned long val,
				void *data)
{
	unsigned long cpu = (unsigned long) data;
	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
	struct cpufreq_interactive_tunables *tunables;
	unsigned long flags;

	if (!ppol || ppol->reject_notification)
		return 0;

	if (!down_read_trylock(&ppol->enable_sem))
		return 0;
	if (!ppol->governor_enabled)
		goto exit;

	tunables = ppol->policy->governor_data;
	if (!tunables->use_sched_load || !tunables->use_migration_notif)
		goto exit;

	spin_lock_irqsave(&ppol->target_freq_lock, flags);
	ppol->notif_pending = true;
	ppol->notif_cpu = cpu;
	spin_unlock_irqrestore(&ppol->target_freq_lock, flags);

	if (!hrtimer_is_queued(&ppol->notif_timer))
		hrtimer_start(&ppol->notif_timer, ms_to_ktime(1),
			      HRTIMER_MODE_REL);
exit:
	up_read(&ppol->enable_sem);
	return 0;
}

static enum hrtimer_restart cpufreq_interactive_hrtimer(struct hrtimer *timer)
{
	struct cpufreq_interactive_policyinfo *ppol = container_of(timer,
			struct cpufreq_interactive_policyinfo, notif_timer);
	int cpu;

	if (!down_read_trylock(&ppol->enable_sem))
		return HRTIMER_NORESTART;
	if (!ppol->governor_enabled) {
		up_read(&ppol->enable_sem);
		return HRTIMER_NORESTART;
	}
	cpu = ppol->notif_cpu;
	trace_cpufreq_interactive_load_change(cpu);
	del_timer(&ppol->policy_slack_timer);
	cpufreq_interactive_timer(cpu);

	up_read(&ppol->enable_sem);
	return HRTIMER_NORESTART;
}

static struct notifier_block load_notifier_block = {
	.notifier_call = load_change_callback,
};

static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_policyinfo *ppol;
	int cpu;
	unsigned long flags;

	if (val == CPUFREQ_POSTCHANGE) {
		ppol = per_cpu(polinfo, freq->cpu);
		if (!ppol)
			return 0;
		if (!down_read_trylock(&ppol->enable_sem))
			return 0;
		if (!ppol->governor_enabled) {
			up_read(&ppol->enable_sem);
			return 0;
		}

		if (cpumask_first(ppol->policy->cpus) != freq->cpu) {
			up_read(&ppol->enable_sem);
			return 0;
		}
		spin_lock_irqsave(&ppol->load_lock, flags);
		for_each_cpu(cpu, ppol->policy->cpus)
			update_load(cpu);
		spin_unlock_irqrestore(&ppol->load_lock, flags);

		up_read(&ppol->enable_sem);
	}
	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
	const char *cp;
	int i;
	int ntokens = 1;
	unsigned int *tokenized_data;
	int err = -EINVAL;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err;

	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!tokenized_data) {
		err = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
			goto err_kfree;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_kfree;

	*num_tokens = ntokens;
	return tokenized_data;

err_kfree:
	kfree(tokenized_data);
err:
	return ERR_PTR(err);
}
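
/*
 * Example input (illustrative): the tokenizer accepts an odd number of
 * values separated by spaces or colons, so writing
 * "85 1500000:90 1800000:70" to a sysfs node yields the array
 * {85, 1500000, 90, 1800000, 70}, which store_target_loads() and
 * store_above_hispeed_delay() below install under their spinlocks.
 */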

static ssize_t show_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

static ssize_t store_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_target_loads = NULL;
	unsigned long flags;

	new_target_loads = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_target_loads))
		return PTR_RET(new_target_loads);

	spin_lock_irqsave(&tunables->target_loads_lock, flags);
	if (tunables->target_loads != default_target_loads)
		kfree(tunables->target_loads);
	tunables->target_loads = new_target_loads;
	tunables->ntarget_loads = ntokens;
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);

	sched_update_freq_max_load(&controlled_cpus);

	return count;
}

static ssize_t show_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay; i++)
		ret += sprintf(buf + ret, "%u%s",
			       tunables->above_hispeed_delay[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static ssize_t store_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_above_hispeed_delay = NULL;
	unsigned long flags;

	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_above_hispeed_delay))
		return PTR_RET(new_above_hispeed_delay);

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
	if (tunables->above_hispeed_delay != default_above_hispeed_delay)
		kfree(tunables->above_hispeed_delay);
	tunables->above_hispeed_delay = new_above_hispeed_delay;
	tunables->nabove_hispeed_delay = ntokens;
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return count;
}

static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->hispeed_freq);
}

static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	long unsigned int val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->hispeed_freq = val;
	return count;
}

#define show_store_one(file_name)					\
static ssize_t show_##file_name(					\
	struct cpufreq_interactive_tunables *tunables, char *buf)	\
{									\
	return snprintf(buf, PAGE_SIZE, "%u\n", tunables->file_name);	\
}									\
static ssize_t store_##file_name(					\
		struct cpufreq_interactive_tunables *tunables,		\
		const char *buf, size_t count)				\
{									\
	int ret;							\
	unsigned long int val;						\
									\
	ret = kstrtoul(buf, 0, &val);					\
	if (ret < 0)							\
		return ret;						\
	tunables->file_name = val;					\
	return count;							\
}
show_store_one(max_freq_hysteresis);
show_store_one(align_windows);
show_store_one(ignore_hispeed_on_notif);
show_store_one(fast_ramp_down);
show_store_one(enable_prediction);
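
/*
 * Note (added for clarity): each show_store_one() expansion above emits a
 * show_<name>()/store_<name>() pair that parses the value with kstrtoul()
 * and stores it in the matching tunable, mirroring the open-coded
 * handlers for the remaining tunables in this file.
 */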

static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->go_hispeed_load = val;
	return count;
}

static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->min_sample_time);
}

static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->min_sample_time = val;
	return count;
}

static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%lu\n", tunables->timer_rate);
}

static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val, val_round;
	struct cpufreq_interactive_tunables *t;
	int cpu;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	val_round = jiffies_to_usecs(usecs_to_jiffies(val));
	if (val != val_round)
		pr_warn("timer_rate not aligned to jiffy. Rounded up to %lu\n",
			val_round);
	tunables->timer_rate = val_round;

	if (!tunables->use_sched_load)
		return count;

	for_each_possible_cpu(cpu) {
		if (!per_cpu(polinfo, cpu))
			continue;
		t = per_cpu(polinfo, cpu)->cached_tunables;
		if (t && t->use_sched_load)
			t->timer_rate = val_round;
	}
	set_window_helper(tunables);

	return count;
}
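
/*
 * Example (illustrative, assumes HZ = 100): writing 25000 to timer_rate is
 * rounded up to 30000 by the usecs_to_jiffies()/jiffies_to_usecs() round
 * trip above, and the warning reports the value actually stored.
 */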
1161
Viresh Kumar17d15c42013-05-16 14:58:54 +05301162static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
1163 char *buf)
Todd Poynor4add2592012-12-18 17:50:10 -08001164{
Viresh Kumar17d15c42013-05-16 14:58:54 +05301165 return sprintf(buf, "%d\n", tunables->timer_slack_val);
Todd Poynor4add2592012-12-18 17:50:10 -08001166}
1167
Viresh Kumar17d15c42013-05-16 14:58:54 +05301168static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
1169 const char *buf, size_t count)
Todd Poynor4add2592012-12-18 17:50:10 -08001170{
1171 int ret;
1172 unsigned long val;
1173
1174 ret = kstrtol(buf, 10, &val);
1175 if (ret < 0)
1176 return ret;
1177
Viresh Kumar17d15c42013-05-16 14:58:54 +05301178 tunables->timer_slack_val = val;
Todd Poynor4add2592012-12-18 17:50:10 -08001179 return count;
1180}
1181
Viresh Kumar17d15c42013-05-16 14:58:54 +05301182static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
Todd Poynor15a9ea02012-04-23 20:42:41 -07001183 char *buf)
1184{
Viresh Kumar17d15c42013-05-16 14:58:54 +05301185 return sprintf(buf, "%d\n", tunables->boost_val);
Todd Poynor15a9ea02012-04-23 20:42:41 -07001186}
1187
Viresh Kumar17d15c42013-05-16 14:58:54 +05301188static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
Todd Poynor15a9ea02012-04-23 20:42:41 -07001189 const char *buf, size_t count)
1190{
1191 int ret;
1192 unsigned long val;
1193
1194 ret = kstrtoul(buf, 0, &val);
1195 if (ret < 0)
1196 return ret;
1197
Viresh Kumar17d15c42013-05-16 14:58:54 +05301198 tunables->boost_val = val;
Todd Poynor15a9ea02012-04-23 20:42:41 -07001199
Viresh Kumar17d15c42013-05-16 14:58:54 +05301200 if (tunables->boost_val) {
Todd Poynor442a3122012-05-03 00:16:55 -07001201 trace_cpufreq_interactive_boost("on");
Lianwei Wang2277e3f2014-12-02 17:20:50 -08001202 if (!tunables->boosted)
1203 cpufreq_interactive_boost(tunables);
Todd Poynor442a3122012-05-03 00:16:55 -07001204 } else {
Ruchi Kandoi296d7912014-04-09 16:47:59 -07001205 tunables->boostpulse_endtime = ktime_to_us(ktime_get());
Todd Poynor442a3122012-05-03 00:16:55 -07001206 trace_cpufreq_interactive_unboost("off");
1207 }
Todd Poynor15a9ea02012-04-23 20:42:41 -07001208
1209 return count;
1210}
1211
Viresh Kumar17d15c42013-05-16 14:58:54 +05301212static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
Todd Poynor442a3122012-05-03 00:16:55 -07001213 const char *buf, size_t count)
1214{
1215 int ret;
1216 unsigned long val;
1217
1218 ret = kstrtoul(buf, 0, &val);
1219 if (ret < 0)
1220 return ret;
1221
Viresh Kumar17d15c42013-05-16 14:58:54 +05301222 tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
1223 tunables->boostpulse_duration_val;
Todd Poynor442a3122012-05-03 00:16:55 -07001224 trace_cpufreq_interactive_boost("pulse");
Lianwei Wang2277e3f2014-12-02 17:20:50 -08001225 if (!tunables->boosted)
1226 cpufreq_interactive_boost(tunables);
Todd Poynor442a3122012-05-03 00:16:55 -07001227 return count;
1228}
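/*
 * Example usage (a sketch; the exact sysfs path depends on whether the
 * governor runs per-policy and on the kernel's cpufreq layout):
 *
 *   echo 1 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse
 *
 * Writing any value pulses the managed CPUs to at least hispeed_freq for
 * boostpulse_duration_val microseconds, whereas the "boost" attribute above
 * holds the boost until 0 is written back to it.
 */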
1229
Viresh Kumar17d15c42013-05-16 14:58:54 +05301230static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
1231 *tunables, char *buf)
Todd Poynore16d5922012-12-14 17:31:19 -08001232{
Viresh Kumar17d15c42013-05-16 14:58:54 +05301233 return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
Todd Poynore16d5922012-12-14 17:31:19 -08001234}
1235
Viresh Kumar17d15c42013-05-16 14:58:54 +05301236static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
1237 *tunables, const char *buf, size_t count)
Todd Poynore16d5922012-12-14 17:31:19 -08001238{
1239 int ret;
1240 unsigned long val;
1241
1242 ret = kstrtoul(buf, 0, &val);
1243 if (ret < 0)
1244 return ret;
1245
Viresh Kumar17d15c42013-05-16 14:58:54 +05301246 tunables->boostpulse_duration_val = val;
Todd Poynore16d5922012-12-14 17:31:19 -08001247 return count;
1248}
1249
Viresh Kumar17d15c42013-05-16 14:58:54 +05301250static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
1251 char *buf)
Lianwei Wang72e40572013-02-22 11:39:18 +08001252{
Viresh Kumar17d15c42013-05-16 14:58:54 +05301253 return sprintf(buf, "%u\n", tunables->io_is_busy);
Lianwei Wang72e40572013-02-22 11:39:18 +08001254}
1255
Viresh Kumar17d15c42013-05-16 14:58:54 +05301256static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
1257 const char *buf, size_t count)
Lianwei Wang72e40572013-02-22 11:39:18 +08001258{
1259 int ret;
1260 unsigned long val;
Junjie Wu4344ea32014-04-28 16:22:24 -07001261 struct cpufreq_interactive_tunables *t;
1262 int cpu;
Lianwei Wang72e40572013-02-22 11:39:18 +08001263
1264 ret = kstrtoul(buf, 0, &val);
1265 if (ret < 0)
1266 return ret;
Viresh Kumar17d15c42013-05-16 14:58:54 +05301267 tunables->io_is_busy = val;
Junjie Wu4344ea32014-04-28 16:22:24 -07001268
1269 if (!tunables->use_sched_load)
1270 return count;
1271
1272 for_each_possible_cpu(cpu) {
Junjie Wucf531ef2015-04-17 12:48:36 -07001273 if (!per_cpu(polinfo, cpu))
1274 continue;
1275 t = per_cpu(polinfo, cpu)->cached_tunables;
Junjie Wu4344ea32014-04-28 16:22:24 -07001276 if (t && t->use_sched_load)
1277 t->io_is_busy = val;
1278 }
1279 sched_set_io_is_busy(val);
1280
1281 return count;
1282}
1283
1284static int cpufreq_interactive_enable_sched_input(
1285 struct cpufreq_interactive_tunables *tunables)
1286{
1287 int rc = 0, j;
1288 struct cpufreq_interactive_tunables *t;
1289
1290 mutex_lock(&sched_lock);
1291
1292 set_window_count++;
Junjie Wue627d702014-12-15 16:51:08 -08001293 if (set_window_count > 1) {
Junjie Wu4344ea32014-04-28 16:22:24 -07001294 for_each_possible_cpu(j) {
Junjie Wucf531ef2015-04-17 12:48:36 -07001295 if (!per_cpu(polinfo, j))
1296 continue;
1297 t = per_cpu(polinfo, j)->cached_tunables;
Junjie Wu4344ea32014-04-28 16:22:24 -07001298 if (t && t->use_sched_load) {
1299 tunables->timer_rate = t->timer_rate;
1300 tunables->io_is_busy = t->io_is_busy;
1301 break;
1302 }
1303 }
Junjie Wue627d702014-12-15 16:51:08 -08001304 } else {
1305 rc = set_window_helper(tunables);
1306 if (rc) {
1307 pr_err("%s: Failed to set sched window\n", __func__);
1308 set_window_count--;
1309 goto out;
1310 }
1311 sched_set_io_is_busy(tunables->io_is_busy);
Junjie Wu4344ea32014-04-28 16:22:24 -07001312 }
1313
Junjie Wu4344ea32014-04-28 16:22:24 -07001314 if (!tunables->use_migration_notif)
1315 goto out;
1316
1317 migration_register_count++;
Junjie Wue627d702014-12-15 16:51:08 -08001318 if (migration_register_count > 1)
Junjie Wu4344ea32014-04-28 16:22:24 -07001319 goto out;
1320 else
1321 atomic_notifier_chain_register(&load_alert_notifier_head,
1322 &load_notifier_block);
1323out:
1324 mutex_unlock(&sched_lock);
1325 return rc;
1326}
1327
1328static int cpufreq_interactive_disable_sched_input(
1329 struct cpufreq_interactive_tunables *tunables)
1330{
1331 mutex_lock(&sched_lock);
1332
1333 if (tunables->use_migration_notif) {
1334 migration_register_count--;
Junjie Wue627d702014-12-15 16:51:08 -08001335 if (migration_register_count < 1)
Junjie Wu4344ea32014-04-28 16:22:24 -07001336 atomic_notifier_chain_unregister(
1337 &load_alert_notifier_head,
1338 &load_notifier_block);
1339 }
1340 set_window_count--;
1341
1342 mutex_unlock(&sched_lock);
1343 return 0;
1344}
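/*
 * set_window_count and migration_register_count act as reference counts
 * shared by all governor instances that consume scheduler-reported load:
 * the first enable programs the sched window and registers the load-alert
 * notifier, later enables only inherit timer_rate/io_is_busy from an
 * existing sched_load user, and each disable drops the counts so the
 * notifier is unregistered only when the last user goes away.
 */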
1345
1346static ssize_t show_use_sched_load(
1347 struct cpufreq_interactive_tunables *tunables, char *buf)
1348{
1349 return snprintf(buf, PAGE_SIZE, "%d\n", tunables->use_sched_load);
1350}
1351
1352static ssize_t store_use_sched_load(
1353 struct cpufreq_interactive_tunables *tunables,
1354 const char *buf, size_t count)
1355{
1356 int ret;
1357 unsigned long val;
1358
1359 ret = kstrtoul(buf, 0, &val);
1360 if (ret < 0)
1361 return ret;
1362
1363 if (tunables->use_sched_load == (bool) val)
1364 return count;
Hanumath Prasada9c07002015-06-30 15:19:39 +05301365
1366 tunables->use_sched_load = val;
1367
Junjie Wu4344ea32014-04-28 16:22:24 -07001368 if (val)
1369 ret = cpufreq_interactive_enable_sched_input(tunables);
1370 else
1371 ret = cpufreq_interactive_disable_sched_input(tunables);
1372
Hanumath Prasada9c07002015-06-30 15:19:39 +05301373 if (ret) {
1374 tunables->use_sched_load = !val;
Junjie Wu4344ea32014-04-28 16:22:24 -07001375 return ret;
Hanumath Prasada9c07002015-06-30 15:19:39 +05301376 }
Junjie Wu4344ea32014-04-28 16:22:24 -07001377
Junjie Wu4344ea32014-04-28 16:22:24 -07001378 return count;
1379}
1380
1381static ssize_t show_use_migration_notif(
1382 struct cpufreq_interactive_tunables *tunables, char *buf)
1383{
1384 return snprintf(buf, PAGE_SIZE, "%d\n",
1385 tunables->use_migration_notif);
1386}
1387
1388static ssize_t store_use_migration_notif(
1389 struct cpufreq_interactive_tunables *tunables,
1390 const char *buf, size_t count)
1391{
1392 int ret;
1393 unsigned long val;
1394
1395 ret = kstrtoul(buf, 0, &val);
1396 if (ret < 0)
1397 return ret;
1398
1399 if (tunables->use_migration_notif == (bool) val)
1400 return count;
1401 tunables->use_migration_notif = val;
1402
1403 if (!tunables->use_sched_load)
1404 return count;
1405
1406 mutex_lock(&sched_lock);
1407 if (val) {
1408 migration_register_count++;
1409 if (migration_register_count == 1)
1410 atomic_notifier_chain_register(
1411 &load_alert_notifier_head,
1412 &load_notifier_block);
1413 } else {
1414 migration_register_count--;
1415 if (!migration_register_count)
1416 atomic_notifier_chain_unregister(
1417 &load_alert_notifier_head,
1418 &load_notifier_block);
1419 }
1420 mutex_unlock(&sched_lock);
1421
Lianwei Wang72e40572013-02-22 11:39:18 +08001422 return count;
1423}
1424
Viresh Kumar17d15c42013-05-16 14:58:54 +05301425/*
1426 * Create show/store routines
1427 * - sys: One governor instance for complete SYSTEM
1428 * - pol: One governor instance per struct cpufreq_policy
1429 */
1430#define show_gov_pol_sys(file_name) \
1431static ssize_t show_##file_name##_gov_sys \
jianzhoudd779cb2019-04-26 10:38:56 +08001432(struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
Viresh Kumar17d15c42013-05-16 14:58:54 +05301433{ \
1434 return show_##file_name(common_tunables, buf); \
1435} \
1436 \
1437static ssize_t show_##file_name##_gov_pol \
1438(struct cpufreq_policy *policy, char *buf) \
1439{ \
1440 return show_##file_name(policy->governor_data, buf); \
1441}
Lianwei Wang72e40572013-02-22 11:39:18 +08001442
Viresh Kumar17d15c42013-05-16 14:58:54 +05301443#define store_gov_pol_sys(file_name) \
1444static ssize_t store_##file_name##_gov_sys \
jianzhoudd779cb2019-04-26 10:38:56 +08001445(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, \
Viresh Kumar17d15c42013-05-16 14:58:54 +05301446 size_t count) \
1447{ \
1448 return store_##file_name(common_tunables, buf, count); \
1449} \
1450 \
1451static ssize_t store_##file_name##_gov_pol \
1452(struct cpufreq_policy *policy, const char *buf, size_t count) \
1453{ \
1454 return store_##file_name(policy->governor_data, buf, count); \
1455}
1456
1457#define show_store_gov_pol_sys(file_name) \
1458show_gov_pol_sys(file_name); \
1459store_gov_pol_sys(file_name)
1460
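/*
 * Expansion sketch (illustrative, not generated code): for instance,
 * show_store_gov_pol_sys(timer_rate) emits four wrappers,
 *
 *   show_timer_rate_gov_sys()/store_timer_rate_gov_sys() - operate on the
 *     global common_tunables, and
 *   show_timer_rate_gov_pol()/store_timer_rate_gov_pol() - operate on
 *     policy->governor_data,
 *
 * all forwarding to show_timer_rate()/store_timer_rate(). The
 * gov_sys_attr_rw()/gov_pol_attr_rw() macros below then wire them into
 * kobj_attribute and freq_attr entries respectively.
 */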
1461show_store_gov_pol_sys(target_loads);
1462show_store_gov_pol_sys(above_hispeed_delay);
1463show_store_gov_pol_sys(hispeed_freq);
1464show_store_gov_pol_sys(go_hispeed_load);
1465show_store_gov_pol_sys(min_sample_time);
1466show_store_gov_pol_sys(timer_rate);
1467show_store_gov_pol_sys(timer_slack);
1468show_store_gov_pol_sys(boost);
1469store_gov_pol_sys(boostpulse);
1470show_store_gov_pol_sys(boostpulse_duration);
1471show_store_gov_pol_sys(io_is_busy);
Junjie Wu4344ea32014-04-28 16:22:24 -07001472show_store_gov_pol_sys(use_sched_load);
1473show_store_gov_pol_sys(use_migration_notif);
Junjie Wue05d74e2014-08-29 14:12:52 -07001474show_store_gov_pol_sys(max_freq_hysteresis);
Junjie Wu7ca999f2014-08-29 18:55:45 -07001475show_store_gov_pol_sys(align_windows);
Junjie Wu3381c4c2015-08-19 15:45:37 -07001476show_store_gov_pol_sys(ignore_hispeed_on_notif);
Junjie Wu450c8572015-07-22 17:38:49 -07001477show_store_gov_pol_sys(fast_ramp_down);
Junjie Wu7c128602015-06-09 17:36:11 -07001478show_store_gov_pol_sys(enable_prediction);
Viresh Kumar17d15c42013-05-16 14:58:54 +05301479
1480#define gov_sys_attr_rw(_name) \
jianzhoudd779cb2019-04-26 10:38:56 +08001481static struct kobj_attribute _name##_gov_sys = \
Viresh Kumar17d15c42013-05-16 14:58:54 +05301482__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)
1483
1484#define gov_pol_attr_rw(_name) \
1485static struct freq_attr _name##_gov_pol = \
1486__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
1487
1488#define gov_sys_pol_attr_rw(_name) \
1489 gov_sys_attr_rw(_name); \
1490 gov_pol_attr_rw(_name)
1491
1492gov_sys_pol_attr_rw(target_loads);
1493gov_sys_pol_attr_rw(above_hispeed_delay);
1494gov_sys_pol_attr_rw(hispeed_freq);
1495gov_sys_pol_attr_rw(go_hispeed_load);
1496gov_sys_pol_attr_rw(min_sample_time);
1497gov_sys_pol_attr_rw(timer_rate);
1498gov_sys_pol_attr_rw(timer_slack);
1499gov_sys_pol_attr_rw(boost);
1500gov_sys_pol_attr_rw(boostpulse_duration);
1501gov_sys_pol_attr_rw(io_is_busy);
Junjie Wu4344ea32014-04-28 16:22:24 -07001502gov_sys_pol_attr_rw(use_sched_load);
1503gov_sys_pol_attr_rw(use_migration_notif);
Junjie Wue05d74e2014-08-29 14:12:52 -07001504gov_sys_pol_attr_rw(max_freq_hysteresis);
Junjie Wu7ca999f2014-08-29 18:55:45 -07001505gov_sys_pol_attr_rw(align_windows);
Junjie Wu3381c4c2015-08-19 15:45:37 -07001506gov_sys_pol_attr_rw(ignore_hispeed_on_notif);
Junjie Wu450c8572015-07-22 17:38:49 -07001507gov_sys_pol_attr_rw(fast_ramp_down);
Junjie Wu7c128602015-06-09 17:36:11 -07001508gov_sys_pol_attr_rw(enable_prediction);
Viresh Kumar17d15c42013-05-16 14:58:54 +05301509
jianzhoudd779cb2019-04-26 10:38:56 +08001510static struct kobj_attribute boostpulse_gov_sys =
Viresh Kumar17d15c42013-05-16 14:58:54 +05301511 __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);
1512
1513static struct freq_attr boostpulse_gov_pol =
1514 __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);
1515
1516/* One governor instance for the entire system */
1517static struct attribute *interactive_attributes_gov_sys[] = {
1518 &target_loads_gov_sys.attr,
1519 &above_hispeed_delay_gov_sys.attr,
1520 &hispeed_freq_gov_sys.attr,
1521 &go_hispeed_load_gov_sys.attr,
1522 &min_sample_time_gov_sys.attr,
1523 &timer_rate_gov_sys.attr,
1524 &timer_slack_gov_sys.attr,
1525 &boost_gov_sys.attr,
1526 &boostpulse_gov_sys.attr,
1527 &boostpulse_duration_gov_sys.attr,
1528 &io_is_busy_gov_sys.attr,
Junjie Wu4344ea32014-04-28 16:22:24 -07001529 &use_sched_load_gov_sys.attr,
1530 &use_migration_notif_gov_sys.attr,
Junjie Wue05d74e2014-08-29 14:12:52 -07001531 &max_freq_hysteresis_gov_sys.attr,
Junjie Wu7ca999f2014-08-29 18:55:45 -07001532 &align_windows_gov_sys.attr,
Junjie Wu3381c4c2015-08-19 15:45:37 -07001533 &ignore_hispeed_on_notif_gov_sys.attr,
Junjie Wu450c8572015-07-22 17:38:49 -07001534 &fast_ramp_down_gov_sys.attr,
Junjie Wu7c128602015-06-09 17:36:11 -07001535 &enable_prediction_gov_sys.attr,
Mike Chanef969692010-06-22 11:26:45 -07001536 NULL,
1537};
1538
Viresh Kumar17d15c42013-05-16 14:58:54 +05301539static struct attribute_group interactive_attr_group_gov_sys = {
1540 .attrs = interactive_attributes_gov_sys,
Mike Chanef969692010-06-22 11:26:45 -07001541 .name = "interactive",
1542};
1543
Viresh Kumar17d15c42013-05-16 14:58:54 +05301544/* Per policy governor instance */
1545static struct attribute *interactive_attributes_gov_pol[] = {
1546 &target_loads_gov_pol.attr,
1547 &above_hispeed_delay_gov_pol.attr,
1548 &hispeed_freq_gov_pol.attr,
1549 &go_hispeed_load_gov_pol.attr,
1550 &min_sample_time_gov_pol.attr,
1551 &timer_rate_gov_pol.attr,
1552 &timer_slack_gov_pol.attr,
1553 &boost_gov_pol.attr,
1554 &boostpulse_gov_pol.attr,
1555 &boostpulse_duration_gov_pol.attr,
1556 &io_is_busy_gov_pol.attr,
Junjie Wu4344ea32014-04-28 16:22:24 -07001557 &use_sched_load_gov_pol.attr,
1558 &use_migration_notif_gov_pol.attr,
Junjie Wue05d74e2014-08-29 14:12:52 -07001559 &max_freq_hysteresis_gov_pol.attr,
Junjie Wu7ca999f2014-08-29 18:55:45 -07001560 &align_windows_gov_pol.attr,
Junjie Wu3381c4c2015-08-19 15:45:37 -07001561 &ignore_hispeed_on_notif_gov_pol.attr,
Junjie Wu450c8572015-07-22 17:38:49 -07001562 &fast_ramp_down_gov_pol.attr,
Junjie Wu7c128602015-06-09 17:36:11 -07001563 &enable_prediction_gov_pol.attr,
Viresh Kumar17d15c42013-05-16 14:58:54 +05301564 NULL,
1565};
1566
1567static struct attribute_group interactive_attr_group_gov_pol = {
1568 .attrs = interactive_attributes_gov_pol,
1569 .name = "interactive",
1570};
1571
1572static struct attribute_group *get_sysfs_attr(void)
1573{
1574 if (have_governor_per_policy())
1575 return &interactive_attr_group_gov_pol;
1576 else
1577 return &interactive_attr_group_gov_sys;
1578}
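/*
 * With have_governor_per_policy() the attribute group is created under each
 * policy's kobject (e.g. .../cpuN/cpufreq/interactive/, the exact path
 * depending on the kernel's cpufreq layout); otherwise a single group lives
 * under the global cpufreq kobject
 * (/sys/devices/system/cpu/cpufreq/interactive/).
 */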
1579
Junjie Wucf531ef2015-04-17 12:48:36 -07001580static void cpufreq_interactive_nop_timer(unsigned long data)
Sam Leffler3ab7c2b2012-06-27 10:12:04 -07001581{
Junjie Wu53f83f82014-08-18 16:35:09 -07001582}
1583
Junjie Wuc5a97d92014-05-23 12:22:59 -07001584static struct cpufreq_interactive_tunables *alloc_tunable(
1585 struct cpufreq_policy *policy)
1586{
1587 struct cpufreq_interactive_tunables *tunables;
1588
1589 tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
1590 if (!tunables)
1591 return ERR_PTR(-ENOMEM);
1592
1593 tunables->above_hispeed_delay = default_above_hispeed_delay;
1594 tunables->nabove_hispeed_delay =
1595 ARRAY_SIZE(default_above_hispeed_delay);
1596 tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
1597 tunables->target_loads = default_target_loads;
1598 tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
1599 tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
1600 tunables->timer_rate = DEFAULT_TIMER_RATE;
1601 tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
1602 tunables->timer_slack_val = DEFAULT_TIMER_SLACK;
1603
1604 spin_lock_init(&tunables->target_loads_lock);
1605 spin_lock_init(&tunables->above_hispeed_delay_lock);
1606
1607 return tunables;
1608}
1609
Stephen Boyd1c2271f2017-03-20 18:57:28 -07001610static void irq_work(struct irq_work *irq_work)
1611{
1612 struct cpufreq_interactive_policyinfo *ppol;
1613 unsigned long flags;
1614
1615 ppol = container_of(irq_work, struct cpufreq_interactive_policyinfo,
1616 irq_work);
1617
1618 cpufreq_interactive_timer(smp_processor_id());
1619 spin_lock_irqsave(&ppol->irq_work_lock, flags);
1620 ppol->work_in_progress = false;
1621 spin_unlock_irqrestore(&ppol->irq_work_lock, flags);
1622}
1623
Junjie Wucf531ef2015-04-17 12:48:36 -07001624static struct cpufreq_interactive_policyinfo *get_policyinfo(
1625 struct cpufreq_policy *policy)
Saravana Kannan07c2aa62014-07-22 15:42:51 -07001626{
Junjie Wucf531ef2015-04-17 12:48:36 -07001627 struct cpufreq_interactive_policyinfo *ppol =
1628 per_cpu(polinfo, policy->cpu);
1629 int i;
Joonwoo Park22d94972015-09-15 09:35:53 -07001630 struct sched_load *sl;
Saravana Kannan07c2aa62014-07-22 15:42:51 -07001631
Junjie Wucf531ef2015-04-17 12:48:36 -07001632 /* polinfo already allocated for policy, return */
1633 if (ppol)
1634 return ppol;
1635
1636 ppol = kzalloc(sizeof(*ppol), GFP_KERNEL);
1637 if (!ppol)
1638 return ERR_PTR(-ENOMEM);
1639
Joonwoo Park22d94972015-09-15 09:35:53 -07001640 sl = kcalloc(cpumask_weight(policy->related_cpus), sizeof(*sl),
1641 GFP_KERNEL);
1642 if (!sl) {
Junjie Wufef75c02015-05-26 17:54:38 -07001643 kfree(ppol);
1644 return ERR_PTR(-ENOMEM);
1645 }
Joonwoo Park22d94972015-09-15 09:35:53 -07001646 ppol->sl = sl;
Junjie Wufef75c02015-05-26 17:54:38 -07001647
Junjie Wucf531ef2015-04-17 12:48:36 -07001648 init_timer(&ppol->policy_slack_timer);
1649 ppol->policy_slack_timer.function = cpufreq_interactive_nop_timer;
Junjie Wuaceecc062015-09-18 18:13:01 -07001650 hrtimer_init(&ppol->notif_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1651 ppol->notif_timer.function = cpufreq_interactive_hrtimer;
Stephen Boyd1c2271f2017-03-20 18:57:28 -07001652 init_irq_work(&ppol->irq_work, irq_work);
1653 spin_lock_init(&ppol->irq_work_lock);
Junjie Wucf531ef2015-04-17 12:48:36 -07001654 spin_lock_init(&ppol->load_lock);
1655 spin_lock_init(&ppol->target_freq_lock);
1656 init_rwsem(&ppol->enable_sem);
1657
1658 for_each_cpu(i, policy->related_cpus)
1659 per_cpu(polinfo, i) = ppol;
1660 return ppol;
1661}
1662
1663/* This function is not multithread-safe. */
1664static void free_policyinfo(int cpu)
1665{
1666 struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
1667 int j;
1668
1669 if (!ppol)
1670 return;
1671
1672 for_each_possible_cpu(j)
1673 if (per_cpu(polinfo, j) == ppol)
1674			per_cpu(polinfo, j) = NULL;
1675 kfree(ppol->cached_tunables);
Joonwoo Park22d94972015-09-15 09:35:53 -07001676 kfree(ppol->sl);
Junjie Wucf531ef2015-04-17 12:48:36 -07001677 kfree(ppol);
1678}
1679
1680static struct cpufreq_interactive_tunables *get_tunables(
1681 struct cpufreq_interactive_policyinfo *ppol)
1682{
Saravana Kannan07c2aa62014-07-22 15:42:51 -07001683 if (have_governor_per_policy())
Junjie Wucf531ef2015-04-17 12:48:36 -07001684 return ppol->cached_tunables;
Saravana Kannan07c2aa62014-07-22 15:42:51 -07001685 else
Junjie Wucf531ef2015-04-17 12:48:36 -07001686 return cached_common_tunables;
Junjie Wuc5a97d92014-05-23 12:22:59 -07001687}
1688
Stephen Boyd9a864832017-03-13 16:49:15 -07001689/* Interactive Governor callbacks */
1690struct interactive_governor {
1691 struct cpufreq_governor gov;
1692 unsigned int usage_count;
1693};
1694
1695static struct interactive_governor interactive_gov;
1696
1697#define CPU_FREQ_GOV_INTERACTIVE (&interactive_gov.gov)
1698
1699int cpufreq_interactive_init(struct cpufreq_policy *policy)
Mike Chanef969692010-06-22 11:26:45 -07001700{
1701 int rc;
Junjie Wucf531ef2015-04-17 12:48:36 -07001702 struct cpufreq_interactive_policyinfo *ppol;
Stephen Boyd9a864832017-03-13 16:49:15 -07001703 struct cpufreq_interactive_tunables *tunables;
1704
1705 if (have_governor_per_policy())
1706 tunables = policy->governor_data;
1707 else
1708 tunables = common_tunables;
1709
1710 ppol = get_policyinfo(policy);
1711 if (IS_ERR(ppol))
1712 return PTR_ERR(ppol);
1713
1714 if (have_governor_per_policy()) {
1715 WARN_ON(tunables);
1716 } else if (tunables) {
1717 tunables->usage_count++;
1718 cpumask_or(&controlled_cpus, &controlled_cpus,
1719 policy->related_cpus);
1720 sched_update_freq_max_load(policy->related_cpus);
1721 policy->governor_data = tunables;
1722 return 0;
1723 }
1724
1725 tunables = get_tunables(ppol);
1726 if (!tunables) {
1727 tunables = alloc_tunable(policy);
1728 if (IS_ERR(tunables))
1729 return PTR_ERR(tunables);
1730 }
1731
1732 tunables->usage_count = 1;
1733 policy->governor_data = tunables;
1734 if (!have_governor_per_policy())
1735 common_tunables = tunables;
1736
1737 rc = sysfs_create_group(get_governor_parent_kobj(policy),
1738 get_sysfs_attr());
1739 if (rc) {
1740 kfree(tunables);
1741 policy->governor_data = NULL;
1742 if (!have_governor_per_policy())
1743 common_tunables = NULL;
1744 return rc;
1745 }
1746
1747 if (!interactive_gov.usage_count++)
1748 cpufreq_register_notifier(&cpufreq_notifier_block,
1749 CPUFREQ_TRANSITION_NOTIFIER);
1750
1751 if (tunables->use_sched_load)
1752 cpufreq_interactive_enable_sched_input(tunables);
1753
1754 cpumask_or(&controlled_cpus, &controlled_cpus,
1755 policy->related_cpus);
1756 sched_update_freq_max_load(policy->related_cpus);
1757
1758 if (have_governor_per_policy())
1759 ppol->cached_tunables = tunables;
1760 else
1761 cached_common_tunables = tunables;
1762
1763 return 0;
1764}
1765
1766void cpufreq_interactive_exit(struct cpufreq_policy *policy)
1767{
1768 struct cpufreq_interactive_tunables *tunables;
1769
1770 if (have_governor_per_policy())
1771 tunables = policy->governor_data;
1772 else
1773 tunables = common_tunables;
1774
1775 BUG_ON(!tunables);
1776
1777 cpumask_andnot(&controlled_cpus, &controlled_cpus,
1778 policy->related_cpus);
1779 sched_update_freq_max_load(cpu_possible_mask);
1780 if (!--tunables->usage_count) {
1781		/* Last policy using the governor? */
1782 if (!--interactive_gov.usage_count)
1783 cpufreq_unregister_notifier(&cpufreq_notifier_block,
1784 CPUFREQ_TRANSITION_NOTIFIER);
1785
1786 sysfs_remove_group(get_governor_parent_kobj(policy),
1787 get_sysfs_attr());
1788
1789 common_tunables = NULL;
1790 }
1791
1792 policy->governor_data = NULL;
1793
1794 if (tunables->use_sched_load)
1795 cpufreq_interactive_disable_sched_input(tunables);
1796}
1797
1798int cpufreq_interactive_start(struct cpufreq_policy *policy)
1799{
1800 struct cpufreq_interactive_policyinfo *ppol;
Mike Chanef969692010-06-22 11:26:45 -07001801 struct cpufreq_frequency_table *freq_table;
Viresh Kumar17d15c42013-05-16 14:58:54 +05301802 struct cpufreq_interactive_tunables *tunables;
1803
1804 if (have_governor_per_policy())
1805 tunables = policy->governor_data;
1806 else
1807 tunables = common_tunables;
1808
Stephen Boyd9a864832017-03-13 16:49:15 -07001809 BUG_ON(!tunables);
1810 mutex_lock(&gov_lock);
Mike Chanef969692010-06-22 11:26:45 -07001811
Stephen Boyd9a864832017-03-13 16:49:15 -07001812 freq_table = policy->freq_table;
1813 if (!tunables->hispeed_freq)
1814 tunables->hispeed_freq = policy->max;
Junjie Wucf531ef2015-04-17 12:48:36 -07001815
Stephen Boyd9a864832017-03-13 16:49:15 -07001816 ppol = per_cpu(polinfo, policy->cpu);
1817 ppol->policy = policy;
1818 ppol->target_freq = policy->cur;
1819 ppol->freq_table = freq_table;
1820 ppol->p_nolim = *policy;
1821 ppol->p_nolim.min = policy->cpuinfo.min_freq;
1822 ppol->p_nolim.max = policy->cpuinfo.max_freq;
1823 ppol->floor_freq = ppol->target_freq;
1824 ppol->floor_validate_time = ktime_to_us(ktime_get());
1825 ppol->hispeed_validate_time = ppol->floor_validate_time;
1826 ppol->min_freq = policy->min;
1827 ppol->reject_notification = true;
1828 ppol->notif_pending = false;
1829 down_write(&ppol->enable_sem);
Stephen Boyd9a864832017-03-13 16:49:15 -07001830 del_timer_sync(&ppol->policy_slack_timer);
Stephen Boyd9a864832017-03-13 16:49:15 -07001831 ppol->last_evaluated_jiffy = get_jiffies_64();
1832 cpufreq_interactive_timer_start(tunables, policy->cpu);
1833 ppol->governor_enabled = 1;
1834 up_write(&ppol->enable_sem);
1835 ppol->reject_notification = false;
Viresh Kumar17d15c42013-05-16 14:58:54 +05301836
Stephen Boyd9a864832017-03-13 16:49:15 -07001837 mutex_unlock(&gov_lock);
Mike Chanef969692010-06-22 11:26:45 -07001838 return 0;
1839}
1840
Stephen Boyd9a864832017-03-13 16:49:15 -07001841void cpufreq_interactive_stop(struct cpufreq_policy *policy)
1842{
1843 struct cpufreq_interactive_policyinfo *ppol;
1844 struct cpufreq_interactive_tunables *tunables;
1845
1846 if (have_governor_per_policy())
1847 tunables = policy->governor_data;
1848 else
1849 tunables = common_tunables;
1850
1851 BUG_ON(!tunables);
1852
1853 mutex_lock(&gov_lock);
1854
1855 ppol = per_cpu(polinfo, policy->cpu);
1856 ppol->reject_notification = true;
1857 down_write(&ppol->enable_sem);
1858 ppol->governor_enabled = 0;
1859 ppol->target_freq = 0;
Stephen Boyd1c2271f2017-03-20 18:57:28 -07001860 gov_clear_update_util(ppol->policy);
1861 irq_work_sync(&ppol->irq_work);
1862 ppol->work_in_progress = false;
Stephen Boyd9a864832017-03-13 16:49:15 -07001863 del_timer_sync(&ppol->policy_slack_timer);
1864 up_write(&ppol->enable_sem);
1865 ppol->reject_notification = false;
1866
1867 mutex_unlock(&gov_lock);
1868}
1869
1870void cpufreq_interactive_limits(struct cpufreq_policy *policy)
1871{
1872 struct cpufreq_interactive_policyinfo *ppol;
1873 struct cpufreq_interactive_tunables *tunables;
1874
1875 if (have_governor_per_policy())
1876 tunables = policy->governor_data;
1877 else
1878 tunables = common_tunables;
1879
1880 BUG_ON(!tunables);
1881 ppol = per_cpu(polinfo, policy->cpu);
1882
1883 __cpufreq_driver_target(policy,
1884 ppol->target_freq, CPUFREQ_RELATION_L);
1885
1886 down_read(&ppol->enable_sem);
1887 if (ppol->governor_enabled) {
1888 if (policy->min < ppol->min_freq)
1889 cpufreq_interactive_timer_resched(policy->cpu,
1890 true);
1891 ppol->min_freq = policy->min;
1892 }
1893 up_read(&ppol->enable_sem);
1894}
1895
1896static struct interactive_governor interactive_gov = {
1897 .gov = {
1898 .name = "interactive",
1899 .max_transition_latency = 10000000,
1900 .owner = THIS_MODULE,
1901 .init = cpufreq_interactive_init,
1902 .exit = cpufreq_interactive_exit,
1903 .start = cpufreq_interactive_start,
1904 .stop = cpufreq_interactive_stop,
1905 .limits = cpufreq_interactive_limits,
1906 }
Viresh Kumarc7f826b2013-05-16 14:58:53 +05301907};
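/*
 * Rough call sequence from the cpufreq core (a sketch): init() allocates or
 * reuses the tunables and creates the sysfs group, start() snapshots the
 * policy into polinfo and arms the slack timer and sampling machinery,
 * limits() is invoked whenever policy->min/max change, and stop()/exit()
 * tear the instance down in reverse order.
 */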
1908
Stephen Boyd9a864832017-03-13 16:49:15 -07001909static int __init cpufreq_interactive_gov_init(void)
Mike Chanef969692010-06-22 11:26:45 -07001910{
Mike Chanef969692010-06-22 11:26:45 -07001911 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
1912
Todd Poynor0f1920b2012-07-16 17:07:15 -07001913 spin_lock_init(&speedchange_cpumask_lock);
Lianwei Wang1d4f9a72013-01-07 14:15:51 +08001914 mutex_init(&gov_lock);
Junjie Wu4344ea32014-04-28 16:22:24 -07001915 mutex_init(&sched_lock);
Todd Poynor0f1920b2012-07-16 17:07:15 -07001916 speedchange_task =
1917 kthread_create(cpufreq_interactive_speedchange_task, NULL,
1918 "cfinteractive");
1919 if (IS_ERR(speedchange_task))
1920 return PTR_ERR(speedchange_task);
Sam Leffler5c9b8272012-06-27 12:55:56 -07001921
Todd Poynor0f1920b2012-07-16 17:07:15 -07001922 sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
1923 get_task_struct(speedchange_task);
Mike Chanef969692010-06-22 11:26:45 -07001924
Sam Leffler5c9b8272012-06-27 12:55:56 -07001925 /* NB: wake up so the thread does not look hung to the freezer */
Puja Gupta487dec62017-06-27 10:13:50 -07001926 wake_up_process(speedchange_task);
Sam Leffler5c9b8272012-06-27 12:55:56 -07001927
Stephen Boyd9a864832017-03-13 16:49:15 -07001928 return cpufreq_register_governor(CPU_FREQ_GOV_INTERACTIVE);
Mike Chanef969692010-06-22 11:26:45 -07001929}
1930
1931#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
Stephen Boyd9a864832017-03-13 16:49:15 -07001932struct cpufreq_governor *cpufreq_default_governor(void)
1933{
1934 return CPU_FREQ_GOV_INTERACTIVE;
1935}
1936
1937fs_initcall(cpufreq_interactive_gov_init);
Mike Chanef969692010-06-22 11:26:45 -07001938#else
Stephen Boyd9a864832017-03-13 16:49:15 -07001939module_init(cpufreq_interactive_gov_init);
Mike Chanef969692010-06-22 11:26:45 -07001940#endif
1941
Stephen Boyd9a864832017-03-13 16:49:15 -07001942static void __exit cpufreq_interactive_gov_exit(void)
Mike Chanef969692010-06-22 11:26:45 -07001943{
Junjie Wuc5a97d92014-05-23 12:22:59 -07001944 int cpu;
1945
Stephen Boyd9a864832017-03-13 16:49:15 -07001946 cpufreq_unregister_governor(CPU_FREQ_GOV_INTERACTIVE);
Todd Poynor0f1920b2012-07-16 17:07:15 -07001947 kthread_stop(speedchange_task);
1948 put_task_struct(speedchange_task);
Junjie Wuc5a97d92014-05-23 12:22:59 -07001949
Junjie Wucf531ef2015-04-17 12:48:36 -07001950 for_each_possible_cpu(cpu)
1951 free_policyinfo(cpu);
Mike Chanef969692010-06-22 11:26:45 -07001952}
1953
Stephen Boyd9a864832017-03-13 16:49:15 -07001954module_exit(cpufreq_interactive_gov_exit);
Mike Chanef969692010-06-22 11:26:45 -07001955
1956MODULE_AUTHOR("Mike Chan <mike@android.com>");
1957MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
1958	"latency-sensitive workloads");
1959MODULE_LICENSE("GPL");