/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static DEFINE_PER_CPU(struct update_util_data, update_util);

struct cpufreq_interactive_policyinfo {
	bool work_in_progress;
	struct irq_work irq_work;
	spinlock_t irq_work_lock; /* protects work_in_progress */
	struct timer_list policy_slack_timer;
	struct hrtimer notif_timer;
	spinlock_t load_lock; /* protects load tracking stat */
	u64 last_evaluated_jiffy;
	struct cpufreq_policy *policy;
	struct cpufreq_policy p_nolim; /* policy copy with no limits */
	struct cpufreq_frequency_table *freq_table;
	spinlock_t target_freq_lock; /* protects target freq */
	unsigned int target_freq;
	unsigned int floor_freq;
	unsigned int min_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	u64 max_freq_hyst_start_time;
	struct rw_semaphore enable_sem;
	bool reject_notification;
	bool notif_pending;
	unsigned long notif_cpu;
	int governor_enabled;
	struct cpufreq_interactive_tunables *cached_tunables;
	struct sched_load *sl;
};

/* Protected by per-policy load_lock */
struct cpufreq_interactive_cpuinfo {
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	unsigned int loadadjfreq;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_policyinfo *, polinfo);
static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

static int set_window_count;
static int migration_register_count;
static struct mutex sched_lock;
static cpumask_t controlled_cpus;

/* Target load. Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
	DEFAULT_ABOVE_HISPEED_DELAY };

struct cpufreq_interactive_tunables {
	int usage_count;
	/* Hi speed to bump to from lo speed when load bursts (default max) */
	unsigned int hispeed_freq;
	/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
	unsigned long go_hispeed_load;
	/* Target load. Lower values result in higher CPU speeds. */
	spinlock_t target_loads_lock;
	unsigned int *target_loads;
	int ntarget_loads;
	/*
	 * The minimum amount of time to spend at a frequency before we can
	 * ramp down.
	 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
	unsigned long min_sample_time;
	/*
	 * The sample rate of the timer used to increase frequency
	 */
	unsigned long timer_rate;
	/*
	 * Wait this long before raising speed above hispeed, by default a
	 * single timer interval.
	 */
	spinlock_t above_hispeed_delay_lock;
	unsigned int *above_hispeed_delay;
	int nabove_hispeed_delay;
	/* Non-zero means indefinite speed boost active */
	int boost_val;
	/* Duration of a boost pulse in usecs */
	int boostpulse_duration_val;
	/* End time of boost pulse in ktime converted to usecs */
	u64 boostpulse_endtime;
	bool boosted;
	/*
	 * Max additional time to wait in idle, beyond timer_rate, at speeds
	 * above minimum before wakeup to reduce speed, or -1 if unnecessary.
	 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
	int timer_slack_val;
	bool io_is_busy;

	/* scheduler input related flags */
	bool use_sched_load;
	bool use_migration_notif;

	/*
	 * Whether to align timer windows across all CPUs. When
	 * use_sched_load is true, this flag is ignored and windows
	 * will always be aligned.
	 */
	bool align_windows;

	/*
	 * Stay at max freq for at least max_freq_hysteresis before dropping
	 * frequency.
	 */
	unsigned int max_freq_hysteresis;

	/* Ignore hispeed_freq and above_hispeed_delay for notification */
	bool ignore_hispeed_on_notif;

	/* Ignore min_sample_time for notification */
	bool fast_ramp_down;

	/* Whether to enable prediction or not */
	bool enable_prediction;
};
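
/*
 * Illustrative example (not part of the driver; sysfs location and values
 * are assumptions used only for illustration): with a single governor
 * instance the tunables above are typically exposed under
 * /sys/devices/system/cpu/cpufreq/interactive/, so a configuration such as
 *
 *	echo 1497600 > hispeed_freq
 *	echo 85 > go_hispeed_load
 *	echo "85 1497600:90" > target_loads
 *	echo 40000 > min_sample_time
 *
 * maps directly onto hispeed_freq, go_hispeed_load, target_loads and
 * min_sample_time in this structure.
 */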

/* For cases where we have single governor instance for system */
static struct cpufreq_interactive_tunables *common_tunables;
static struct cpufreq_interactive_tunables *cached_common_tunables;

static struct attribute_group *get_sysfs_attr(void);

/* Round to starting jiffy of next evaluation window */
static u64 round_to_nw_start(u64 jif,
			     struct cpufreq_interactive_tunables *tunables)
{
	unsigned long step = usecs_to_jiffies(tunables->timer_rate);
	u64 ret;

	if (tunables->use_sched_load || tunables->align_windows) {
		do_div(jif, step);
		ret = (jif + 1) * step;
	} else {
		ret = jiffies + usecs_to_jiffies(tunables->timer_rate);
	}

	return ret;
}
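
/*
 * Illustrative example (assuming HZ=100, i.e. a 10ms jiffy): with
 * timer_rate = 20000us, step is 2 jiffies. In the aligned case
 * (use_sched_load or align_windows set), jif = 1003 becomes 501 after
 * do_div(), so the next window starts at (501 + 1) * 2 = jiffy 1004.
 * In the unaligned case the next window simply starts timer_rate after
 * "now".
 */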

static inline int set_window_helper(
			struct cpufreq_interactive_tunables *tunables)
{
	return sched_set_window(round_to_nw_start(get_jiffies_64(), tunables),
				usecs_to_jiffies(tunables->timer_rate));
}

static void cpufreq_interactive_timer_resched(unsigned long cpu,
					      bool slack_only)
{
	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_interactive_tunables *tunables =
		ppol->policy->governor_data;
	u64 expires;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ppol->load_lock, flags);
	expires = round_to_nw_start(ppol->last_evaluated_jiffy, tunables);
	if (!slack_only) {
		for_each_cpu(i, ppol->policy->cpus) {
			pcpu = &per_cpu(cpuinfo, i);
			pcpu->time_in_idle = get_cpu_idle_time(i,
						&pcpu->time_in_idle_timestamp,
						tunables->io_is_busy);
			pcpu->cputime_speedadj = 0;
			pcpu->cputime_speedadj_timestamp =
				pcpu->time_in_idle_timestamp;
		}
	}

	if (tunables->timer_slack_val >= 0 &&
	    ppol->target_freq > ppol->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		del_timer(&ppol->policy_slack_timer);
		ppol->policy_slack_timer.expires = expires;
		add_timer(&ppol->policy_slack_timer);
	}

	spin_unlock_irqrestore(&ppol->load_lock, flags);
}

static void update_util_handler(struct update_util_data *data, u64 time,
				unsigned int sched_flags)
{
	struct cpufreq_interactive_policyinfo *ppol;
	unsigned long flags;

	ppol = *this_cpu_ptr(&polinfo);
	spin_lock_irqsave(&ppol->irq_work_lock, flags);
	/*
	 * The irq-work may not be allowed to be queued up right now
	 * because work has already been queued up or is in progress.
	 */
	if (ppol->work_in_progress ||
	    sched_flags & SCHED_CPUFREQ_INTERCLUSTER_MIG)
		goto out;

	ppol->work_in_progress = true;
	irq_work_queue(&ppol->irq_work);
out:
	spin_unlock_irqrestore(&ppol->irq_work_lock, flags);
}

static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_remove_update_util_hook(i);

	synchronize_sched();
}

static void gov_set_update_util(struct cpufreq_policy *policy)
{
	struct update_util_data *util;
	int cpu;

	for_each_cpu(cpu, policy->cpus) {
		util = &per_cpu(update_util, cpu);
		cpufreq_add_update_util_hook(cpu, util, update_util_handler);
	}
}

/* The caller shall take enable_sem write semaphore to avoid any timer race.
 * The policy_slack_timer must be deactivated when calling this function.
 */
static void cpufreq_interactive_timer_start(
	struct cpufreq_interactive_tunables *tunables, int cpu)
{
	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
	struct cpufreq_interactive_cpuinfo *pcpu;
	u64 expires = round_to_nw_start(ppol->last_evaluated_jiffy, tunables);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ppol->load_lock, flags);
	gov_set_update_util(ppol->policy);
	if (tunables->timer_slack_val >= 0 &&
	    ppol->target_freq > ppol->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		ppol->policy_slack_timer.expires = expires;
		add_timer(&ppol->policy_slack_timer);
	}

	for_each_cpu(i, ppol->policy->cpus) {
		pcpu = &per_cpu(cpuinfo, i);
		pcpu->time_in_idle =
			get_cpu_idle_time(i, &pcpu->time_in_idle_timestamp,
					  tunables->io_is_busy);
		pcpu->cputime_speedadj = 0;
		pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	}
	spin_unlock_irqrestore(&ppol->load_lock, flags);
}

static unsigned int freq_to_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
			freq >= tunables->above_hispeed_delay[i+1]; i += 2)
		;

	ret = tunables->above_hispeed_delay[i];
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}
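
/*
 * Illustrative example (made-up table): with above_hispeed_delay set to
 * "20000 1400000:40000 1700000:80000", the governor waits 20000us before
 * raising above hispeed while below 1.4GHz, 40000us in [1.4GHz, 1.7GHz),
 * and 80000us at or above 1.7GHz; freq_to_above_hispeed_delay() walks the
 * odd-indexed frequency thresholds to pick the matching delay.
 */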

static unsigned int freq_to_targetload(
	struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads - 1 &&
		    freq >= tunables->target_loads[i+1]; i += 2)
		;

	ret = tunables->target_loads[i];
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}
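
/*
 * Illustrative example (made-up table): target_loads set to
 * "85 1200000:90 1700000:97" yields a target load of 85% below 1.2GHz,
 * 90% in [1.2GHz, 1.7GHz) and 97% at or above 1.7GHz. Lower target loads
 * make the governor choose higher speeds for the same measured load.
 */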

#define DEFAULT_MAX_LOAD 100
u32 get_freq_max_load(int cpu, unsigned int freq)
{
	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);

	if (!cpumask_test_cpu(cpu, &controlled_cpus))
		return DEFAULT_MAX_LOAD;

	if (have_governor_per_policy()) {
		if (!ppol || !ppol->cached_tunables)
			return DEFAULT_MAX_LOAD;
		return freq_to_targetload(ppol->cached_tunables, freq);
	}

	if (!cached_common_tunables)
		return DEFAULT_MAX_LOAD;
	return freq_to_targetload(cached_common_tunables, freq);
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
static unsigned int choose_freq(struct cpufreq_interactive_policyinfo *pcpu,
		unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(pcpu->policy->governor_data, freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		index = cpufreq_frequency_table_target(&pcpu->p_nolim,
						       loadadjfreq / tl,
						       CPUFREQ_RELATION_L);
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				index = cpufreq_frequency_table_target(
					    &pcpu->p_nolim,
					    freqmax - 1, CPUFREQ_RELATION_H);
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low. freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				index = cpufreq_frequency_table_target(
					    &pcpu->p_nolim,
					    freqmin + 1, CPUFREQ_RELATION_L);
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}
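
/*
 * Worked example for choose_freq() above (made-up numbers): with the CPU
 * currently at 1000000 kHz and a measured busy fraction of 45%,
 * loadadjfreq = 45 * 1000000. If the target load at 1GHz is 90, the first
 * iteration asks for the lowest table frequency >= 45 * 1000000 / 90 =
 * 500000 kHz. The loop then re-evaluates the target load at the chosen
 * frequency and repeats, narrowing [freqmin, freqmax] until the chosen
 * frequency stops changing.
 */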

static u64 update_load(int cpu)
{
	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	struct cpufreq_interactive_tunables *tunables =
		ppol->policy->governor_data;
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

	if (delta_time <= delta_idle)
		active_time = 0;
	else
		active_time = delta_time - delta_idle;

	pcpu->cputime_speedadj += active_time * ppol->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}
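
/*
 * Illustrative example (made-up numbers): if the CPU was busy for 15000us
 * of a 20000us window while running at 1000000 kHz, cputime_speedadj grows
 * by 15000 * 1000000. Dividing by the 20000us window later (in the timer)
 * gives 750000, i.e. an average "speed-adjusted" frequency of 750 MHz,
 * which becomes 75% load once scaled by 100 and divided by a 1 GHz target
 * frequency.
 */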
479
Junjie Wu7c128602015-06-09 17:36:11 -0700480static unsigned int sl_busy_to_laf(struct cpufreq_interactive_policyinfo *ppol,
481 unsigned long busy)
482{
Hanumath Prasadbdfb4c32016-02-08 17:44:07 +0530483 int prev_load;
Junjie Wu7c128602015-06-09 17:36:11 -0700484 struct cpufreq_interactive_tunables *tunables =
485 ppol->policy->governor_data;
486
Hanumath Prasadbdfb4c32016-02-08 17:44:07 +0530487 prev_load = mult_frac(ppol->policy->cpuinfo.max_freq * 100,
488 busy, tunables->timer_rate);
489 return prev_load;
Junjie Wu7c128602015-06-09 17:36:11 -0700490}
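
/*
 * Illustrative example (made-up numbers): with cpuinfo.max_freq of
 * 1500000 kHz, timer_rate of 20000us and a scheduler-reported busy time of
 * 10000us, the load-adjusted frequency is
 * 1500000 * 100 * 10000 / 20000 = 75000000, i.e. the equivalent of 50%
 * load expressed against max_freq.
 */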

#define NEW_TASK_RATIO 75
#define PRED_TOLERANCE_PCT 10
static void cpufreq_interactive_timer(int data)
{
	s64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	int pol_load = 0;
	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, data);
	struct cpufreq_interactive_tunables *tunables =
		ppol->policy->governor_data;
	struct sched_load *sl = ppol->sl;
	struct cpufreq_interactive_cpuinfo *pcpu;
	unsigned int new_freq;
	unsigned int prev_laf = 0, t_prevlaf;
	unsigned int pred_laf = 0, t_predlaf = 0;
	unsigned int prev_chfreq, pred_chfreq, chosen_freq;
	unsigned int index;
	unsigned long flags;
	unsigned long max_cpu;
	int i, cpu;
	int new_load_pct = 0;
	int prev_l, pred_l = 0;
	struct cpufreq_govinfo govinfo;
	bool skip_hispeed_logic, skip_min_sample_time;
	bool jump_to_max_no_ts = false;
	bool jump_to_max = false;

	if (!down_read_trylock(&ppol->enable_sem))
		return;
	if (!ppol->governor_enabled)
		goto exit;

	now = ktime_to_us(ktime_get());

	spin_lock_irqsave(&ppol->target_freq_lock, flags);
	spin_lock(&ppol->load_lock);

	skip_hispeed_logic =
		tunables->ignore_hispeed_on_notif && ppol->notif_pending;
	skip_min_sample_time = tunables->fast_ramp_down && ppol->notif_pending;
	ppol->notif_pending = false;
	now = ktime_to_us(ktime_get());
	ppol->last_evaluated_jiffy = get_jiffies_64();

	if (tunables->use_sched_load)
		sched_get_cpus_busy(sl, ppol->policy->cpus);
	max_cpu = cpumask_first(ppol->policy->cpus);
	i = 0;
	for_each_cpu(cpu, ppol->policy->cpus) {
		pcpu = &per_cpu(cpuinfo, cpu);
		if (tunables->use_sched_load) {
			t_prevlaf = sl_busy_to_laf(ppol, sl[i].prev_load);
			prev_l = t_prevlaf / ppol->target_freq;
			if (tunables->enable_prediction) {
				t_predlaf = sl_busy_to_laf(ppol,
						sl[i].predicted_load);
				pred_l = t_predlaf / ppol->target_freq;
			}
			if (sl[i].prev_load)
				new_load_pct = sl[i].new_task_load * 100 /
							sl[i].prev_load;
			else
				new_load_pct = 0;
		} else {
			now = update_load(cpu);
			delta_time = (unsigned int)
				(now - pcpu->cputime_speedadj_timestamp);
			if (WARN_ON_ONCE(!delta_time))
				continue;
			cputime_speedadj = pcpu->cputime_speedadj;
			do_div(cputime_speedadj, delta_time);
			t_prevlaf = (unsigned int)cputime_speedadj * 100;
			prev_l = t_prevlaf / ppol->target_freq;
		}

		/* find max of loadadjfreq inside policy */
		if (t_prevlaf > prev_laf) {
			prev_laf = t_prevlaf;
			max_cpu = cpu;
		}
		pred_laf = max(t_predlaf, pred_laf);

		cpu_load = max(prev_l, pred_l);
		pol_load = max(pol_load, cpu_load);
		trace_cpufreq_interactive_cpuload(cpu, cpu_load, new_load_pct,
						  prev_l, pred_l);

		/* save loadadjfreq for notification */
		pcpu->loadadjfreq = max(t_prevlaf, t_predlaf);

		/* detect heavy new task and jump to policy->max */
		if (prev_l >= tunables->go_hispeed_load &&
		    new_load_pct >= NEW_TASK_RATIO) {
			skip_hispeed_logic = true;
			jump_to_max = true;
		}
		i++;
	}
	spin_unlock(&ppol->load_lock);

	tunables->boosted = tunables->boost_val || now < tunables->boostpulse_endtime;

	prev_chfreq = choose_freq(ppol, prev_laf);
	pred_chfreq = choose_freq(ppol, pred_laf);
	chosen_freq = max(prev_chfreq, pred_chfreq);

	if (prev_chfreq < ppol->policy->max && pred_chfreq >= ppol->policy->max)
		if (!jump_to_max)
			jump_to_max_no_ts = true;

	if (now - ppol->max_freq_hyst_start_time <
	    tunables->max_freq_hysteresis &&
	    pol_load >= tunables->go_hispeed_load &&
	    ppol->target_freq < ppol->policy->max) {
		skip_hispeed_logic = true;
		skip_min_sample_time = true;
		if (!jump_to_max)
			jump_to_max_no_ts = true;
	}

	new_freq = chosen_freq;
	if (jump_to_max_no_ts || jump_to_max) {
		new_freq = ppol->policy->cpuinfo.max_freq;
	} else if (!skip_hispeed_logic) {
		if (pol_load >= tunables->go_hispeed_load ||
		    tunables->boosted) {
			if (ppol->target_freq < tunables->hispeed_freq)
				new_freq = tunables->hispeed_freq;
			else
				new_freq = max(new_freq,
					       tunables->hispeed_freq);
		}
	}

	if (now - ppol->max_freq_hyst_start_time <
	    tunables->max_freq_hysteresis)
		new_freq = max(tunables->hispeed_freq, new_freq);

	if (!skip_hispeed_logic &&
	    ppol->target_freq >= tunables->hispeed_freq &&
	    new_freq > ppol->target_freq &&
	    now - ppol->hispeed_validate_time <
	    freq_to_above_hispeed_delay(tunables, ppol->target_freq)) {
		trace_cpufreq_interactive_notyet(
			max_cpu, pol_load, ppol->target_freq,
			ppol->policy->cur, new_freq);
		spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
		goto rearm;
	}

	ppol->hispeed_validate_time = now;

	index = cpufreq_frequency_table_target(&ppol->p_nolim, new_freq,
					       CPUFREQ_RELATION_L);
	new_freq = ppol->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (!skip_min_sample_time && new_freq < ppol->floor_freq) {
		if (now - ppol->floor_validate_time <
		    tunables->min_sample_time) {
			trace_cpufreq_interactive_notyet(
				max_cpu, pol_load, ppol->target_freq,
				ppol->policy->cur, new_freq);
			spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq. If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off). If policy->max is restored
	 * for max_freq_hysteresis, don't extend the timestamp. Otherwise, it
	 * could incorrectly extend the duration of max_freq_hysteresis by
	 * min_sample_time.
	 */

	if ((!tunables->boosted || new_freq > tunables->hispeed_freq)
	    && !jump_to_max_no_ts) {
		ppol->floor_freq = new_freq;
		ppol->floor_validate_time = now;
	}

	if (new_freq >= ppol->policy->max && !jump_to_max_no_ts)
		ppol->max_freq_hyst_start_time = now;

	if (ppol->target_freq == new_freq &&
	    ppol->target_freq <= ppol->policy->cur) {
		trace_cpufreq_interactive_already(
			max_cpu, pol_load, ppol->target_freq,
			ppol->policy->cur, new_freq);
		spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
		goto rearm;
	}

	trace_cpufreq_interactive_target(max_cpu, pol_load, ppol->target_freq,
					 ppol->policy->cur, new_freq);

	ppol->target_freq = new_freq;
	spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(max_cpu, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process_no_notif(speedchange_task);

rearm:
	cpufreq_interactive_timer_resched(data, false);

	/*
	 * Send govinfo notification.
	 * Govinfo notification could potentially wake up another thread
	 * managed by its clients. Thread wakeups might trigger a load
	 * change callback that executes this function again. Therefore
	 * no spinlock could be held when sending the notification.
	 */
	for_each_cpu(i, ppol->policy->cpus) {
		pcpu = &per_cpu(cpuinfo, i);
		govinfo.cpu = i;
		govinfo.load = pcpu->loadadjfreq / ppol->policy->max;
		govinfo.sampling_rate_us = tunables->timer_rate;
		atomic_notifier_call_chain(&cpufreq_govinfo_notifier_list,
					   CPUFREQ_LOAD_CHANGE, &govinfo);
	}

exit:
	up_read(&ppol->enable_sem);
	return;
}

static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_policyinfo *ppol;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			ppol = per_cpu(polinfo, cpu);
			if (!down_read_trylock(&ppol->enable_sem))
				continue;
			if (!ppol->governor_enabled) {
				up_read(&ppol->enable_sem);
				continue;
			}

			if (ppol->target_freq != ppol->policy->cur)
				__cpufreq_driver_target(ppol->policy,
							ppol->target_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
						     ppol->target_freq,
						     ppol->policy->cur);
			up_read(&ppol->enable_sem);
		}
	}

	return 0;
}

static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunables)
{
	int i;
	int anyboost = 0;
	unsigned long flags[2];
	struct cpufreq_interactive_policyinfo *ppol;

	tunables->boosted = true;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);

	for_each_online_cpu(i) {
		ppol = per_cpu(polinfo, i);
		if (!ppol || tunables != ppol->policy->governor_data)
			continue;

		spin_lock_irqsave(&ppol->target_freq_lock, flags[1]);
		if (ppol->target_freq < tunables->hispeed_freq) {
			ppol->target_freq = tunables->hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			ppol->hispeed_validate_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */

		ppol->floor_freq = tunables->hispeed_freq;
		ppol->floor_validate_time = ktime_to_us(ktime_get());
		spin_unlock_irqrestore(&ppol->target_freq_lock, flags[1]);
		break;
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);

	if (anyboost)
		wake_up_process_no_notif(speedchange_task);
}

static int load_change_callback(struct notifier_block *nb, unsigned long val,
				void *data)
{
	unsigned long cpu = (unsigned long) data;
	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
	struct cpufreq_interactive_tunables *tunables;
	unsigned long flags;

	if (!ppol || ppol->reject_notification)
		return 0;

	if (!down_read_trylock(&ppol->enable_sem))
		return 0;
	if (!ppol->governor_enabled)
		goto exit;

	tunables = ppol->policy->governor_data;
	if (!tunables->use_sched_load || !tunables->use_migration_notif)
		goto exit;

	spin_lock_irqsave(&ppol->target_freq_lock, flags);
	ppol->notif_pending = true;
	ppol->notif_cpu = cpu;
	spin_unlock_irqrestore(&ppol->target_freq_lock, flags);

	if (!hrtimer_is_queued(&ppol->notif_timer))
		hrtimer_start(&ppol->notif_timer, ms_to_ktime(1),
			      HRTIMER_MODE_REL);
exit:
	up_read(&ppol->enable_sem);
	return 0;
}

static enum hrtimer_restart cpufreq_interactive_hrtimer(struct hrtimer *timer)
{
	struct cpufreq_interactive_policyinfo *ppol = container_of(timer,
			struct cpufreq_interactive_policyinfo, notif_timer);
	int cpu;

	if (!down_read_trylock(&ppol->enable_sem))
		return HRTIMER_NORESTART;
	if (!ppol->governor_enabled) {
		up_read(&ppol->enable_sem);
		return HRTIMER_NORESTART;
	}
	cpu = ppol->notif_cpu;
	trace_cpufreq_interactive_load_change(cpu);
	del_timer(&ppol->policy_slack_timer);
	cpufreq_interactive_timer(cpu);

	up_read(&ppol->enable_sem);
	return HRTIMER_NORESTART;
}

static struct notifier_block load_notifier_block = {
	.notifier_call = load_change_callback,
};

static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_policyinfo *ppol;
	int cpu;
	unsigned long flags;

	if (val == CPUFREQ_POSTCHANGE) {
		ppol = per_cpu(polinfo, freq->cpu);
		if (!ppol)
			return 0;
		if (!down_read_trylock(&ppol->enable_sem))
			return 0;
		if (!ppol->governor_enabled) {
			up_read(&ppol->enable_sem);
			return 0;
		}

		if (cpumask_first(ppol->policy->cpus) != freq->cpu) {
			up_read(&ppol->enable_sem);
			return 0;
		}
		spin_lock_irqsave(&ppol->load_lock, flags);
		for_each_cpu(cpu, ppol->policy->cpus)
			update_load(cpu);
		spin_unlock_irqrestore(&ppol->load_lock, flags);

		up_read(&ppol->enable_sem);
	}
	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
	const char *cp;
	int i;
	int ntokens = 1;
	unsigned int *tokenized_data;
	int err = -EINVAL;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err;

	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!tokenized_data) {
		err = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
			goto err_kfree;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_kfree;

	*num_tokens = ntokens;
	return tokenized_data;

err_kfree:
	kfree(tokenized_data);
err:
	return ERR_PTR(err);
}
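
/*
 * Illustrative example (made-up input): "85 1500000:90 1700000:97"
 * tokenizes to {85, 1500000, 90, 1700000, 97} with *num_tokens = 5. An
 * even token count (an unpaired frequency) is rejected with -EINVAL.
 */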

static ssize_t show_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

static ssize_t store_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_target_loads = NULL;
	unsigned long flags;

	new_target_loads = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_target_loads))
		return PTR_RET(new_target_loads);

	spin_lock_irqsave(&tunables->target_loads_lock, flags);
	if (tunables->target_loads != default_target_loads)
		kfree(tunables->target_loads);
	tunables->target_loads = new_target_loads;
	tunables->ntarget_loads = ntokens;
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);

	sched_update_freq_max_load(&controlled_cpus);

	return count;
}

static ssize_t show_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay; i++)
		ret += sprintf(buf + ret, "%u%s",
			       tunables->above_hispeed_delay[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static ssize_t store_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_above_hispeed_delay = NULL;
	unsigned long flags;

	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_above_hispeed_delay))
		return PTR_RET(new_above_hispeed_delay);

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
	if (tunables->above_hispeed_delay != default_above_hispeed_delay)
		kfree(tunables->above_hispeed_delay);
	tunables->above_hispeed_delay = new_above_hispeed_delay;
	tunables->nabove_hispeed_delay = ntokens;
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return count;

}

static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->hispeed_freq);
}

static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	long unsigned int val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->hispeed_freq = val;
	return count;
}

#define show_store_one(file_name)					\
static ssize_t show_##file_name(					\
	struct cpufreq_interactive_tunables *tunables, char *buf)	\
{									\
	return snprintf(buf, PAGE_SIZE, "%u\n", tunables->file_name);	\
}									\
static ssize_t store_##file_name(					\
	struct cpufreq_interactive_tunables *tunables,			\
	const char *buf, size_t count)					\
{									\
	int ret;							\
	unsigned long int val;						\
									\
	ret = kstrtoul(buf, 0, &val);					\
	if (ret < 0)							\
		return ret;						\
	tunables->file_name = val;					\
	return count;							\
}
show_store_one(max_freq_hysteresis);
show_store_one(align_windows);
show_store_one(ignore_hispeed_on_notif);
show_store_one(fast_ramp_down);
show_store_one(enable_prediction);
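
/*
 * show_store_one() expands to a sysfs show/store handler pair for each of
 * the simple unsigned tunables listed above, e.g.
 * show_max_freq_hysteresis()/store_max_freq_hysteresis(), all parsed with
 * kstrtoul().
 */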
Junjie Wue05d74e2014-08-29 14:12:52 -07001087
Viresh Kumar17d15c42013-05-16 14:58:54 +05301088static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
1089 *tunables, char *buf)
Mike Chanef969692010-06-22 11:26:45 -07001090{
Viresh Kumar17d15c42013-05-16 14:58:54 +05301091 return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
Mike Chanef969692010-06-22 11:26:45 -07001092}
1093
Viresh Kumar17d15c42013-05-16 14:58:54 +05301094static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
1095 *tunables, const char *buf, size_t count)
Mike Chanef969692010-06-22 11:26:45 -07001096{
1097 int ret;
1098 unsigned long val;
1099
Amit Pundircf076402015-11-03 20:53:29 +05301100 ret = kstrtoul(buf, 0, &val);
Mike Chanef969692010-06-22 11:26:45 -07001101 if (ret < 0)
1102 return ret;
Viresh Kumar17d15c42013-05-16 14:58:54 +05301103 tunables->go_hispeed_load = val;
Mike Chanef969692010-06-22 11:26:45 -07001104 return count;
1105}
1106
Viresh Kumar17d15c42013-05-16 14:58:54 +05301107static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
1108 *tunables, char *buf)
Mike Chanef969692010-06-22 11:26:45 -07001109{
Viresh Kumar17d15c42013-05-16 14:58:54 +05301110 return sprintf(buf, "%lu\n", tunables->min_sample_time);
Mike Chanef969692010-06-22 11:26:45 -07001111}
1112
Viresh Kumar17d15c42013-05-16 14:58:54 +05301113static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
1114 *tunables, const char *buf, size_t count)
Mike Chanef969692010-06-22 11:26:45 -07001115{
1116 int ret;
1117 unsigned long val;
1118
Amit Pundircf076402015-11-03 20:53:29 +05301119 ret = kstrtoul(buf, 0, &val);
Mike Chanef969692010-06-22 11:26:45 -07001120 if (ret < 0)
1121 return ret;
Viresh Kumar17d15c42013-05-16 14:58:54 +05301122 tunables->min_sample_time = val;
Mike Chanef969692010-06-22 11:26:45 -07001123 return count;
1124}
1125
Viresh Kumar17d15c42013-05-16 14:58:54 +05301126static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
1127 char *buf)
Mike Chanef969692010-06-22 11:26:45 -07001128{
Viresh Kumar17d15c42013-05-16 14:58:54 +05301129 return sprintf(buf, "%lu\n", tunables->timer_rate);
Mike Chanef969692010-06-22 11:26:45 -07001130}
1131
Viresh Kumar17d15c42013-05-16 14:58:54 +05301132static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
1133 const char *buf, size_t count)
Mike Chanef969692010-06-22 11:26:45 -07001134{
1135 int ret;
Junjie Wu847796e2014-08-15 16:34:37 -07001136 unsigned long val, val_round;
Junjie Wu4344ea32014-04-28 16:22:24 -07001137 struct cpufreq_interactive_tunables *t;
1138 int cpu;
Mike Chanef969692010-06-22 11:26:45 -07001139
Amit Pundircf076402015-11-03 20:53:29 +05301140 ret = kstrtoul(buf, 0, &val);
Mike Chanef969692010-06-22 11:26:45 -07001141 if (ret < 0)
1142 return ret;
Junjie Wu847796e2014-08-15 16:34:37 -07001143
1144 val_round = jiffies_to_usecs(usecs_to_jiffies(val));
1145 if (val != val_round)
1146 pr_warn("timer_rate not aligned to jiffy. Rounded up to %lu\n",
1147 val_round);
Junjie Wu847796e2014-08-15 16:34:37 -07001148 tunables->timer_rate = val_round;
Junjie Wu4344ea32014-04-28 16:22:24 -07001149
1150 if (!tunables->use_sched_load)
1151 return count;
1152
1153 for_each_possible_cpu(cpu) {
Junjie Wucf531ef2015-04-17 12:48:36 -07001154 if (!per_cpu(polinfo, cpu))
1155 continue;
1156 t = per_cpu(polinfo, cpu)->cached_tunables;
Junjie Wu4344ea32014-04-28 16:22:24 -07001157 if (t && t->use_sched_load)
1158 t->timer_rate = val_round;
1159 }
1160 set_window_helper(tunables);
1161
Mike Chanef969692010-06-22 11:26:45 -07001162 return count;
1163}
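
/*
 * Illustrative example (assuming HZ=100): writing 25000 to timer_rate
 * rounds up to 30000us, since usecs_to_jiffies(25000) is 3 jiffies and
 * jiffies_to_usecs(3) is 30000; the pr_warn above reports the rounding.
 */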
1164
Viresh Kumar17d15c42013-05-16 14:58:54 +05301165static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
1166 char *buf)
Todd Poynor4add2592012-12-18 17:50:10 -08001167{
Viresh Kumar17d15c42013-05-16 14:58:54 +05301168 return sprintf(buf, "%d\n", tunables->timer_slack_val);
Todd Poynor4add2592012-12-18 17:50:10 -08001169}
1170
Viresh Kumar17d15c42013-05-16 14:58:54 +05301171static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
1172 const char *buf, size_t count)
Todd Poynor4add2592012-12-18 17:50:10 -08001173{
1174 int ret;
1175 unsigned long val;
1176
1177 ret = kstrtol(buf, 10, &val);
1178 if (ret < 0)
1179 return ret;
1180
Viresh Kumar17d15c42013-05-16 14:58:54 +05301181 tunables->timer_slack_val = val;
Todd Poynor4add2592012-12-18 17:50:10 -08001182 return count;
1183}
1184
static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
			  char *buf)
{
	return sprintf(buf, "%d\n", tunables->boost_val);
}

static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boost_val = val;

	if (tunables->boost_val) {
		trace_cpufreq_interactive_boost("on");
		if (!tunables->boosted)
			cpufreq_interactive_boost(tunables);
	} else {
		tunables->boostpulse_endtime = ktime_to_us(ktime_get());
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

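/*
 * boostpulse is write-only: any value written pushes boostpulse_endtime out
 * to now + boostpulse_duration_val (in microseconds) and, if the CPUs are
 * not already boosted, calls cpufreq_interactive_boost() to bring them up
 * to hispeed_freq right away.
 */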
static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
		tunables->boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	if (!tunables->boosted)
		cpufreq_interactive_boost(tunables);
	return count;
}

static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_duration_val = val;
	return count;
}

static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->io_is_busy);
}

static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;
	struct cpufreq_interactive_tunables *t;
	int cpu;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->io_is_busy = val;

	if (!tunables->use_sched_load)
		return count;

	for_each_possible_cpu(cpu) {
		if (!per_cpu(polinfo, cpu))
			continue;
		t = per_cpu(polinfo, cpu)->cached_tunables;
		if (t && t->use_sched_load)
			t->io_is_busy = val;
	}
	sched_set_io_is_busy(val);

	return count;
}

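/*
 * set_window_count counts governor instances that have use_sched_load
 * enabled: only the first enabler programs the scheduler window and
 * io_is_busy, later ones inherit the timer_rate/io_is_busy already in use.
 * migration_register_count likewise makes sure the load-alert notifier is
 * registered exactly once. Both counters are protected by sched_lock.
 */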
static int cpufreq_interactive_enable_sched_input(
			struct cpufreq_interactive_tunables *tunables)
{
	int rc = 0, j;
	struct cpufreq_interactive_tunables *t;

	mutex_lock(&sched_lock);

	set_window_count++;
	if (set_window_count > 1) {
		for_each_possible_cpu(j) {
			if (!per_cpu(polinfo, j))
				continue;
			t = per_cpu(polinfo, j)->cached_tunables;
			if (t && t->use_sched_load) {
				tunables->timer_rate = t->timer_rate;
				tunables->io_is_busy = t->io_is_busy;
				break;
			}
		}
	} else {
		rc = set_window_helper(tunables);
		if (rc) {
			pr_err("%s: Failed to set sched window\n", __func__);
			set_window_count--;
			goto out;
		}
		sched_set_io_is_busy(tunables->io_is_busy);
	}

	if (!tunables->use_migration_notif)
		goto out;

	migration_register_count++;
	if (migration_register_count > 1)
		goto out;
	else
		atomic_notifier_chain_register(&load_alert_notifier_head,
					       &load_notifier_block);
out:
	mutex_unlock(&sched_lock);
	return rc;
}

static int cpufreq_interactive_disable_sched_input(
			struct cpufreq_interactive_tunables *tunables)
{
	mutex_lock(&sched_lock);

	if (tunables->use_migration_notif) {
		migration_register_count--;
		if (migration_register_count < 1)
			atomic_notifier_chain_unregister(
					&load_alert_notifier_head,
					&load_notifier_block);
	}
	set_window_count--;

	mutex_unlock(&sched_lock);
	return 0;
}

static ssize_t show_use_sched_load(
		struct cpufreq_interactive_tunables *tunables, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", tunables->use_sched_load);
}

static ssize_t store_use_sched_load(
			struct cpufreq_interactive_tunables *tunables,
			const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	if (tunables->use_sched_load == (bool) val)
		return count;

	tunables->use_sched_load = val;

	if (val)
		ret = cpufreq_interactive_enable_sched_input(tunables);
	else
		ret = cpufreq_interactive_disable_sched_input(tunables);

	if (ret) {
		tunables->use_sched_load = !val;
		return ret;
	}

	return count;
}

static ssize_t show_use_migration_notif(
		struct cpufreq_interactive_tunables *tunables, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
			tunables->use_migration_notif);
}

static ssize_t store_use_migration_notif(
			struct cpufreq_interactive_tunables *tunables,
			const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	if (tunables->use_migration_notif == (bool) val)
		return count;
	tunables->use_migration_notif = val;

	if (!tunables->use_sched_load)
		return count;

	mutex_lock(&sched_lock);
	if (val) {
		migration_register_count++;
		if (migration_register_count == 1)
			atomic_notifier_chain_register(
					&load_alert_notifier_head,
					&load_notifier_block);
	} else {
		migration_register_count--;
		if (!migration_register_count)
			atomic_notifier_chain_unregister(
					&load_alert_notifier_head,
					&load_notifier_block);
	}
	mutex_unlock(&sched_lock);

	return count;
}

/*
 * Create show/store routines
 * - sys: One governor instance for complete SYSTEM
 * - pol: One governor instance per struct cpufreq_policy
 */
#define show_gov_pol_sys(file_name) \
static ssize_t show_##file_name##_gov_sys \
(struct kobject *kobj, struct attribute *attr, char *buf) \
{ \
	return show_##file_name(common_tunables, buf); \
} \
\
static ssize_t show_##file_name##_gov_pol \
(struct cpufreq_policy *policy, char *buf) \
{ \
	return show_##file_name(policy->governor_data, buf); \
}

#define store_gov_pol_sys(file_name) \
static ssize_t store_##file_name##_gov_sys \
(struct kobject *kobj, struct attribute *attr, const char *buf, \
 size_t count) \
{ \
	return store_##file_name(common_tunables, buf, count); \
} \
\
static ssize_t store_##file_name##_gov_pol \
(struct cpufreq_policy *policy, const char *buf, size_t count) \
{ \
	return store_##file_name(policy->governor_data, buf, count); \
}

#define show_store_gov_pol_sys(file_name) \
show_gov_pol_sys(file_name); \
store_gov_pol_sys(file_name)

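/*
 * For example, show_store_gov_pol_sys(timer_rate) expands to four wrappers:
 * show_timer_rate_gov_sys()/store_timer_rate_gov_sys(), which operate on
 * common_tunables, and show_timer_rate_gov_pol()/store_timer_rate_gov_pol(),
 * which operate on policy->governor_data.
 */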
show_store_gov_pol_sys(target_loads);
show_store_gov_pol_sys(above_hispeed_delay);
show_store_gov_pol_sys(hispeed_freq);
show_store_gov_pol_sys(go_hispeed_load);
show_store_gov_pol_sys(min_sample_time);
show_store_gov_pol_sys(timer_rate);
show_store_gov_pol_sys(timer_slack);
show_store_gov_pol_sys(boost);
store_gov_pol_sys(boostpulse);
show_store_gov_pol_sys(boostpulse_duration);
show_store_gov_pol_sys(io_is_busy);
show_store_gov_pol_sys(use_sched_load);
show_store_gov_pol_sys(use_migration_notif);
show_store_gov_pol_sys(max_freq_hysteresis);
show_store_gov_pol_sys(align_windows);
show_store_gov_pol_sys(ignore_hispeed_on_notif);
show_store_gov_pol_sys(fast_ramp_down);
show_store_gov_pol_sys(enable_prediction);

#define gov_sys_attr_rw(_name) \
static struct global_attr _name##_gov_sys = \
__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)

#define gov_pol_attr_rw(_name) \
static struct freq_attr _name##_gov_pol = \
__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)

#define gov_sys_pol_attr_rw(_name) \
	gov_sys_attr_rw(_name); \
	gov_pol_attr_rw(_name)

gov_sys_pol_attr_rw(target_loads);
gov_sys_pol_attr_rw(above_hispeed_delay);
gov_sys_pol_attr_rw(hispeed_freq);
gov_sys_pol_attr_rw(go_hispeed_load);
gov_sys_pol_attr_rw(min_sample_time);
gov_sys_pol_attr_rw(timer_rate);
gov_sys_pol_attr_rw(timer_slack);
gov_sys_pol_attr_rw(boost);
gov_sys_pol_attr_rw(boostpulse_duration);
gov_sys_pol_attr_rw(io_is_busy);
gov_sys_pol_attr_rw(use_sched_load);
gov_sys_pol_attr_rw(use_migration_notif);
gov_sys_pol_attr_rw(max_freq_hysteresis);
gov_sys_pol_attr_rw(align_windows);
gov_sys_pol_attr_rw(ignore_hispeed_on_notif);
gov_sys_pol_attr_rw(fast_ramp_down);
gov_sys_pol_attr_rw(enable_prediction);

static struct global_attr boostpulse_gov_sys =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);

static struct freq_attr boostpulse_gov_pol =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);

/* One Governor instance for entire system */
static struct attribute *interactive_attributes_gov_sys[] = {
	&target_loads_gov_sys.attr,
	&above_hispeed_delay_gov_sys.attr,
	&hispeed_freq_gov_sys.attr,
	&go_hispeed_load_gov_sys.attr,
	&min_sample_time_gov_sys.attr,
	&timer_rate_gov_sys.attr,
	&timer_slack_gov_sys.attr,
	&boost_gov_sys.attr,
	&boostpulse_gov_sys.attr,
	&boostpulse_duration_gov_sys.attr,
	&io_is_busy_gov_sys.attr,
	&use_sched_load_gov_sys.attr,
	&use_migration_notif_gov_sys.attr,
	&max_freq_hysteresis_gov_sys.attr,
	&align_windows_gov_sys.attr,
	&ignore_hispeed_on_notif_gov_sys.attr,
	&fast_ramp_down_gov_sys.attr,
	&enable_prediction_gov_sys.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_sys = {
	.attrs = interactive_attributes_gov_sys,
	.name = "interactive",
};

/* Per policy governor instance */
static struct attribute *interactive_attributes_gov_pol[] = {
	&target_loads_gov_pol.attr,
	&above_hispeed_delay_gov_pol.attr,
	&hispeed_freq_gov_pol.attr,
	&go_hispeed_load_gov_pol.attr,
	&min_sample_time_gov_pol.attr,
	&timer_rate_gov_pol.attr,
	&timer_slack_gov_pol.attr,
	&boost_gov_pol.attr,
	&boostpulse_gov_pol.attr,
	&boostpulse_duration_gov_pol.attr,
	&io_is_busy_gov_pol.attr,
	&use_sched_load_gov_pol.attr,
	&use_migration_notif_gov_pol.attr,
	&max_freq_hysteresis_gov_pol.attr,
	&align_windows_gov_pol.attr,
	&ignore_hispeed_on_notif_gov_pol.attr,
	&fast_ramp_down_gov_pol.attr,
	&enable_prediction_gov_pol.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_pol = {
	.attrs = interactive_attributes_gov_pol,
	.name = "interactive",
};

static struct attribute_group *get_sysfs_attr(void)
{
	if (have_governor_per_policy())
		return &interactive_attr_group_gov_pol;
	else
		return &interactive_attr_group_gov_sys;
}

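/*
 * The per-policy slack timer intentionally has an empty handler; its only
 * apparent purpose is to wake an otherwise idle CPU so the governor gets a
 * chance to re-evaluate and step down from a no-longer-needed frequency.
 */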
static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

static struct cpufreq_interactive_tunables *alloc_tunable(
					struct cpufreq_policy *policy)
{
	struct cpufreq_interactive_tunables *tunables;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (!tunables)
		return ERR_PTR(-ENOMEM);

	tunables->above_hispeed_delay = default_above_hispeed_delay;
	tunables->nabove_hispeed_delay =
		ARRAY_SIZE(default_above_hispeed_delay);
	tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
	tunables->target_loads = default_target_loads;
	tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
	tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	tunables->timer_rate = DEFAULT_TIMER_RATE;
	tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
	tunables->timer_slack_val = DEFAULT_TIMER_SLACK;

	spin_lock_init(&tunables->target_loads_lock);
	spin_lock_init(&tunables->above_hispeed_delay_lock);

	return tunables;
}

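/*
 * irq_work handler: runs on the CPU that queued the work, performs one
 * governor evaluation for the local policy, then clears work_in_progress
 * under irq_work_lock so a later utilization update can queue new work.
 */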
static void irq_work(struct irq_work *irq_work)
{
	struct cpufreq_interactive_policyinfo *ppol;
	unsigned long flags;

	ppol = container_of(irq_work, struct cpufreq_interactive_policyinfo,
			    irq_work);

	cpufreq_interactive_timer(smp_processor_id());
	spin_lock_irqsave(&ppol->irq_work_lock, flags);
	ppol->work_in_progress = false;
	spin_unlock_irqrestore(&ppol->irq_work_lock, flags);
}

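/*
 * Allocate (or return the already-allocated) per-policy bookkeeping
 * structure. A single policyinfo is shared by all CPUs in
 * policy->related_cpus; each of their polinfo pointers refers to the
 * same object.
 */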
static struct cpufreq_interactive_policyinfo *get_policyinfo(
					struct cpufreq_policy *policy)
{
	struct cpufreq_interactive_policyinfo *ppol =
		per_cpu(polinfo, policy->cpu);
	int i;
	struct sched_load *sl;

	/* polinfo already allocated for policy, return */
	if (ppol)
		return ppol;

	ppol = kzalloc(sizeof(*ppol), GFP_KERNEL);
	if (!ppol)
		return ERR_PTR(-ENOMEM);

	sl = kcalloc(cpumask_weight(policy->related_cpus), sizeof(*sl),
		     GFP_KERNEL);
	if (!sl) {
		kfree(ppol);
		return ERR_PTR(-ENOMEM);
	}
	ppol->sl = sl;

	init_timer(&ppol->policy_slack_timer);
	ppol->policy_slack_timer.function = cpufreq_interactive_nop_timer;
	hrtimer_init(&ppol->notif_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	ppol->notif_timer.function = cpufreq_interactive_hrtimer;
	init_irq_work(&ppol->irq_work, irq_work);
	spin_lock_init(&ppol->irq_work_lock);
	spin_lock_init(&ppol->load_lock);
	spin_lock_init(&ppol->target_freq_lock);
	init_rwsem(&ppol->enable_sem);

	for_each_cpu(i, policy->related_cpus)
		per_cpu(polinfo, i) = ppol;
	return ppol;
}

/* This function is not multithread-safe. */
static void free_policyinfo(int cpu)
{
	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
	int j;

	if (!ppol)
		return;

	for_each_possible_cpu(j)
		if (per_cpu(polinfo, j) == ppol)
			per_cpu(polinfo, j) = NULL;
	kfree(ppol->cached_tunables);
	kfree(ppol->sl);
	kfree(ppol);
}

static struct cpufreq_interactive_tunables *get_tunables(
				struct cpufreq_interactive_policyinfo *ppol)
{
	if (have_governor_per_policy())
		return ppol->cached_tunables;
	else
		return cached_common_tunables;
}

/* Interactive Governor callbacks */
struct interactive_governor {
	struct cpufreq_governor gov;
	unsigned int usage_count;
};

static struct interactive_governor interactive_gov;

#define CPU_FREQ_GOV_INTERACTIVE (&interactive_gov.gov)

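/*
 * Governor ->init callback: allocates or reuses the tunables for this
 * policy, exposes them through sysfs, and registers the cpufreq transition
 * notifier when the first policy starts using the governor.
 */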
1702int cpufreq_interactive_init(struct cpufreq_policy *policy)
Mike Chanef969692010-06-22 11:26:45 -07001703{
1704 int rc;
Junjie Wucf531ef2015-04-17 12:48:36 -07001705 struct cpufreq_interactive_policyinfo *ppol;
Stephen Boyd9a864832017-03-13 16:49:15 -07001706 struct cpufreq_interactive_tunables *tunables;
1707
1708 if (have_governor_per_policy())
1709 tunables = policy->governor_data;
1710 else
1711 tunables = common_tunables;
1712
1713 ppol = get_policyinfo(policy);
1714 if (IS_ERR(ppol))
1715 return PTR_ERR(ppol);
1716
1717 if (have_governor_per_policy()) {
1718 WARN_ON(tunables);
1719 } else if (tunables) {
1720 tunables->usage_count++;
1721 cpumask_or(&controlled_cpus, &controlled_cpus,
1722 policy->related_cpus);
1723 sched_update_freq_max_load(policy->related_cpus);
1724 policy->governor_data = tunables;
1725 return 0;
1726 }
1727
1728 tunables = get_tunables(ppol);
1729 if (!tunables) {
1730 tunables = alloc_tunable(policy);
1731 if (IS_ERR(tunables))
1732 return PTR_ERR(tunables);
1733 }
1734
1735 tunables->usage_count = 1;
1736 policy->governor_data = tunables;
1737 if (!have_governor_per_policy())
1738 common_tunables = tunables;
1739
1740 rc = sysfs_create_group(get_governor_parent_kobj(policy),
1741 get_sysfs_attr());
1742 if (rc) {
1743 kfree(tunables);
1744 policy->governor_data = NULL;
1745 if (!have_governor_per_policy())
1746 common_tunables = NULL;
1747 return rc;
1748 }
1749
1750 if (!interactive_gov.usage_count++)
1751 cpufreq_register_notifier(&cpufreq_notifier_block,
1752 CPUFREQ_TRANSITION_NOTIFIER);
1753
1754 if (tunables->use_sched_load)
1755 cpufreq_interactive_enable_sched_input(tunables);
1756
1757 cpumask_or(&controlled_cpus, &controlled_cpus,
1758 policy->related_cpus);
1759 sched_update_freq_max_load(policy->related_cpus);
1760
1761 if (have_governor_per_policy())
1762 ppol->cached_tunables = tunables;
1763 else
1764 cached_common_tunables = tunables;
1765
1766 return 0;
1767}
1768
1769void cpufreq_interactive_exit(struct cpufreq_policy *policy)
1770{
1771 struct cpufreq_interactive_tunables *tunables;
1772
1773 if (have_governor_per_policy())
1774 tunables = policy->governor_data;
1775 else
1776 tunables = common_tunables;
1777
1778 BUG_ON(!tunables);
1779
1780 cpumask_andnot(&controlled_cpus, &controlled_cpus,
1781 policy->related_cpus);
1782 sched_update_freq_max_load(cpu_possible_mask);
1783 if (!--tunables->usage_count) {
1784 /* Last policy using the governor ? */
1785 if (!--interactive_gov.usage_count)
1786 cpufreq_unregister_notifier(&cpufreq_notifier_block,
1787 CPUFREQ_TRANSITION_NOTIFIER);
1788
1789 sysfs_remove_group(get_governor_parent_kobj(policy),
1790 get_sysfs_attr());
1791
1792 common_tunables = NULL;
1793 }
1794
1795 policy->governor_data = NULL;
1796
1797 if (tunables->use_sched_load)
1798 cpufreq_interactive_disable_sched_input(tunables);
1799}
1800
1801int cpufreq_interactive_start(struct cpufreq_policy *policy)
1802{
1803 struct cpufreq_interactive_policyinfo *ppol;
Mike Chanef969692010-06-22 11:26:45 -07001804 struct cpufreq_frequency_table *freq_table;
Viresh Kumar17d15c42013-05-16 14:58:54 +05301805 struct cpufreq_interactive_tunables *tunables;
1806
1807 if (have_governor_per_policy())
1808 tunables = policy->governor_data;
1809 else
1810 tunables = common_tunables;
1811
Stephen Boyd9a864832017-03-13 16:49:15 -07001812 BUG_ON(!tunables);
1813 mutex_lock(&gov_lock);
Mike Chanef969692010-06-22 11:26:45 -07001814
Stephen Boyd9a864832017-03-13 16:49:15 -07001815 freq_table = policy->freq_table;
1816 if (!tunables->hispeed_freq)
1817 tunables->hispeed_freq = policy->max;
Junjie Wucf531ef2015-04-17 12:48:36 -07001818
Stephen Boyd9a864832017-03-13 16:49:15 -07001819 ppol = per_cpu(polinfo, policy->cpu);
1820 ppol->policy = policy;
1821 ppol->target_freq = policy->cur;
1822 ppol->freq_table = freq_table;
1823 ppol->p_nolim = *policy;
1824 ppol->p_nolim.min = policy->cpuinfo.min_freq;
1825 ppol->p_nolim.max = policy->cpuinfo.max_freq;
1826 ppol->floor_freq = ppol->target_freq;
1827 ppol->floor_validate_time = ktime_to_us(ktime_get());
1828 ppol->hispeed_validate_time = ppol->floor_validate_time;
1829 ppol->min_freq = policy->min;
1830 ppol->reject_notification = true;
1831 ppol->notif_pending = false;
1832 down_write(&ppol->enable_sem);
Stephen Boyd9a864832017-03-13 16:49:15 -07001833 del_timer_sync(&ppol->policy_slack_timer);
Stephen Boyd9a864832017-03-13 16:49:15 -07001834 ppol->last_evaluated_jiffy = get_jiffies_64();
1835 cpufreq_interactive_timer_start(tunables, policy->cpu);
1836 ppol->governor_enabled = 1;
1837 up_write(&ppol->enable_sem);
1838 ppol->reject_notification = false;
Viresh Kumar17d15c42013-05-16 14:58:54 +05301839
Stephen Boyd9a864832017-03-13 16:49:15 -07001840 mutex_unlock(&gov_lock);
Mike Chanef969692010-06-22 11:26:45 -07001841 return 0;
1842}
1843
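/*
 * Governor ->stop callback: notifications are rejected during teardown; the
 * update_util hook is removed and any in-flight irq_work is flushed before
 * the slack timer is killed, so no further evaluation can run against a
 * disabled policy.
 */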
void cpufreq_interactive_stop(struct cpufreq_policy *policy)
{
	struct cpufreq_interactive_policyinfo *ppol;
	struct cpufreq_interactive_tunables *tunables;

	if (have_governor_per_policy())
		tunables = policy->governor_data;
	else
		tunables = common_tunables;

	BUG_ON(!tunables);

	mutex_lock(&gov_lock);

	ppol = per_cpu(polinfo, policy->cpu);
	ppol->reject_notification = true;
	down_write(&ppol->enable_sem);
	ppol->governor_enabled = 0;
	ppol->target_freq = 0;
	gov_clear_update_util(ppol->policy);
	irq_work_sync(&ppol->irq_work);
	ppol->work_in_progress = false;
	del_timer_sync(&ppol->policy_slack_timer);
	up_write(&ppol->enable_sem);
	ppol->reject_notification = false;

	mutex_unlock(&gov_lock);
}

void cpufreq_interactive_limits(struct cpufreq_policy *policy)
{
	struct cpufreq_interactive_policyinfo *ppol;
	struct cpufreq_interactive_tunables *tunables;

	if (have_governor_per_policy())
		tunables = policy->governor_data;
	else
		tunables = common_tunables;

	BUG_ON(!tunables);
	ppol = per_cpu(polinfo, policy->cpu);

	__cpufreq_driver_target(policy,
			ppol->target_freq, CPUFREQ_RELATION_L);

	down_read(&ppol->enable_sem);
	if (ppol->governor_enabled) {
		if (policy->min < ppol->min_freq)
			cpufreq_interactive_timer_resched(policy->cpu,
							  true);
		ppol->min_freq = policy->min;
	}
	up_read(&ppol->enable_sem);
}

static struct interactive_governor interactive_gov = {
	.gov = {
		.name = "interactive",
		.max_transition_latency = 10000000,
		.owner = THIS_MODULE,
		.init = cpufreq_interactive_init,
		.exit = cpufreq_interactive_exit,
		.start = cpufreq_interactive_start,
		.stop = cpufreq_interactive_stop,
		.limits = cpufreq_interactive_limits,
	}
};

static int __init cpufreq_interactive_gov_init(void)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	spin_lock_init(&speedchange_cpumask_lock);
	mutex_init(&gov_lock);
	mutex_init(&sched_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process_no_notif(speedchange_task);

	return cpufreq_register_governor(CPU_FREQ_GOV_INTERACTIVE);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return CPU_FREQ_GOV_INTERACTIVE;
}

fs_initcall(cpufreq_interactive_gov_init);
#else
module_init(cpufreq_interactive_gov_init);
#endif

static void __exit cpufreq_interactive_gov_exit(void)
{
	int cpu;

	cpufreq_unregister_governor(CPU_FREQ_GOV_INTERACTIVE);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);

	for_each_possible_cpu(cpu)
		free_policyinfo(cpu);
}

module_exit(cpufreq_interactive_gov_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"Latency sensitive workloads");
MODULE_LICENSE("GPL");