/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

static DEFINE_PER_CPU(struct update_util_data, update_util);

struct cpufreq_interactive_policyinfo {
	bool work_in_progress;
	struct irq_work irq_work;
	spinlock_t irq_work_lock; /* protects work_in_progress */
	struct timer_list policy_slack_timer;
	struct hrtimer notif_timer;
	spinlock_t load_lock; /* protects load tracking stat */
	u64 last_evaluated_jiffy;
	struct cpufreq_policy *policy;
	struct cpufreq_policy p_nolim; /* policy copy with no limits */
	struct cpufreq_frequency_table *freq_table;
	spinlock_t target_freq_lock; /* protects target freq */
	unsigned int target_freq;
	unsigned int floor_freq;
	unsigned int min_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	u64 max_freq_hyst_start_time;
	struct rw_semaphore enable_sem;
	bool reject_notification;
	bool notif_pending;
	unsigned long notif_cpu;
	int governor_enabled;
	struct cpufreq_interactive_tunables *cached_tunables;
	struct sched_load *sl;
};

/* Protected by per-policy load_lock */
struct cpufreq_interactive_cpuinfo {
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	unsigned int loadadjfreq;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_policyinfo *, polinfo);
static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

static int set_window_count;
static int migration_register_count;
static struct mutex sched_lock;
static cpumask_t controlled_cpus;

/* Target load. Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
	DEFAULT_ABOVE_HISPEED_DELAY };

struct cpufreq_interactive_tunables {
	int usage_count;
	/* Hi speed to bump to from lo speed when load burst (default max) */
	unsigned int hispeed_freq;
	/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
	unsigned long go_hispeed_load;
	/* Target load. Lower values result in higher CPU speeds. */
	spinlock_t target_loads_lock;
	unsigned int *target_loads;
	int ntarget_loads;
	/*
	 * The minimum amount of time to spend at a frequency before we can
	 * ramp down.
	 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
	unsigned long min_sample_time;
	/*
	 * The sample rate of the timer used to increase frequency
	 */
	unsigned long timer_rate;
	/*
	 * Wait this long before raising speed above hispeed, by default a
	 * single timer interval.
	 */
	spinlock_t above_hispeed_delay_lock;
	unsigned int *above_hispeed_delay;
	int nabove_hispeed_delay;
	/* Non-zero means indefinite speed boost active */
	int boost_val;
	/* Duration of a boost pulse in usecs */
	int boostpulse_duration_val;
	/* End time of boost pulse in ktime converted to usecs */
	u64 boostpulse_endtime;
	bool boosted;
	/*
	 * Max additional time to wait in idle, beyond timer_rate, at speeds
	 * above minimum before wakeup to reduce speed, or -1 if unnecessary.
	 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
	int timer_slack_val;
	bool io_is_busy;

	/* scheduler input related flags */
	bool use_sched_load;
	bool use_migration_notif;

	/*
	 * Whether to align timer windows across all CPUs. When
	 * use_sched_load is true, this flag is ignored and windows
	 * will always be aligned.
	 */
	bool align_windows;

	/*
	 * Stay at max freq for at least max_freq_hysteresis before dropping
	 * frequency.
	 */
	unsigned int max_freq_hysteresis;

	/* Ignore hispeed_freq and above_hispeed_delay for notification */
	bool ignore_hispeed_on_notif;

	/* Ignore min_sample_time for notification */
	bool fast_ramp_down;

	/* Whether to enable prediction or not */
	bool enable_prediction;
};

/* For cases where we have single governor instance for system */
static struct cpufreq_interactive_tunables *common_tunables;
static struct cpufreq_interactive_tunables *cached_common_tunables;

static struct attribute_group *get_sysfs_attr(void);

/* Round to starting jiffy of next evaluation window */
static u64 round_to_nw_start(u64 jif,
			     struct cpufreq_interactive_tunables *tunables)
{
	unsigned long step = usecs_to_jiffies(tunables->timer_rate);
	u64 ret;

	if (tunables->use_sched_load || tunables->align_windows) {
		do_div(jif, step);
		ret = (jif + 1) * step;
	} else {
		ret = jiffies + usecs_to_jiffies(tunables->timer_rate);
	}

	return ret;
}

static inline int set_window_helper(
			struct cpufreq_interactive_tunables *tunables)
{
	return sched_set_window(round_to_nw_start(get_jiffies_64(), tunables),
				usecs_to_jiffies(tunables->timer_rate));
}

static void cpufreq_interactive_timer_resched(unsigned long cpu,
					      bool slack_only)
{
	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_interactive_tunables *tunables =
		ppol->policy->governor_data;
	u64 expires;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ppol->load_lock, flags);
	expires = round_to_nw_start(ppol->last_evaluated_jiffy, tunables);
	if (!slack_only) {
		for_each_cpu(i, ppol->policy->cpus) {
			pcpu = &per_cpu(cpuinfo, i);
			pcpu->time_in_idle = get_cpu_idle_time(i,
						&pcpu->time_in_idle_timestamp,
						tunables->io_is_busy);
			pcpu->cputime_speedadj = 0;
			pcpu->cputime_speedadj_timestamp =
						pcpu->time_in_idle_timestamp;
		}
	}

	if (tunables->timer_slack_val >= 0 &&
	    ppol->target_freq > ppol->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		del_timer(&ppol->policy_slack_timer);
		ppol->policy_slack_timer.expires = expires;
		add_timer(&ppol->policy_slack_timer);
	}

	spin_unlock_irqrestore(&ppol->load_lock, flags);
}

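/*
 * Scheduler update_util hook: queue the policy's irq_work to re-evaluate
 * frequency, unless work is already queued/in progress or the update is an
 * inter-cluster migration notification.
 */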
static void update_util_handler(struct update_util_data *data, u64 time,
				unsigned int sched_flags)
{
	struct cpufreq_interactive_policyinfo *ppol;
	unsigned long flags;

	ppol = *this_cpu_ptr(&polinfo);
	spin_lock_irqsave(&ppol->irq_work_lock, flags);
	/*
	 * The irq-work may not be allowed to be queued up right now
	 * because work has already been queued up or is in progress.
	 */
	if (ppol->work_in_progress ||
	    sched_flags & SCHED_CPUFREQ_INTERCLUSTER_MIG)
		goto out;

	ppol->work_in_progress = true;
	irq_work_queue(&ppol->irq_work);
out:
	spin_unlock_irqrestore(&ppol->irq_work_lock, flags);
}

static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_remove_update_util_hook(i);

	synchronize_sched();
}

static void gov_set_update_util(struct cpufreq_policy *policy)
{
	struct update_util_data *util;
	int cpu;

	for_each_cpu(cpu, policy->cpus) {
		util = &per_cpu(update_util, cpu);
		cpufreq_add_update_util_hook(cpu, util, update_util_handler);
	}
}

/* The caller shall take enable_sem write semaphore to avoid any timer race.
 * The policy_slack_timer must be deactivated when calling this function.
 */
static void cpufreq_interactive_timer_start(
	struct cpufreq_interactive_tunables *tunables, int cpu)
{
	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
	struct cpufreq_interactive_cpuinfo *pcpu;
	u64 expires = round_to_nw_start(ppol->last_evaluated_jiffy, tunables);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ppol->load_lock, flags);
	gov_set_update_util(ppol->policy);
	if (tunables->timer_slack_val >= 0 &&
	    ppol->target_freq > ppol->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		ppol->policy_slack_timer.expires = expires;
		add_timer(&ppol->policy_slack_timer);
	}

	for_each_cpu(i, ppol->policy->cpus) {
		pcpu = &per_cpu(cpuinfo, i);
		pcpu->time_in_idle =
			get_cpu_idle_time(i, &pcpu->time_in_idle_timestamp,
					  tunables->io_is_busy);
		pcpu->cputime_speedadj = 0;
		pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	}
	spin_unlock_irqrestore(&ppol->load_lock, flags);
}

static unsigned int freq_to_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
		    freq >= tunables->above_hispeed_delay[i+1]; i += 2)
		;

	ret = tunables->above_hispeed_delay[i];
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static unsigned int freq_to_targetload(
	struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads - 1 &&
		    freq >= tunables->target_loads[i+1]; i += 2)
		;

	ret = tunables->target_loads[i];
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

#define DEFAULT_MAX_LOAD 100
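/*
 * Return the target load configured for @freq on @cpu, or DEFAULT_MAX_LOAD
 * when the CPU is not governed here or no tunables have been cached.
 */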
u32 get_freq_max_load(int cpu, unsigned int freq)
{
	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);

	if (!cpumask_test_cpu(cpu, &controlled_cpus))
		return DEFAULT_MAX_LOAD;

	if (have_governor_per_policy()) {
		if (!ppol || !ppol->cached_tunables)
			return DEFAULT_MAX_LOAD;
		return freq_to_targetload(ppol->cached_tunables, freq);
	}

	if (!cached_common_tunables)
		return DEFAULT_MAX_LOAD;
	return freq_to_targetload(cached_common_tunables, freq);
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
static unsigned int choose_freq(struct cpufreq_interactive_policyinfo *pcpu,
				unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(pcpu->policy->governor_data, freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		index = cpufreq_frequency_table_target(&pcpu->p_nolim,
						       loadadjfreq / tl,
						       CPUFREQ_RELATION_L);
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				index = cpufreq_frequency_table_target(
					    &pcpu->p_nolim,
					    freqmax - 1, CPUFREQ_RELATION_H);
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low. freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				index = cpufreq_frequency_table_target(
					    &pcpu->p_nolim,
					    freqmin + 1, CPUFREQ_RELATION_L);
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}

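/*
 * Fold the CPU's active time since the last sample into cputime_speedadj,
 * weighted by the current frequency, and reset the idle-time baseline.
 * Returns the timestamp of this sample.
 */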
static u64 update_load(int cpu)
{
	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	struct cpufreq_interactive_tunables *tunables =
		ppol->policy->governor_data;
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

	if (delta_time <= delta_idle)
		active_time = 0;
	else
		active_time = delta_time - delta_idle;

	pcpu->cputime_speedadj += active_time * ppol->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}

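/*
 * Convert a scheduler busy-time sample into a load-adjusted frequency
 * (load percentage scaled by the policy's maximum frequency).
 */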
static unsigned int sl_busy_to_laf(struct cpufreq_interactive_policyinfo *ppol,
				   unsigned long busy)
{
	int prev_load;
	struct cpufreq_interactive_tunables *tunables =
		ppol->policy->governor_data;

	prev_load = mult_frac(ppol->policy->cpuinfo.max_freq * 100,
			      busy, tunables->timer_rate);
	return prev_load;
}

#define NEW_TASK_RATIO 75
#define PRED_TOLERANCE_PCT 10
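/*
 * Main evaluation path: sample the load of every CPU in the policy (from
 * scheduler input or idle-time accounting), pick a target via choose_freq(),
 * apply hispeed/boost/hysteresis and floor rules, then hand the result to
 * the speedchange task.
 */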
static void cpufreq_interactive_timer(int data)
{
	s64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	int pol_load = 0;
	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, data);
	struct cpufreq_interactive_tunables *tunables =
		ppol->policy->governor_data;
	struct sched_load *sl = ppol->sl;
	struct cpufreq_interactive_cpuinfo *pcpu;
	unsigned int new_freq;
	unsigned int prev_laf = 0, t_prevlaf;
	unsigned int pred_laf = 0, t_predlaf = 0;
	unsigned int prev_chfreq, pred_chfreq, chosen_freq;
	unsigned int index;
	unsigned long flags;
	unsigned long max_cpu;
	int i, cpu;
	int new_load_pct = 0;
	int prev_l, pred_l = 0;
	struct cpufreq_govinfo govinfo;
	bool skip_hispeed_logic, skip_min_sample_time;
	bool jump_to_max_no_ts = false;
	bool jump_to_max = false;

	if (!down_read_trylock(&ppol->enable_sem))
		return;
	if (!ppol->governor_enabled)
		goto exit;

	now = ktime_to_us(ktime_get());

	spin_lock_irqsave(&ppol->target_freq_lock, flags);
	spin_lock(&ppol->load_lock);

	skip_hispeed_logic =
		tunables->ignore_hispeed_on_notif && ppol->notif_pending;
	skip_min_sample_time = tunables->fast_ramp_down && ppol->notif_pending;
	ppol->notif_pending = false;
	now = ktime_to_us(ktime_get());
	ppol->last_evaluated_jiffy = get_jiffies_64();

	if (tunables->use_sched_load)
		sched_get_cpus_busy(sl, ppol->policy->cpus);
	max_cpu = cpumask_first(ppol->policy->cpus);
	i = 0;
	for_each_cpu(cpu, ppol->policy->cpus) {
		pcpu = &per_cpu(cpuinfo, cpu);
		if (tunables->use_sched_load) {
			t_prevlaf = sl_busy_to_laf(ppol, sl[i].prev_load);
			prev_l = t_prevlaf / ppol->target_freq;
			if (tunables->enable_prediction) {
				t_predlaf = sl_busy_to_laf(ppol,
						sl[i].predicted_load);
				pred_l = t_predlaf / ppol->target_freq;
			}
			if (sl[i].prev_load)
				new_load_pct = sl[i].new_task_load * 100 /
							sl[i].prev_load;
			else
				new_load_pct = 0;
		} else {
			now = update_load(cpu);
			delta_time = (unsigned int)
				(now - pcpu->cputime_speedadj_timestamp);
			if (WARN_ON_ONCE(!delta_time))
				continue;
			cputime_speedadj = pcpu->cputime_speedadj;
			do_div(cputime_speedadj, delta_time);
			t_prevlaf = (unsigned int)cputime_speedadj * 100;
			prev_l = t_prevlaf / ppol->target_freq;
		}

		/* find max of loadadjfreq inside policy */
		if (t_prevlaf > prev_laf) {
			prev_laf = t_prevlaf;
			max_cpu = cpu;
		}
		pred_laf = max(t_predlaf, pred_laf);

		cpu_load = max(prev_l, pred_l);
		pol_load = max(pol_load, cpu_load);
		trace_cpufreq_interactive_cpuload(cpu, cpu_load, new_load_pct,
						  prev_l, pred_l);

		/* save loadadjfreq for notification */
		pcpu->loadadjfreq = max(t_prevlaf, t_predlaf);

		/* detect heavy new task and jump to policy->max */
		if (prev_l >= tunables->go_hispeed_load &&
		    new_load_pct >= NEW_TASK_RATIO) {
			skip_hispeed_logic = true;
			jump_to_max = true;
		}
		i++;
	}
	spin_unlock(&ppol->load_lock);

	tunables->boosted = tunables->boost_val || now < tunables->boostpulse_endtime;

	prev_chfreq = choose_freq(ppol, prev_laf);
	pred_chfreq = choose_freq(ppol, pred_laf);
	chosen_freq = max(prev_chfreq, pred_chfreq);

	if (prev_chfreq < ppol->policy->max && pred_chfreq >= ppol->policy->max)
		if (!jump_to_max)
			jump_to_max_no_ts = true;

	if (now - ppol->max_freq_hyst_start_time <
	    tunables->max_freq_hysteresis &&
	    pol_load >= tunables->go_hispeed_load &&
	    ppol->target_freq < ppol->policy->max) {
		skip_hispeed_logic = true;
		skip_min_sample_time = true;
		if (!jump_to_max)
			jump_to_max_no_ts = true;
	}

	new_freq = chosen_freq;
	if (jump_to_max_no_ts || jump_to_max) {
		new_freq = ppol->policy->cpuinfo.max_freq;
	} else if (!skip_hispeed_logic) {
		if (pol_load >= tunables->go_hispeed_load ||
		    tunables->boosted) {
			if (ppol->target_freq < tunables->hispeed_freq)
				new_freq = tunables->hispeed_freq;
			else
				new_freq = max(new_freq,
					       tunables->hispeed_freq);
		}
	}

	if (now - ppol->max_freq_hyst_start_time <
	    tunables->max_freq_hysteresis)
		new_freq = max(tunables->hispeed_freq, new_freq);

	if (!skip_hispeed_logic &&
	    ppol->target_freq >= tunables->hispeed_freq &&
	    new_freq > ppol->target_freq &&
	    now - ppol->hispeed_validate_time <
	    freq_to_above_hispeed_delay(tunables, ppol->target_freq)) {
		trace_cpufreq_interactive_notyet(
			max_cpu, pol_load, ppol->target_freq,
			ppol->policy->cur, new_freq);
		spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
		goto rearm;
	}

	ppol->hispeed_validate_time = now;

	index = cpufreq_frequency_table_target(&ppol->p_nolim, new_freq,
					       CPUFREQ_RELATION_L);
	new_freq = ppol->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (!skip_min_sample_time && new_freq < ppol->floor_freq) {
		if (now - ppol->floor_validate_time <
		    tunables->min_sample_time) {
			trace_cpufreq_interactive_notyet(
				max_cpu, pol_load, ppol->target_freq,
				ppol->policy->cur, new_freq);
			spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq. If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off). If policy->max is restored
	 * for max_freq_hysteresis, don't extend the timestamp. Otherwise, it
	 * could incorrectly extend the duration of max_freq_hysteresis by
	 * min_sample_time.
	 */

	if ((!tunables->boosted || new_freq > tunables->hispeed_freq)
	    && !jump_to_max_no_ts) {
		ppol->floor_freq = new_freq;
		ppol->floor_validate_time = now;
	}

	if (new_freq >= ppol->policy->max && !jump_to_max_no_ts)
		ppol->max_freq_hyst_start_time = now;

	if (ppol->target_freq == new_freq &&
	    ppol->target_freq <= ppol->policy->cur) {
		trace_cpufreq_interactive_already(
			max_cpu, pol_load, ppol->target_freq,
			ppol->policy->cur, new_freq);
		spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
		goto rearm;
	}

	trace_cpufreq_interactive_target(max_cpu, pol_load, ppol->target_freq,
					 ppol->policy->cur, new_freq);

	ppol->target_freq = new_freq;
	spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(max_cpu, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	wake_up_process(speedchange_task);

rearm:
	cpufreq_interactive_timer_resched(data, false);

	/*
	 * Send govinfo notification.
	 * Govinfo notification could potentially wake up another thread
	 * managed by its clients. Thread wakeups might trigger a load
	 * change callback that executes this function again. Therefore
	 * no spinlock could be held when sending the notification.
	 */
	for_each_cpu(i, ppol->policy->cpus) {
		pcpu = &per_cpu(cpuinfo, i);
		govinfo.cpu = i;
		govinfo.load = pcpu->loadadjfreq / ppol->policy->max;
		govinfo.sampling_rate_us = tunables->timer_rate;
		atomic_notifier_call_chain(&cpufreq_govinfo_notifier_list,
					   CPUFREQ_LOAD_CHANGE, &govinfo);
	}

exit:
	up_read(&ppol->enable_sem);
	return;
}

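/*
 * Frequency-scaling kthread: for each CPU flagged in speedchange_cpumask,
 * apply the policy's pending target_freq via __cpufreq_driver_target().
 */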
static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_policyinfo *ppol;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			ppol = per_cpu(polinfo, cpu);
			if (!down_read_trylock(&ppol->enable_sem))
				continue;
			if (!ppol->governor_enabled) {
				up_read(&ppol->enable_sem);
				continue;
			}

			if (ppol->target_freq != ppol->policy->cur)
				__cpufreq_driver_target(ppol->policy,
							ppol->target_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
						ppol->target_freq,
						ppol->policy->cur);
			up_read(&ppol->enable_sem);
		}
	}

	return 0;
}

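/*
 * Apply a boost for the policy owning @tunables: raise its target to at
 * least hispeed_freq and reset the floor frequency/validation time so the
 * boost is held for min_sample_time.
 */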
static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunables)
{
	int i;
	int anyboost = 0;
	unsigned long flags[2];
	struct cpufreq_interactive_policyinfo *ppol;

	tunables->boosted = true;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);

	for_each_online_cpu(i) {
		ppol = per_cpu(polinfo, i);
		if (!ppol || tunables != ppol->policy->governor_data)
			continue;

		spin_lock_irqsave(&ppol->target_freq_lock, flags[1]);
		if (ppol->target_freq < tunables->hispeed_freq) {
			ppol->target_freq = tunables->hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			ppol->hispeed_validate_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */

		ppol->floor_freq = tunables->hispeed_freq;
		ppol->floor_validate_time = ktime_to_us(ktime_get());
		spin_unlock_irqrestore(&ppol->target_freq_lock, flags[1]);
		break;
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);

	if (anyboost)
		wake_up_process(speedchange_task);
}

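/*
 * Scheduler load-change notifier: when scheduler input is in use, mark the
 * policy's notification as pending and arm a 1 ms hrtimer to force a
 * re-evaluation on the notified CPU.
 */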
static int load_change_callback(struct notifier_block *nb, unsigned long val,
				void *data)
{
	unsigned long cpu = (unsigned long) data;
	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
	struct cpufreq_interactive_tunables *tunables;
	unsigned long flags;

	if (!ppol || ppol->reject_notification)
		return 0;

	if (!down_read_trylock(&ppol->enable_sem))
		return 0;
	if (!ppol->governor_enabled)
		goto exit;

	tunables = ppol->policy->governor_data;
	if (!tunables->use_sched_load || !tunables->use_migration_notif)
		goto exit;

	spin_lock_irqsave(&ppol->target_freq_lock, flags);
	ppol->notif_pending = true;
	ppol->notif_cpu = cpu;
	spin_unlock_irqrestore(&ppol->target_freq_lock, flags);

	if (!hrtimer_is_queued(&ppol->notif_timer))
		hrtimer_start(&ppol->notif_timer, ms_to_ktime(1),
			      HRTIMER_MODE_REL);
exit:
	up_read(&ppol->enable_sem);
	return 0;
}

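/*
 * hrtimer handler armed by load_change_callback(): cancel the slack timer
 * and run the evaluation path for the CPU that raised the notification.
 */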
static enum hrtimer_restart cpufreq_interactive_hrtimer(struct hrtimer *timer)
{
	struct cpufreq_interactive_policyinfo *ppol = container_of(timer,
			struct cpufreq_interactive_policyinfo, notif_timer);
	int cpu;

	if (!down_read_trylock(&ppol->enable_sem))
		return HRTIMER_NORESTART;
	if (!ppol->governor_enabled) {
		up_read(&ppol->enable_sem);
		return HRTIMER_NORESTART;
	}
	cpu = ppol->notif_cpu;
	trace_cpufreq_interactive_load_change(cpu);
	del_timer(&ppol->policy_slack_timer);
	cpufreq_interactive_timer(cpu);

	up_read(&ppol->enable_sem);
	return HRTIMER_NORESTART;
}

static struct notifier_block load_notifier_block = {
	.notifier_call = load_change_callback,
};

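/*
 * cpufreq transition notifier: on POSTCHANGE, refresh the idle/load
 * accounting of every CPU in the policy that just changed frequency.
 */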
static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_policyinfo *ppol;
	int cpu;
	unsigned long flags;

	if (val == CPUFREQ_POSTCHANGE) {
		ppol = per_cpu(polinfo, freq->cpu);
		if (!ppol)
			return 0;
		if (!down_read_trylock(&ppol->enable_sem))
			return 0;
		if (!ppol->governor_enabled) {
			up_read(&ppol->enable_sem);
			return 0;
		}

		if (cpumask_first(ppol->policy->cpus) != freq->cpu) {
			up_read(&ppol->enable_sem);
			return 0;
		}
		spin_lock_irqsave(&ppol->load_lock, flags);
		for_each_cpu(cpu, ppol->policy->cpus)
			update_load(cpu);
		spin_unlock_irqrestore(&ppol->load_lock, flags);

		up_read(&ppol->enable_sem);
	}
	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

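/*
 * Parse a space/colon separated list of unsigned integers from sysfs input.
 * The token count must be odd. Returns a kmalloc'd array (caller frees) or
 * an ERR_PTR on failure.
 */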
static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
	const char *cp;
	int i;
	int ntokens = 1;
	unsigned int *tokenized_data;
	int err = -EINVAL;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err;

	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!tokenized_data) {
		err = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
			goto err_kfree;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_kfree;

	*num_tokens = ntokens;
	return tokenized_data;

err_kfree:
	kfree(tokenized_data);
err:
	return ERR_PTR(err);
}

static ssize_t show_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

static ssize_t store_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_target_loads = NULL;
	unsigned long flags;

	new_target_loads = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_target_loads))
		return PTR_RET(new_target_loads);

	spin_lock_irqsave(&tunables->target_loads_lock, flags);
	if (tunables->target_loads != default_target_loads)
		kfree(tunables->target_loads);
	tunables->target_loads = new_target_loads;
	tunables->ntarget_loads = ntokens;
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);

	sched_update_freq_max_load(&controlled_cpus);

	return count;
}

static ssize_t show_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay; i++)
		ret += sprintf(buf + ret, "%u%s",
			       tunables->above_hispeed_delay[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static ssize_t store_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_above_hispeed_delay = NULL;
	unsigned long flags;

	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_above_hispeed_delay))
		return PTR_RET(new_above_hispeed_delay);

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
	if (tunables->above_hispeed_delay != default_above_hispeed_delay)
		kfree(tunables->above_hispeed_delay);
	tunables->above_hispeed_delay = new_above_hispeed_delay;
	tunables->nabove_hispeed_delay = ntokens;
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return count;
}

static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->hispeed_freq);
}

static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	long unsigned int val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->hispeed_freq = val;
	return count;
}

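/* Generate trivial sysfs show/store handlers for simple unsigned tunables. */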
Junjie Wue05d74e2014-08-29 14:12:52 -07001064#define show_store_one(file_name) \
1065static ssize_t show_##file_name( \
1066 struct cpufreq_interactive_tunables *tunables, char *buf) \
1067{ \
1068 return snprintf(buf, PAGE_SIZE, "%u\n", tunables->file_name); \
1069} \
1070static ssize_t store_##file_name( \
1071 struct cpufreq_interactive_tunables *tunables, \
1072 const char *buf, size_t count) \
1073{ \
1074 int ret; \
1075 unsigned long int val; \
1076 \
1077 ret = kstrtoul(buf, 0, &val); \
1078 if (ret < 0) \
1079 return ret; \
1080 tunables->file_name = val; \
1081 return count; \
1082}
1083show_store_one(max_freq_hysteresis);
Junjie Wu7ca999f2014-08-29 18:55:45 -07001084show_store_one(align_windows);
Junjie Wu3381c4c2015-08-19 15:45:37 -07001085show_store_one(ignore_hispeed_on_notif);
Junjie Wu450c8572015-07-22 17:38:49 -07001086show_store_one(fast_ramp_down);
Junjie Wu7c128602015-06-09 17:36:11 -07001087show_store_one(enable_prediction);
Junjie Wue05d74e2014-08-29 14:12:52 -07001088
Viresh Kumar17d15c42013-05-16 14:58:54 +05301089static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
1090 *tunables, char *buf)
Mike Chanef969692010-06-22 11:26:45 -07001091{
Viresh Kumar17d15c42013-05-16 14:58:54 +05301092 return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
Mike Chanef969692010-06-22 11:26:45 -07001093}
1094
Viresh Kumar17d15c42013-05-16 14:58:54 +05301095static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
1096 *tunables, const char *buf, size_t count)
Mike Chanef969692010-06-22 11:26:45 -07001097{
1098 int ret;
1099 unsigned long val;
1100
Amit Pundircf076402015-11-03 20:53:29 +05301101 ret = kstrtoul(buf, 0, &val);
Mike Chanef969692010-06-22 11:26:45 -07001102 if (ret < 0)
1103 return ret;
Viresh Kumar17d15c42013-05-16 14:58:54 +05301104 tunables->go_hispeed_load = val;
Mike Chanef969692010-06-22 11:26:45 -07001105 return count;
1106}
1107
Viresh Kumar17d15c42013-05-16 14:58:54 +05301108static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
1109 *tunables, char *buf)
Mike Chanef969692010-06-22 11:26:45 -07001110{
Viresh Kumar17d15c42013-05-16 14:58:54 +05301111 return sprintf(buf, "%lu\n", tunables->min_sample_time);
Mike Chanef969692010-06-22 11:26:45 -07001112}
1113
Viresh Kumar17d15c42013-05-16 14:58:54 +05301114static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
1115 *tunables, const char *buf, size_t count)
Mike Chanef969692010-06-22 11:26:45 -07001116{
1117 int ret;
1118 unsigned long val;
1119
Amit Pundircf076402015-11-03 20:53:29 +05301120 ret = kstrtoul(buf, 0, &val);
Mike Chanef969692010-06-22 11:26:45 -07001121 if (ret < 0)
1122 return ret;
Viresh Kumar17d15c42013-05-16 14:58:54 +05301123 tunables->min_sample_time = val;
Mike Chanef969692010-06-22 11:26:45 -07001124 return count;
1125}
1126
Viresh Kumar17d15c42013-05-16 14:58:54 +05301127static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
1128 char *buf)
Mike Chanef969692010-06-22 11:26:45 -07001129{
Viresh Kumar17d15c42013-05-16 14:58:54 +05301130 return sprintf(buf, "%lu\n", tunables->timer_rate);
Mike Chanef969692010-06-22 11:26:45 -07001131}
1132
Viresh Kumar17d15c42013-05-16 14:58:54 +05301133static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
1134 const char *buf, size_t count)
Mike Chanef969692010-06-22 11:26:45 -07001135{
1136 int ret;
Junjie Wu847796e2014-08-15 16:34:37 -07001137 unsigned long val, val_round;
Junjie Wu4344ea32014-04-28 16:22:24 -07001138 struct cpufreq_interactive_tunables *t;
1139 int cpu;
Mike Chanef969692010-06-22 11:26:45 -07001140
Amit Pundircf076402015-11-03 20:53:29 +05301141 ret = kstrtoul(buf, 0, &val);
Mike Chanef969692010-06-22 11:26:45 -07001142 if (ret < 0)
1143 return ret;
Junjie Wu847796e2014-08-15 16:34:37 -07001144
1145 val_round = jiffies_to_usecs(usecs_to_jiffies(val));
1146 if (val != val_round)
1147 pr_warn("timer_rate not aligned to jiffy. Rounded up to %lu\n",
1148 val_round);
Junjie Wu847796e2014-08-15 16:34:37 -07001149 tunables->timer_rate = val_round;
Junjie Wu4344ea32014-04-28 16:22:24 -07001150
1151 if (!tunables->use_sched_load)
1152 return count;
1153
1154 for_each_possible_cpu(cpu) {
Junjie Wucf531ef2015-04-17 12:48:36 -07001155 if (!per_cpu(polinfo, cpu))
1156 continue;
1157 t = per_cpu(polinfo, cpu)->cached_tunables;
Junjie Wu4344ea32014-04-28 16:22:24 -07001158 if (t && t->use_sched_load)
1159 t->timer_rate = val_round;
1160 }
1161 set_window_helper(tunables);
1162
Mike Chanef969692010-06-22 11:26:45 -07001163 return count;
1164}
1165
Viresh Kumar17d15c42013-05-16 14:58:54 +05301166static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
1167 char *buf)
Todd Poynor4add2592012-12-18 17:50:10 -08001168{
Viresh Kumar17d15c42013-05-16 14:58:54 +05301169 return sprintf(buf, "%d\n", tunables->timer_slack_val);
Todd Poynor4add2592012-12-18 17:50:10 -08001170}
1171
Viresh Kumar17d15c42013-05-16 14:58:54 +05301172static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
1173 const char *buf, size_t count)
Todd Poynor4add2592012-12-18 17:50:10 -08001174{
1175 int ret;
1176 unsigned long val;
1177
1178 ret = kstrtol(buf, 10, &val);
1179 if (ret < 0)
1180 return ret;
1181
Viresh Kumar17d15c42013-05-16 14:58:54 +05301182 tunables->timer_slack_val = val;
Todd Poynor4add2592012-12-18 17:50:10 -08001183 return count;
1184}
1185
Viresh Kumar17d15c42013-05-16 14:58:54 +05301186static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
Todd Poynor15a9ea02012-04-23 20:42:41 -07001187 char *buf)
1188{
Viresh Kumar17d15c42013-05-16 14:58:54 +05301189 return sprintf(buf, "%d\n", tunables->boost_val);
Todd Poynor15a9ea02012-04-23 20:42:41 -07001190}
1191
Viresh Kumar17d15c42013-05-16 14:58:54 +05301192static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
Todd Poynor15a9ea02012-04-23 20:42:41 -07001193 const char *buf, size_t count)
1194{
1195 int ret;
1196 unsigned long val;
1197
1198 ret = kstrtoul(buf, 0, &val);
1199 if (ret < 0)
1200 return ret;
1201
Viresh Kumar17d15c42013-05-16 14:58:54 +05301202 tunables->boost_val = val;
Todd Poynor15a9ea02012-04-23 20:42:41 -07001203
Viresh Kumar17d15c42013-05-16 14:58:54 +05301204 if (tunables->boost_val) {
Todd Poynor442a3122012-05-03 00:16:55 -07001205 trace_cpufreq_interactive_boost("on");
Lianwei Wang2277e3f2014-12-02 17:20:50 -08001206 if (!tunables->boosted)
1207 cpufreq_interactive_boost(tunables);
Todd Poynor442a3122012-05-03 00:16:55 -07001208 } else {
Ruchi Kandoi296d7912014-04-09 16:47:59 -07001209 tunables->boostpulse_endtime = ktime_to_us(ktime_get());
Todd Poynor442a3122012-05-03 00:16:55 -07001210 trace_cpufreq_interactive_unboost("off");
1211 }
Todd Poynor15a9ea02012-04-23 20:42:41 -07001212
1213 return count;
1214}
1215
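/*
 * boostpulse is write-only: any write boosts the governed CPUs to at
 * least hispeed_freq for boostpulse_duration_val microseconds.
 *
 * Example (path assumes the single system-wide governor instance):
 *   echo 80000 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse_duration
 *   echo 1 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse
 */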
Viresh Kumar17d15c42013-05-16 14:58:54 +05301216static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
Todd Poynor442a3122012-05-03 00:16:55 -07001217 const char *buf, size_t count)
1218{
1219 int ret;
1220 unsigned long val;
1221
1222 ret = kstrtoul(buf, 0, &val);
1223 if (ret < 0)
1224 return ret;
1225
Viresh Kumar17d15c42013-05-16 14:58:54 +05301226 tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
1227 tunables->boostpulse_duration_val;
Todd Poynor442a3122012-05-03 00:16:55 -07001228 trace_cpufreq_interactive_boost("pulse");
Lianwei Wang2277e3f2014-12-02 17:20:50 -08001229 if (!tunables->boosted)
1230 cpufreq_interactive_boost(tunables);
Todd Poynor442a3122012-05-03 00:16:55 -07001231 return count;
1232}
1233
Viresh Kumar17d15c42013-05-16 14:58:54 +05301234static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
1235 *tunables, char *buf)
Todd Poynore16d5922012-12-14 17:31:19 -08001236{
Viresh Kumar17d15c42013-05-16 14:58:54 +05301237 return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
Todd Poynore16d5922012-12-14 17:31:19 -08001238}
1239
Viresh Kumar17d15c42013-05-16 14:58:54 +05301240static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
1241 *tunables, const char *buf, size_t count)
Todd Poynore16d5922012-12-14 17:31:19 -08001242{
1243 int ret;
1244 unsigned long val;
1245
1246 ret = kstrtoul(buf, 0, &val);
1247 if (ret < 0)
1248 return ret;
1249
Viresh Kumar17d15c42013-05-16 14:58:54 +05301250 tunables->boostpulse_duration_val = val;
Todd Poynore16d5922012-12-14 17:31:19 -08001251 return count;
1252}
1253
Viresh Kumar17d15c42013-05-16 14:58:54 +05301254static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
1255 char *buf)
Lianwei Wang72e40572013-02-22 11:39:18 +08001256{
Viresh Kumar17d15c42013-05-16 14:58:54 +05301257 return sprintf(buf, "%u\n", tunables->io_is_busy);
Lianwei Wang72e40572013-02-22 11:39:18 +08001258}
1259
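/*
 * io_is_busy controls whether time spent waiting for I/O is counted as
 * busy time.  As with timer_rate, the value is mirrored into every other
 * policy that uses sched load and pushed to the scheduler through
 * sched_set_io_is_busy().
 */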
Viresh Kumar17d15c42013-05-16 14:58:54 +05301260static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
1261 const char *buf, size_t count)
Lianwei Wang72e40572013-02-22 11:39:18 +08001262{
1263 int ret;
1264 unsigned long val;
Junjie Wu4344ea32014-04-28 16:22:24 -07001265 struct cpufreq_interactive_tunables *t;
1266 int cpu;
Lianwei Wang72e40572013-02-22 11:39:18 +08001267
1268 ret = kstrtoul(buf, 0, &val);
1269 if (ret < 0)
1270 return ret;
Viresh Kumar17d15c42013-05-16 14:58:54 +05301271 tunables->io_is_busy = val;
Junjie Wu4344ea32014-04-28 16:22:24 -07001272
1273 if (!tunables->use_sched_load)
1274 return count;
1275
1276 for_each_possible_cpu(cpu) {
Junjie Wucf531ef2015-04-17 12:48:36 -07001277 if (!per_cpu(polinfo, cpu))
1278 continue;
1279 t = per_cpu(polinfo, cpu)->cached_tunables;
Junjie Wu4344ea32014-04-28 16:22:24 -07001280 if (t && t->use_sched_load)
1281 t->io_is_busy = val;
1282 }
1283 sched_set_io_is_busy(val);
1284
1285 return count;
1286}
1287
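/*
 * Switch a tunables instance over to scheduler-reported load.  The first
 * user configures the sched load window and io_is_busy; later users
 * inherit timer_rate and io_is_busy from an existing sched-load user so
 * all policies stay consistent.  Optionally register the load/migration
 * notifier (only once, refcounted by migration_register_count).
 */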
1288static int cpufreq_interactive_enable_sched_input(
1289 struct cpufreq_interactive_tunables *tunables)
1290{
1291 int rc = 0, j;
1292 struct cpufreq_interactive_tunables *t;
1293
1294 mutex_lock(&sched_lock);
1295
1296 set_window_count++;
Junjie Wue627d702014-12-15 16:51:08 -08001297 if (set_window_count > 1) {
Junjie Wu4344ea32014-04-28 16:22:24 -07001298 for_each_possible_cpu(j) {
Junjie Wucf531ef2015-04-17 12:48:36 -07001299 if (!per_cpu(polinfo, j))
1300 continue;
1301 t = per_cpu(polinfo, j)->cached_tunables;
Junjie Wu4344ea32014-04-28 16:22:24 -07001302 if (t && t->use_sched_load) {
1303 tunables->timer_rate = t->timer_rate;
1304 tunables->io_is_busy = t->io_is_busy;
1305 break;
1306 }
1307 }
Junjie Wue627d702014-12-15 16:51:08 -08001308 } else {
1309 rc = set_window_helper(tunables);
1310 if (rc) {
1311 pr_err("%s: Failed to set sched window\n", __func__);
1312 set_window_count--;
1313 goto out;
1314 }
1315 sched_set_io_is_busy(tunables->io_is_busy);
Junjie Wu4344ea32014-04-28 16:22:24 -07001316 }
1317
Junjie Wu4344ea32014-04-28 16:22:24 -07001318 if (!tunables->use_migration_notif)
1319 goto out;
1320
1321 migration_register_count++;
Junjie Wue627d702014-12-15 16:51:08 -08001322 if (migration_register_count > 1)
Junjie Wu4344ea32014-04-28 16:22:24 -07001323 goto out;
1324 else
1325 atomic_notifier_chain_register(&load_alert_notifier_head,
1326 &load_notifier_block);
1327out:
1328 mutex_unlock(&sched_lock);
1329 return rc;
1330}
1331
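/*
 * Undo cpufreq_interactive_enable_sched_input(): unregister the
 * load/migration notifier when its last user goes away and drop the
 * sched window refcount.
 */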
1332static int cpufreq_interactive_disable_sched_input(
1333 struct cpufreq_interactive_tunables *tunables)
1334{
1335 mutex_lock(&sched_lock);
1336
1337 if (tunables->use_migration_notif) {
1338 migration_register_count--;
Junjie Wue627d702014-12-15 16:51:08 -08001339 if (migration_register_count < 1)
Junjie Wu4344ea32014-04-28 16:22:24 -07001340 atomic_notifier_chain_unregister(
1341 &load_alert_notifier_head,
1342 &load_notifier_block);
1343 }
1344 set_window_count--;
1345
1346 mutex_unlock(&sched_lock);
1347 return 0;
1348}
1349
1350static ssize_t show_use_sched_load(
1351 struct cpufreq_interactive_tunables *tunables, char *buf)
1352{
1353 return snprintf(buf, PAGE_SIZE, "%d\n", tunables->use_sched_load);
1354}
1355
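/*
 * use_sched_load selects between the governor's own load tracking and
 * load reported by the scheduler.  If switching fails, the old setting
 * is restored and the error is returned.
 */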
1356static ssize_t store_use_sched_load(
1357 struct cpufreq_interactive_tunables *tunables,
1358 const char *buf, size_t count)
1359{
1360 int ret;
1361 unsigned long val;
1362
1363 ret = kstrtoul(buf, 0, &val);
1364 if (ret < 0)
1365 return ret;
1366
1367 if (tunables->use_sched_load == (bool) val)
1368 return count;
Hanumath Prasada9c07002015-06-30 15:19:39 +05301369
1370 tunables->use_sched_load = val;
1371
Junjie Wu4344ea32014-04-28 16:22:24 -07001372 if (val)
1373 ret = cpufreq_interactive_enable_sched_input(tunables);
1374 else
1375 ret = cpufreq_interactive_disable_sched_input(tunables);
1376
Hanumath Prasada9c07002015-06-30 15:19:39 +05301377 if (ret) {
1378 tunables->use_sched_load = !val;
Junjie Wu4344ea32014-04-28 16:22:24 -07001379 return ret;
Hanumath Prasada9c07002015-06-30 15:19:39 +05301380 }
Junjie Wu4344ea32014-04-28 16:22:24 -07001381
Junjie Wu4344ea32014-04-28 16:22:24 -07001382 return count;
1383}
1384
1385static ssize_t show_use_migration_notif(
1386 struct cpufreq_interactive_tunables *tunables, char *buf)
1387{
1388 return snprintf(buf, PAGE_SIZE, "%d\n",
1389 tunables->use_migration_notif);
1390}
1391
1392static ssize_t store_use_migration_notif(
1393 struct cpufreq_interactive_tunables *tunables,
1394 const char *buf, size_t count)
1395{
1396 int ret;
1397 unsigned long val;
1398
1399 ret = kstrtoul(buf, 0, &val);
1400 if (ret < 0)
1401 return ret;
1402
1403 if (tunables->use_migration_notif == (bool) val)
1404 return count;
1405 tunables->use_migration_notif = val;
1406
1407 if (!tunables->use_sched_load)
1408 return count;
1409
1410 mutex_lock(&sched_lock);
1411 if (val) {
1412 migration_register_count++;
1413 if (migration_register_count == 1)
1414 atomic_notifier_chain_register(
1415 &load_alert_notifier_head,
1416 &load_notifier_block);
1417 } else {
1418 migration_register_count--;
1419 if (!migration_register_count)
1420 atomic_notifier_chain_unregister(
1421 &load_alert_notifier_head,
1422 &load_notifier_block);
1423 }
1424 mutex_unlock(&sched_lock);
1425
Lianwei Wang72e40572013-02-22 11:39:18 +08001426 return count;
1427}
1428
Viresh Kumar17d15c42013-05-16 14:58:54 +05301429/*
1430 * Create show/store routines
1431 * - sys: One governor instance for complete SYSTEM
1432 * - pol: One governor instance per struct cpufreq_policy
1433 */
1434#define show_gov_pol_sys(file_name) \
1435static ssize_t show_##file_name##_gov_sys \
1436(struct kobject *kobj, struct attribute *attr, char *buf) \
1437{ \
1438 return show_##file_name(common_tunables, buf); \
1439} \
1440 \
1441static ssize_t show_##file_name##_gov_pol \
1442(struct cpufreq_policy *policy, char *buf) \
1443{ \
1444 return show_##file_name(policy->governor_data, buf); \
1445}
Lianwei Wang72e40572013-02-22 11:39:18 +08001446
Viresh Kumar17d15c42013-05-16 14:58:54 +05301447#define store_gov_pol_sys(file_name) \
1448static ssize_t store_##file_name##_gov_sys \
1449(struct kobject *kobj, struct attribute *attr, const char *buf, \
1450 size_t count) \
1451{ \
1452 return store_##file_name(common_tunables, buf, count); \
1453} \
1454 \
1455static ssize_t store_##file_name##_gov_pol \
1456(struct cpufreq_policy *policy, const char *buf, size_t count) \
1457{ \
1458 return store_##file_name(policy->governor_data, buf, count); \
1459}
1460
1461#define show_store_gov_pol_sys(file_name) \
1462show_gov_pol_sys(file_name); \
1463store_gov_pol_sys(file_name)
1464
1465show_store_gov_pol_sys(target_loads);
1466show_store_gov_pol_sys(above_hispeed_delay);
1467show_store_gov_pol_sys(hispeed_freq);
1468show_store_gov_pol_sys(go_hispeed_load);
1469show_store_gov_pol_sys(min_sample_time);
1470show_store_gov_pol_sys(timer_rate);
1471show_store_gov_pol_sys(timer_slack);
1472show_store_gov_pol_sys(boost);
1473store_gov_pol_sys(boostpulse);
1474show_store_gov_pol_sys(boostpulse_duration);
1475show_store_gov_pol_sys(io_is_busy);
Junjie Wu4344ea32014-04-28 16:22:24 -07001476show_store_gov_pol_sys(use_sched_load);
1477show_store_gov_pol_sys(use_migration_notif);
Junjie Wue05d74e2014-08-29 14:12:52 -07001478show_store_gov_pol_sys(max_freq_hysteresis);
Junjie Wu7ca999f2014-08-29 18:55:45 -07001479show_store_gov_pol_sys(align_windows);
Junjie Wu3381c4c2015-08-19 15:45:37 -07001480show_store_gov_pol_sys(ignore_hispeed_on_notif);
Junjie Wu450c8572015-07-22 17:38:49 -07001481show_store_gov_pol_sys(fast_ramp_down);
Junjie Wu7c128602015-06-09 17:36:11 -07001482show_store_gov_pol_sys(enable_prediction);
Viresh Kumar17d15c42013-05-16 14:58:54 +05301483
1484#define gov_sys_attr_rw(_name) \
1485static struct global_attr _name##_gov_sys = \
1486__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)
1487
1488#define gov_pol_attr_rw(_name) \
1489static struct freq_attr _name##_gov_pol = \
1490__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
1491
1492#define gov_sys_pol_attr_rw(_name) \
1493 gov_sys_attr_rw(_name); \
1494 gov_pol_attr_rw(_name)
1495
1496gov_sys_pol_attr_rw(target_loads);
1497gov_sys_pol_attr_rw(above_hispeed_delay);
1498gov_sys_pol_attr_rw(hispeed_freq);
1499gov_sys_pol_attr_rw(go_hispeed_load);
1500gov_sys_pol_attr_rw(min_sample_time);
1501gov_sys_pol_attr_rw(timer_rate);
1502gov_sys_pol_attr_rw(timer_slack);
1503gov_sys_pol_attr_rw(boost);
1504gov_sys_pol_attr_rw(boostpulse_duration);
1505gov_sys_pol_attr_rw(io_is_busy);
Junjie Wu4344ea32014-04-28 16:22:24 -07001506gov_sys_pol_attr_rw(use_sched_load);
1507gov_sys_pol_attr_rw(use_migration_notif);
Junjie Wue05d74e2014-08-29 14:12:52 -07001508gov_sys_pol_attr_rw(max_freq_hysteresis);
Junjie Wu7ca999f2014-08-29 18:55:45 -07001509gov_sys_pol_attr_rw(align_windows);
Junjie Wu3381c4c2015-08-19 15:45:37 -07001510gov_sys_pol_attr_rw(ignore_hispeed_on_notif);
Junjie Wu450c8572015-07-22 17:38:49 -07001511gov_sys_pol_attr_rw(fast_ramp_down);
Junjie Wu7c128602015-06-09 17:36:11 -07001512gov_sys_pol_attr_rw(enable_prediction);
Viresh Kumar17d15c42013-05-16 14:58:54 +05301513
1514static struct global_attr boostpulse_gov_sys =
1515 __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);
1516
1517static struct freq_attr boostpulse_gov_pol =
1518 __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);
1519
1520/* One Governor instance for entire system */
1521static struct attribute *interactive_attributes_gov_sys[] = {
1522 &target_loads_gov_sys.attr,
1523 &above_hispeed_delay_gov_sys.attr,
1524 &hispeed_freq_gov_sys.attr,
1525 &go_hispeed_load_gov_sys.attr,
1526 &min_sample_time_gov_sys.attr,
1527 &timer_rate_gov_sys.attr,
1528 &timer_slack_gov_sys.attr,
1529 &boost_gov_sys.attr,
1530 &boostpulse_gov_sys.attr,
1531 &boostpulse_duration_gov_sys.attr,
1532 &io_is_busy_gov_sys.attr,
Junjie Wu4344ea32014-04-28 16:22:24 -07001533 &use_sched_load_gov_sys.attr,
1534 &use_migration_notif_gov_sys.attr,
Junjie Wue05d74e2014-08-29 14:12:52 -07001535 &max_freq_hysteresis_gov_sys.attr,
Junjie Wu7ca999f2014-08-29 18:55:45 -07001536 &align_windows_gov_sys.attr,
Junjie Wu3381c4c2015-08-19 15:45:37 -07001537 &ignore_hispeed_on_notif_gov_sys.attr,
Junjie Wu450c8572015-07-22 17:38:49 -07001538 &fast_ramp_down_gov_sys.attr,
Junjie Wu7c128602015-06-09 17:36:11 -07001539 &enable_prediction_gov_sys.attr,
Mike Chanef969692010-06-22 11:26:45 -07001540 NULL,
1541};
1542
Viresh Kumar17d15c42013-05-16 14:58:54 +05301543static struct attribute_group interactive_attr_group_gov_sys = {
1544 .attrs = interactive_attributes_gov_sys,
Mike Chanef969692010-06-22 11:26:45 -07001545 .name = "interactive",
1546};
1547
Viresh Kumar17d15c42013-05-16 14:58:54 +05301548/* Per policy governor instance */
1549static struct attribute *interactive_attributes_gov_pol[] = {
1550 &target_loads_gov_pol.attr,
1551 &above_hispeed_delay_gov_pol.attr,
1552 &hispeed_freq_gov_pol.attr,
1553 &go_hispeed_load_gov_pol.attr,
1554 &min_sample_time_gov_pol.attr,
1555 &timer_rate_gov_pol.attr,
1556 &timer_slack_gov_pol.attr,
1557 &boost_gov_pol.attr,
1558 &boostpulse_gov_pol.attr,
1559 &boostpulse_duration_gov_pol.attr,
1560 &io_is_busy_gov_pol.attr,
Junjie Wu4344ea32014-04-28 16:22:24 -07001561 &use_sched_load_gov_pol.attr,
1562 &use_migration_notif_gov_pol.attr,
Junjie Wue05d74e2014-08-29 14:12:52 -07001563 &max_freq_hysteresis_gov_pol.attr,
Junjie Wu7ca999f2014-08-29 18:55:45 -07001564 &align_windows_gov_pol.attr,
Junjie Wu3381c4c2015-08-19 15:45:37 -07001565 &ignore_hispeed_on_notif_gov_pol.attr,
Junjie Wu450c8572015-07-22 17:38:49 -07001566 &fast_ramp_down_gov_pol.attr,
Junjie Wu7c128602015-06-09 17:36:11 -07001567 &enable_prediction_gov_pol.attr,
Viresh Kumar17d15c42013-05-16 14:58:54 +05301568 NULL,
1569};
1570
1571static struct attribute_group interactive_attr_group_gov_pol = {
1572 .attrs = interactive_attributes_gov_pol,
1573 .name = "interactive",
1574};
1575
1576static struct attribute_group *get_sysfs_attr(void)
1577{
1578 if (have_governor_per_policy())
1579 return &interactive_attr_group_gov_pol;
1580 else
1581 return &interactive_attr_group_gov_sys;
1582}
1583
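/*
 * The slack timer's only purpose is to wake an idle CPU so that load is
 * re-evaluated; all real work happens elsewhere, so the handler is
 * intentionally empty.
 */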
Junjie Wucf531ef2015-04-17 12:48:36 -07001584static void cpufreq_interactive_nop_timer(unsigned long data)
Sam Leffler3ab7c2b2012-06-27 10:12:04 -07001585{
Junjie Wu53f83f82014-08-18 16:35:09 -07001586}
1587
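/* Allocate a tunables instance initialised with the built-in defaults. */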
Junjie Wuc5a97d92014-05-23 12:22:59 -07001588static struct cpufreq_interactive_tunables *alloc_tunable(
1589 struct cpufreq_policy *policy)
1590{
1591 struct cpufreq_interactive_tunables *tunables;
1592
1593 tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
1594 if (!tunables)
1595 return ERR_PTR(-ENOMEM);
1596
1597 tunables->above_hispeed_delay = default_above_hispeed_delay;
1598 tunables->nabove_hispeed_delay =
1599 ARRAY_SIZE(default_above_hispeed_delay);
1600 tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
1601 tunables->target_loads = default_target_loads;
1602 tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
1603 tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
1604 tunables->timer_rate = DEFAULT_TIMER_RATE;
1605 tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
1606 tunables->timer_slack_val = DEFAULT_TIMER_SLACK;
1607
1608 spin_lock_init(&tunables->target_loads_lock);
1609 spin_lock_init(&tunables->above_hispeed_delay_lock);
1610
1611 return tunables;
1612}
1613
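/*
 * irq_work handler, raised from the scheduler's cpufreq update_util hook:
 * run the load evaluation for this CPU's policy, then clear
 * work_in_progress so the hook may queue the next evaluation.
 */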
Stephen Boyd1c2271f2017-03-20 18:57:28 -07001614static void irq_work(struct irq_work *irq_work)
1615{
1616 struct cpufreq_interactive_policyinfo *ppol;
1617 unsigned long flags;
1618
1619 ppol = container_of(irq_work, struct cpufreq_interactive_policyinfo,
1620 irq_work);
1621
1622 cpufreq_interactive_timer(smp_processor_id());
1623 spin_lock_irqsave(&ppol->irq_work_lock, flags);
1624 ppol->work_in_progress = false;
1625 spin_unlock_irqrestore(&ppol->irq_work_lock, flags);
1626}
1627
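/*
 * Return the policyinfo for this policy, allocating and initialising it
 * (slack timer, notification hrtimer, irq_work, locks and the per-CPU
 * sched_load buffer) on first use.  The same instance is shared by all
 * CPUs in policy->related_cpus.
 */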
Junjie Wucf531ef2015-04-17 12:48:36 -07001628static struct cpufreq_interactive_policyinfo *get_policyinfo(
1629 struct cpufreq_policy *policy)
Saravana Kannan07c2aa62014-07-22 15:42:51 -07001630{
Junjie Wucf531ef2015-04-17 12:48:36 -07001631 struct cpufreq_interactive_policyinfo *ppol =
1632 per_cpu(polinfo, policy->cpu);
1633 int i;
Joonwoo Park22d94972015-09-15 09:35:53 -07001634 struct sched_load *sl;
Saravana Kannan07c2aa62014-07-22 15:42:51 -07001635
Junjie Wucf531ef2015-04-17 12:48:36 -07001636 /* polinfo already allocated for policy, return */
1637 if (ppol)
1638 return ppol;
1639
1640 ppol = kzalloc(sizeof(*ppol), GFP_KERNEL);
1641 if (!ppol)
1642 return ERR_PTR(-ENOMEM);
1643
Joonwoo Park22d94972015-09-15 09:35:53 -07001644 sl = kcalloc(cpumask_weight(policy->related_cpus), sizeof(*sl),
1645 GFP_KERNEL);
1646 if (!sl) {
Junjie Wufef75c02015-05-26 17:54:38 -07001647 kfree(ppol);
1648 return ERR_PTR(-ENOMEM);
1649 }
Joonwoo Park22d94972015-09-15 09:35:53 -07001650 ppol->sl = sl;
Junjie Wufef75c02015-05-26 17:54:38 -07001651
Junjie Wucf531ef2015-04-17 12:48:36 -07001652 init_timer(&ppol->policy_slack_timer);
1653 ppol->policy_slack_timer.function = cpufreq_interactive_nop_timer;
Junjie Wuaceecc062015-09-18 18:13:01 -07001654 hrtimer_init(&ppol->notif_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1655 ppol->notif_timer.function = cpufreq_interactive_hrtimer;
Stephen Boyd1c2271f2017-03-20 18:57:28 -07001656 init_irq_work(&ppol->irq_work, irq_work);
1657 spin_lock_init(&ppol->irq_work_lock);
Junjie Wucf531ef2015-04-17 12:48:36 -07001658 spin_lock_init(&ppol->load_lock);
1659 spin_lock_init(&ppol->target_freq_lock);
1660 init_rwsem(&ppol->enable_sem);
1661
1662 for_each_cpu(i, policy->related_cpus)
1663 per_cpu(polinfo, i) = ppol;
1664 return ppol;
1665}
1666
1667/* This function is not multithread-safe. */
1668static void free_policyinfo(int cpu)
1669{
1670 struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
1671 int j;
1672
1673 if (!ppol)
1674 return;
1675
1676 for_each_possible_cpu(j)
1677 if (per_cpu(polinfo, j) == ppol)
1678			per_cpu(polinfo, j) = NULL;
1679 kfree(ppol->cached_tunables);
Joonwoo Park22d94972015-09-15 09:35:53 -07001680 kfree(ppol->sl);
Junjie Wucf531ef2015-04-17 12:48:36 -07001681 kfree(ppol);
1682}
1683
1684static struct cpufreq_interactive_tunables *get_tunables(
1685 struct cpufreq_interactive_policyinfo *ppol)
1686{
Saravana Kannan07c2aa62014-07-22 15:42:51 -07001687 if (have_governor_per_policy())
Junjie Wucf531ef2015-04-17 12:48:36 -07001688 return ppol->cached_tunables;
Saravana Kannan07c2aa62014-07-22 15:42:51 -07001689 else
Junjie Wucf531ef2015-04-17 12:48:36 -07001690 return cached_common_tunables;
Junjie Wuc5a97d92014-05-23 12:22:59 -07001691}
1692
Stephen Boyd9a864832017-03-13 16:49:15 -07001693/* Interactive Governor callbacks */
1694struct interactive_governor {
1695 struct cpufreq_governor gov;
1696 unsigned int usage_count;
1697};
1698
1699static struct interactive_governor interactive_gov;
1700
1701#define CPU_FREQ_GOV_INTERACTIVE (&interactive_gov.gov)
1702
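/*
 * Governor ->init callback: allocate (or reuse cached) tunables for the
 * policy, create the sysfs attribute group, and register the cpufreq
 * transition notifier when the governor gets its first user.
 */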
1703int cpufreq_interactive_init(struct cpufreq_policy *policy)
Mike Chanef969692010-06-22 11:26:45 -07001704{
1705 int rc;
Junjie Wucf531ef2015-04-17 12:48:36 -07001706 struct cpufreq_interactive_policyinfo *ppol;
Stephen Boyd9a864832017-03-13 16:49:15 -07001707 struct cpufreq_interactive_tunables *tunables;
1708
1709 if (have_governor_per_policy())
1710 tunables = policy->governor_data;
1711 else
1712 tunables = common_tunables;
1713
1714 ppol = get_policyinfo(policy);
1715 if (IS_ERR(ppol))
1716 return PTR_ERR(ppol);
1717
1718 if (have_governor_per_policy()) {
1719 WARN_ON(tunables);
1720 } else if (tunables) {
1721 tunables->usage_count++;
1722 cpumask_or(&controlled_cpus, &controlled_cpus,
1723 policy->related_cpus);
1724 sched_update_freq_max_load(policy->related_cpus);
1725 policy->governor_data = tunables;
1726 return 0;
1727 }
1728
1729 tunables = get_tunables(ppol);
1730 if (!tunables) {
1731 tunables = alloc_tunable(policy);
1732 if (IS_ERR(tunables))
1733 return PTR_ERR(tunables);
1734 }
1735
1736 tunables->usage_count = 1;
1737 policy->governor_data = tunables;
1738 if (!have_governor_per_policy())
1739 common_tunables = tunables;
1740
1741 rc = sysfs_create_group(get_governor_parent_kobj(policy),
1742 get_sysfs_attr());
1743 if (rc) {
1744 kfree(tunables);
1745 policy->governor_data = NULL;
1746 if (!have_governor_per_policy())
1747 common_tunables = NULL;
1748 return rc;
1749 }
1750
1751 if (!interactive_gov.usage_count++)
1752 cpufreq_register_notifier(&cpufreq_notifier_block,
1753 CPUFREQ_TRANSITION_NOTIFIER);
1754
1755 if (tunables->use_sched_load)
1756 cpufreq_interactive_enable_sched_input(tunables);
1757
1758 cpumask_or(&controlled_cpus, &controlled_cpus,
1759 policy->related_cpus);
1760 sched_update_freq_max_load(policy->related_cpus);
1761
1762 if (have_governor_per_policy())
1763 ppol->cached_tunables = tunables;
1764 else
1765 cached_common_tunables = tunables;
1766
1767 return 0;
1768}
1769
1770void cpufreq_interactive_exit(struct cpufreq_policy *policy)
1771{
1772 struct cpufreq_interactive_tunables *tunables;
1773
1774 if (have_governor_per_policy())
1775 tunables = policy->governor_data;
1776 else
1777 tunables = common_tunables;
1778
1779 BUG_ON(!tunables);
1780
1781 cpumask_andnot(&controlled_cpus, &controlled_cpus,
1782 policy->related_cpus);
1783 sched_update_freq_max_load(cpu_possible_mask);
1784 if (!--tunables->usage_count) {
1785		/* Last policy using the governor? */
1786 if (!--interactive_gov.usage_count)
1787 cpufreq_unregister_notifier(&cpufreq_notifier_block,
1788 CPUFREQ_TRANSITION_NOTIFIER);
1789
1790 sysfs_remove_group(get_governor_parent_kobj(policy),
1791 get_sysfs_attr());
1792
1793 common_tunables = NULL;
1794 }
1795
1796 policy->governor_data = NULL;
1797
1798 if (tunables->use_sched_load)
1799 cpufreq_interactive_disable_sched_input(tunables);
1800}
1801
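/*
 * Governor ->start callback: snapshot the policy into the policyinfo,
 * seed the floor/hispeed validation times, and start load evaluation for
 * this policy.
 */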
1802int cpufreq_interactive_start(struct cpufreq_policy *policy)
1803{
1804 struct cpufreq_interactive_policyinfo *ppol;
Mike Chanef969692010-06-22 11:26:45 -07001805 struct cpufreq_frequency_table *freq_table;
Viresh Kumar17d15c42013-05-16 14:58:54 +05301806 struct cpufreq_interactive_tunables *tunables;
1807
1808 if (have_governor_per_policy())
1809 tunables = policy->governor_data;
1810 else
1811 tunables = common_tunables;
1812
Stephen Boyd9a864832017-03-13 16:49:15 -07001813 BUG_ON(!tunables);
1814 mutex_lock(&gov_lock);
Mike Chanef969692010-06-22 11:26:45 -07001815
Stephen Boyd9a864832017-03-13 16:49:15 -07001816 freq_table = policy->freq_table;
1817 if (!tunables->hispeed_freq)
1818 tunables->hispeed_freq = policy->max;
Junjie Wucf531ef2015-04-17 12:48:36 -07001819
Stephen Boyd9a864832017-03-13 16:49:15 -07001820 ppol = per_cpu(polinfo, policy->cpu);
1821 ppol->policy = policy;
1822 ppol->target_freq = policy->cur;
1823 ppol->freq_table = freq_table;
1824 ppol->p_nolim = *policy;
1825 ppol->p_nolim.min = policy->cpuinfo.min_freq;
1826 ppol->p_nolim.max = policy->cpuinfo.max_freq;
1827 ppol->floor_freq = ppol->target_freq;
1828 ppol->floor_validate_time = ktime_to_us(ktime_get());
1829 ppol->hispeed_validate_time = ppol->floor_validate_time;
1830 ppol->min_freq = policy->min;
1831 ppol->reject_notification = true;
1832 ppol->notif_pending = false;
1833 down_write(&ppol->enable_sem);
Stephen Boyd9a864832017-03-13 16:49:15 -07001834 del_timer_sync(&ppol->policy_slack_timer);
Stephen Boyd9a864832017-03-13 16:49:15 -07001835 ppol->last_evaluated_jiffy = get_jiffies_64();
1836 cpufreq_interactive_timer_start(tunables, policy->cpu);
1837 ppol->governor_enabled = 1;
1838 up_write(&ppol->enable_sem);
1839 ppol->reject_notification = false;
Viresh Kumar17d15c42013-05-16 14:58:54 +05301840
Stephen Boyd9a864832017-03-13 16:49:15 -07001841 mutex_unlock(&gov_lock);
Mike Chanef969692010-06-22 11:26:45 -07001842 return 0;
1843}
1844
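/*
 * Governor ->stop callback: disable evaluation for this policy and make
 * sure the update_util hooks, pending irq_work and the slack timer are
 * all quiesced before returning.
 */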
Stephen Boyd9a864832017-03-13 16:49:15 -07001845void cpufreq_interactive_stop(struct cpufreq_policy *policy)
1846{
1847 struct cpufreq_interactive_policyinfo *ppol;
1848 struct cpufreq_interactive_tunables *tunables;
1849
1850 if (have_governor_per_policy())
1851 tunables = policy->governor_data;
1852 else
1853 tunables = common_tunables;
1854
1855 BUG_ON(!tunables);
1856
1857 mutex_lock(&gov_lock);
1858
1859 ppol = per_cpu(polinfo, policy->cpu);
1860 ppol->reject_notification = true;
1861 down_write(&ppol->enable_sem);
1862 ppol->governor_enabled = 0;
1863 ppol->target_freq = 0;
Stephen Boyd1c2271f2017-03-20 18:57:28 -07001864 gov_clear_update_util(ppol->policy);
1865 irq_work_sync(&ppol->irq_work);
1866 ppol->work_in_progress = false;
Stephen Boyd9a864832017-03-13 16:49:15 -07001867 del_timer_sync(&ppol->policy_slack_timer);
1868 up_write(&ppol->enable_sem);
1869 ppol->reject_notification = false;
1870
1871 mutex_unlock(&gov_lock);
1872}
1873
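/*
 * Governor ->limits callback: re-apply the current target frequency
 * within the new policy limits and reschedule an evaluation if the new
 * minimum dropped below the previously cached value.
 */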
1874void cpufreq_interactive_limits(struct cpufreq_policy *policy)
1875{
1876 struct cpufreq_interactive_policyinfo *ppol;
1877 struct cpufreq_interactive_tunables *tunables;
1878
1879 if (have_governor_per_policy())
1880 tunables = policy->governor_data;
1881 else
1882 tunables = common_tunables;
1883
1884 BUG_ON(!tunables);
1885 ppol = per_cpu(polinfo, policy->cpu);
1886
1887 __cpufreq_driver_target(policy,
1888 ppol->target_freq, CPUFREQ_RELATION_L);
1889
1890 down_read(&ppol->enable_sem);
1891 if (ppol->governor_enabled) {
1892 if (policy->min < ppol->min_freq)
1893 cpufreq_interactive_timer_resched(policy->cpu,
1894 true);
1895 ppol->min_freq = policy->min;
1896 }
1897 up_read(&ppol->enable_sem);
1898}
1899
1900static struct interactive_governor interactive_gov = {
1901 .gov = {
1902 .name = "interactive",
1903 .max_transition_latency = 10000000,
1904 .owner = THIS_MODULE,
1905 .init = cpufreq_interactive_init,
1906 .exit = cpufreq_interactive_exit,
1907 .start = cpufreq_interactive_start,
1908 .stop = cpufreq_interactive_stop,
1909 .limits = cpufreq_interactive_limits,
1910 }
Viresh Kumarc7f826b2013-05-16 14:58:53 +05301911};
1912
Stephen Boyd9a864832017-03-13 16:49:15 -07001913static int __init cpufreq_interactive_gov_init(void)
Mike Chanef969692010-06-22 11:26:45 -07001914{
Mike Chanef969692010-06-22 11:26:45 -07001915 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
1916
Todd Poynor0f1920b2012-07-16 17:07:15 -07001917 spin_lock_init(&speedchange_cpumask_lock);
Lianwei Wang1d4f9a72013-01-07 14:15:51 +08001918 mutex_init(&gov_lock);
Junjie Wu4344ea32014-04-28 16:22:24 -07001919 mutex_init(&sched_lock);
Todd Poynor0f1920b2012-07-16 17:07:15 -07001920 speedchange_task =
1921 kthread_create(cpufreq_interactive_speedchange_task, NULL,
1922 "cfinteractive");
1923 if (IS_ERR(speedchange_task))
1924 return PTR_ERR(speedchange_task);
Sam Leffler5c9b8272012-06-27 12:55:56 -07001925
Todd Poynor0f1920b2012-07-16 17:07:15 -07001926 sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
1927 get_task_struct(speedchange_task);
Mike Chanef969692010-06-22 11:26:45 -07001928
Sam Leffler5c9b8272012-06-27 12:55:56 -07001929 /* NB: wake up so the thread does not look hung to the freezer */
Puja Gupta487dec62017-06-27 10:13:50 -07001930 wake_up_process(speedchange_task);
Sam Leffler5c9b8272012-06-27 12:55:56 -07001931
Stephen Boyd9a864832017-03-13 16:49:15 -07001932 return cpufreq_register_governor(CPU_FREQ_GOV_INTERACTIVE);
Mike Chanef969692010-06-22 11:26:45 -07001933}
1934
1935#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
Stephen Boyd9a864832017-03-13 16:49:15 -07001936struct cpufreq_governor *cpufreq_default_governor(void)
1937{
1938 return CPU_FREQ_GOV_INTERACTIVE;
1939}
1940
1941fs_initcall(cpufreq_interactive_gov_init);
Mike Chanef969692010-06-22 11:26:45 -07001942#else
Stephen Boyd9a864832017-03-13 16:49:15 -07001943module_init(cpufreq_interactive_gov_init);
Mike Chanef969692010-06-22 11:26:45 -07001944#endif
1945
Stephen Boyd9a864832017-03-13 16:49:15 -07001946static void __exit cpufreq_interactive_gov_exit(void)
Mike Chanef969692010-06-22 11:26:45 -07001947{
Junjie Wuc5a97d92014-05-23 12:22:59 -07001948 int cpu;
1949
Stephen Boyd9a864832017-03-13 16:49:15 -07001950 cpufreq_unregister_governor(CPU_FREQ_GOV_INTERACTIVE);
Todd Poynor0f1920b2012-07-16 17:07:15 -07001951 kthread_stop(speedchange_task);
1952 put_task_struct(speedchange_task);
Junjie Wuc5a97d92014-05-23 12:22:59 -07001953
Junjie Wucf531ef2015-04-17 12:48:36 -07001954 for_each_possible_cpu(cpu)
1955 free_policyinfo(cpu);
Mike Chanef969692010-06-22 11:26:45 -07001956}
1957
Stephen Boyd9a864832017-03-13 16:49:15 -07001958module_exit(cpufreq_interactive_gov_exit);
Mike Chanef969692010-06-22 11:26:45 -07001959
1960MODULE_AUTHOR("Mike Chan <mike@android.com>");
1961MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
1962	"latency-sensitive workloads");
1963MODULE_LICENSE("GPL");