/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

struct cpufreq_interactive_policyinfo {
	struct timer_list policy_timer;
	struct timer_list policy_slack_timer;
	struct hrtimer notif_timer;
	spinlock_t load_lock; /* protects load tracking stat */
	u64 last_evaluated_jiffy;
	struct cpufreq_policy *policy;
	struct cpufreq_policy p_nolim; /* policy copy with no limits */
	struct cpufreq_frequency_table *freq_table;
	spinlock_t target_freq_lock; /* protects target freq */
	unsigned int target_freq;
	unsigned int floor_freq;
	unsigned int min_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	u64 max_freq_hyst_start_time;
	struct rw_semaphore enable_sem;
	bool reject_notification;
	bool notif_pending;
	unsigned long notif_cpu;
	int governor_enabled;
	struct cpufreq_interactive_tunables *cached_tunables;
	struct sched_load *sl;
};

/* Protected by per-policy load_lock */
struct cpufreq_interactive_cpuinfo {
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	unsigned int loadadjfreq;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_policyinfo *, polinfo);
static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

static int set_window_count;
static int migration_register_count;
static struct mutex sched_lock;
static cpumask_t controlled_cpus;

/* Target load. Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
	DEFAULT_ABOVE_HISPEED_DELAY };

struct cpufreq_interactive_tunables {
	int usage_count;
	/* Hi speed to bump to from lo speed when load burst (default max) */
	unsigned int hispeed_freq;
	/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
	unsigned long go_hispeed_load;
	/* Target load. Lower values result in higher CPU speeds. */
	spinlock_t target_loads_lock;
	unsigned int *target_loads;
	int ntarget_loads;
	/*
	 * The minimum amount of time to spend at a frequency before we can
	 * ramp down.
	 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
	unsigned long min_sample_time;
	/*
	 * The sample rate of the timer used to increase frequency
	 */
	unsigned long timer_rate;
	/*
	 * Wait this long before raising speed above hispeed, by default a
	 * single timer interval.
	 */
	spinlock_t above_hispeed_delay_lock;
	unsigned int *above_hispeed_delay;
	int nabove_hispeed_delay;
	/* Non-zero means indefinite speed boost active */
	int boost_val;
	/* Duration of a boost pulse in usecs */
	int boostpulse_duration_val;
	/* End time of boost pulse in ktime converted to usecs */
	u64 boostpulse_endtime;
	bool boosted;
	/*
	 * Max additional time to wait in idle, beyond timer_rate, at speeds
	 * above minimum before wakeup to reduce speed, or -1 if unnecessary.
	 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
	int timer_slack_val;
	bool io_is_busy;

	/* scheduler input related flags */
	bool use_sched_load;
	bool use_migration_notif;

	/*
	 * Whether to align timer windows across all CPUs. When
	 * use_sched_load is true, this flag is ignored and windows
	 * will always be aligned.
	 */
	bool align_windows;

	/*
	 * Stay at max freq for at least max_freq_hysteresis before dropping
	 * frequency.
	 */
	unsigned int max_freq_hysteresis;

	/* Ignore hispeed_freq and above_hispeed_delay for notification */
	bool ignore_hispeed_on_notif;

	/* Ignore min_sample_time for notification */
	bool fast_ramp_down;

	/* Whether to enable prediction or not */
	bool enable_prediction;
};
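
/*
 * Default tunable values, for reference (taken directly from the macros
 * above): target_loads = 90, go_hispeed_load = 99, min_sample_time = 80ms,
 * timer_rate = 20ms, above_hispeed_delay = 20ms, and timer_slack = 80ms
 * (4 * timer_rate).
 */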

/*
 * HACK: FIXME: Bring back cpufreq_{get,put}_global_kobject()
 * definition removed by upstream commit 8eec1020f0c0 "cpufreq:
 * create cpu/cpufreq at boot time" to fix build failures.
 */
static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
	if (!cpufreq_global_kobject_usage++)
		return kobject_add(cpufreq_global_kobject,
				   &cpu_subsys.dev_root->kobj, "%s", "cpufreq");

	return 0;
}

void cpufreq_put_global_kobject(void)
{
	if (!--cpufreq_global_kobject_usage)
		kobject_del(cpufreq_global_kobject);
}

/* For cases where we have single governor instance for system */
static struct cpufreq_interactive_tunables *common_tunables;
static struct cpufreq_interactive_tunables *cached_common_tunables;

static struct attribute_group *get_sysfs_attr(void);

/* Round to starting jiffy of next evaluation window */
static u64 round_to_nw_start(u64 jif,
			     struct cpufreq_interactive_tunables *tunables)
{
	unsigned long step = usecs_to_jiffies(tunables->timer_rate);
	u64 ret;

	if (tunables->use_sched_load || tunables->align_windows) {
		do_div(jif, step);
		ret = (jif + 1) * step;
	} else {
		ret = jiffies + usecs_to_jiffies(tunables->timer_rate);
	}

	return ret;
}
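
/*
 * Example (illustrative, assuming HZ=100 and timer_rate = 20000us, so
 * step = 2 jiffies): with aligned windows, jif = 103 yields
 * (103 / 2 + 1) * 2 = 104, i.e. every policy's timer fires on the same
 * even-jiffy boundary. Without alignment, the next expiry is simply
 * "now + timer_rate", so windows drift independently per policy.
 */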

static inline int set_window_helper(
			struct cpufreq_interactive_tunables *tunables)
{
	return sched_set_window(round_to_nw_start(get_jiffies_64(), tunables),
				usecs_to_jiffies(tunables->timer_rate));
}

static void cpufreq_interactive_timer_resched(unsigned long cpu,
					      bool slack_only)
{
	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_interactive_tunables *tunables =
		ppol->policy->governor_data;
	u64 expires;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ppol->load_lock, flags);
	expires = round_to_nw_start(ppol->last_evaluated_jiffy, tunables);
	if (!slack_only) {
		for_each_cpu(i, ppol->policy->cpus) {
			pcpu = &per_cpu(cpuinfo, i);
			pcpu->time_in_idle = get_cpu_idle_time(i,
						&pcpu->time_in_idle_timestamp,
						tunables->io_is_busy);
			pcpu->cputime_speedadj = 0;
			pcpu->cputime_speedadj_timestamp =
						pcpu->time_in_idle_timestamp;
		}
		del_timer(&ppol->policy_timer);
		ppol->policy_timer.expires = expires;
		add_timer(&ppol->policy_timer);
	}

	if (tunables->timer_slack_val >= 0 &&
	    ppol->target_freq > ppol->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		del_timer(&ppol->policy_slack_timer);
		ppol->policy_slack_timer.expires = expires;
		add_timer(&ppol->policy_slack_timer);
	}

	spin_unlock_irqrestore(&ppol->load_lock, flags);
}

/* The caller shall take enable_sem write semaphore to avoid any timer race.
 * The policy_timer and policy_slack_timer must be deactivated when calling
 * this function.
 */
static void cpufreq_interactive_timer_start(
	struct cpufreq_interactive_tunables *tunables, int cpu)
{
	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
	struct cpufreq_interactive_cpuinfo *pcpu;
	u64 expires = round_to_nw_start(ppol->last_evaluated_jiffy, tunables);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ppol->load_lock, flags);
	ppol->policy_timer.expires = expires;
	add_timer(&ppol->policy_timer);
	if (tunables->timer_slack_val >= 0 &&
	    ppol->target_freq > ppol->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		ppol->policy_slack_timer.expires = expires;
		add_timer(&ppol->policy_slack_timer);
	}

	for_each_cpu(i, ppol->policy->cpus) {
		pcpu = &per_cpu(cpuinfo, i);
		pcpu->time_in_idle =
			get_cpu_idle_time(i, &pcpu->time_in_idle_timestamp,
					  tunables->io_is_busy);
		pcpu->cputime_speedadj = 0;
		pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	}
	spin_unlock_irqrestore(&ppol->load_lock, flags);
}

static unsigned int freq_to_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
	     freq >= tunables->above_hispeed_delay[i+1]; i += 2)
		;

	ret = tunables->above_hispeed_delay[i];
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static unsigned int freq_to_targetload(
	struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads - 1 &&
	     freq >= tunables->target_loads[i+1]; i += 2)
		;

	ret = tunables->target_loads[i];
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}
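
/*
 * Example (illustrative): target_loads and above_hispeed_delay are stored
 * as flat arrays of alternating value/frequency entries, e.g. the sysfs
 * string "85 1500000:90" parses to {85, 1500000, 90}: use 85 below
 * 1.5GHz and 90 at or above it. The lookups above step through the odd
 * (frequency) entries and return the even (value) entry for the range
 * containing freq.
 */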

#define DEFAULT_MAX_LOAD 100
u32 get_freq_max_load(int cpu, unsigned int freq)
{
	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);

	if (!cpumask_test_cpu(cpu, &controlled_cpus))
		return DEFAULT_MAX_LOAD;

	if (have_governor_per_policy()) {
		if (!ppol || !ppol->cached_tunables)
			return DEFAULT_MAX_LOAD;
		return freq_to_targetload(ppol->cached_tunables, freq);
	}

	if (!cached_common_tunables)
		return DEFAULT_MAX_LOAD;
	return freq_to_targetload(cached_common_tunables, freq);
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
static unsigned int choose_freq(struct cpufreq_interactive_policyinfo *pcpu,
				unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(pcpu->policy->governor_data, freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		if (cpufreq_frequency_table_target(
			    &pcpu->p_nolim, pcpu->freq_table, loadadjfreq / tl,
			    CPUFREQ_RELATION_L, &index))
			break;
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				if (cpufreq_frequency_table_target(
					    &pcpu->p_nolim, pcpu->freq_table,
					    freqmax - 1, CPUFREQ_RELATION_H,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low. freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				if (cpufreq_frequency_table_target(
					    &pcpu->p_nolim, pcpu->freq_table,
					    freqmin + 1, CPUFREQ_RELATION_L,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}
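
/*
 * Worked example (illustrative): assume a frequency table of {300000,
 * 600000, 900000, 1200000} kHz, a flat target load of 90, cur = 300000
 * and 99% load, so loadadjfreq = 99 * 300000 = 29,700,000 (load% x kHz).
 * Pass 1: loadadjfreq / tl = 330000; CPUFREQ_RELATION_L picks 600000.
 * Pass 2 re-evaluates at 600000: the lowest frequency satisfying the
 * target is again 600000, so freq == prevfreq and 600000 is returned.
 */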

static u64 update_load(int cpu)
{
	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	struct cpufreq_interactive_tunables *tunables =
		ppol->policy->governor_data;
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

	if (delta_time <= delta_idle)
		active_time = 0;
	else
		active_time = delta_time - delta_idle;

	pcpu->cputime_speedadj += active_time * ppol->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}
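
/*
 * Example (illustrative): over a 20ms window with 10ms of non-idle time
 * at 1000000kHz, cputime_speedadj accumulates 10000us * 1000000kHz. The
 * timer later divides by the 20ms wall time and scales by 100, giving
 * 50 * 1000000, i.e. "load% x frequency". choose_freq() then divides
 * that by a target load to pick a frequency.
 */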

static unsigned int sl_busy_to_laf(struct cpufreq_interactive_policyinfo *ppol,
				   unsigned long busy)
{
	int prev_load;
	struct cpufreq_interactive_tunables *tunables =
		ppol->policy->governor_data;

	prev_load = mult_frac(ppol->policy->cpuinfo.max_freq * 100,
			      busy, tunables->timer_rate);
	return prev_load;
}
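
/*
 * Example (illustrative): with timer_rate = 20000us and max_freq =
 * 1500000kHz, busy = 5000us of scheduler-reported work maps to
 * mult_frac(1500000 * 100, 5000, 20000) = 37,500,000, i.e. 25% of max
 * frequency expressed as load% x kHz, matching the units produced by
 * update_load().
 */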

#define NEW_TASK_RATIO 75
#define PRED_TOLERANCE_PCT 10
static void cpufreq_interactive_timer(unsigned long data)
{
	s64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	int pol_load = 0;
	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, data);
	struct cpufreq_interactive_tunables *tunables =
		ppol->policy->governor_data;
	struct sched_load *sl = ppol->sl;
	struct cpufreq_interactive_cpuinfo *pcpu;
	unsigned int new_freq;
	unsigned int prev_laf = 0, t_prevlaf;
	unsigned int pred_laf = 0, t_predlaf = 0;
	unsigned int prev_chfreq, pred_chfreq, chosen_freq;
	unsigned int index;
	unsigned long flags;
	unsigned long max_cpu;
	int cpu, i;
	int new_load_pct = 0;
	int prev_l, pred_l = 0;
	struct cpufreq_govinfo govinfo;
	bool skip_hispeed_logic, skip_min_sample_time;
	bool jump_to_max_no_ts = false;
	bool jump_to_max = false;

	if (!down_read_trylock(&ppol->enable_sem))
		return;
	if (!ppol->governor_enabled)
		goto exit;

	now = ktime_to_us(ktime_get());

	spin_lock_irqsave(&ppol->target_freq_lock, flags);
	spin_lock(&ppol->load_lock);

	skip_hispeed_logic = tunables->enable_prediction ? true :
		tunables->ignore_hispeed_on_notif && ppol->notif_pending;
	skip_min_sample_time = tunables->fast_ramp_down && ppol->notif_pending;
	ppol->notif_pending = false;
	now = ktime_to_us(ktime_get());
	ppol->last_evaluated_jiffy = get_jiffies_64();

	if (tunables->use_sched_load)
		sched_get_cpus_busy(sl, ppol->policy->cpus);
	max_cpu = cpumask_first(ppol->policy->cpus);
	i = 0;
	for_each_cpu(cpu, ppol->policy->cpus) {
		pcpu = &per_cpu(cpuinfo, cpu);
		if (tunables->use_sched_load) {
			t_prevlaf = sl_busy_to_laf(ppol, sl[i].prev_load);
			prev_l = t_prevlaf / ppol->target_freq;
			if (tunables->enable_prediction) {
				t_predlaf = sl_busy_to_laf(ppol,
						sl[i].predicted_load);
				pred_l = t_predlaf / ppol->target_freq;
			}
			if (sl[i].prev_load)
				new_load_pct = sl[i].new_task_load * 100 /
							sl[i].prev_load;
			else
				new_load_pct = 0;
		} else {
			now = update_load(cpu);
			delta_time = (unsigned int)
				(now - pcpu->cputime_speedadj_timestamp);
			if (WARN_ON_ONCE(!delta_time))
				continue;
			cputime_speedadj = pcpu->cputime_speedadj;
			do_div(cputime_speedadj, delta_time);
			t_prevlaf = (unsigned int)cputime_speedadj * 100;
			prev_l = t_prevlaf / ppol->target_freq;
		}

		/* find max of loadadjfreq inside policy */
		if (t_prevlaf > prev_laf) {
			prev_laf = t_prevlaf;
			max_cpu = cpu;
		}
		pred_laf = max(t_predlaf, pred_laf);

		cpu_load = max(prev_l, pred_l);
		pol_load = max(pol_load, cpu_load);
		trace_cpufreq_interactive_cpuload(cpu, cpu_load, new_load_pct,
						  prev_l, pred_l);

		/* save loadadjfreq for notification */
		pcpu->loadadjfreq = max(t_prevlaf, t_predlaf);

		/* detect heavy new task and jump to policy->max */
		if (prev_l >= tunables->go_hispeed_load &&
		    new_load_pct >= NEW_TASK_RATIO) {
			skip_hispeed_logic = true;
			jump_to_max = true;
		}
		i++;
	}
	spin_unlock(&ppol->load_lock);

	tunables->boosted = tunables->boost_val || now < tunables->boostpulse_endtime;

	prev_chfreq = choose_freq(ppol, prev_laf);
	pred_chfreq = choose_freq(ppol, pred_laf);
	chosen_freq = max(prev_chfreq, pred_chfreq);

	if (prev_chfreq < ppol->policy->max && pred_chfreq >= ppol->policy->max)
		if (!jump_to_max)
			jump_to_max_no_ts = true;

	if (now - ppol->max_freq_hyst_start_time <
	    tunables->max_freq_hysteresis &&
	    pol_load >= tunables->go_hispeed_load &&
	    ppol->target_freq < ppol->policy->max) {
		skip_hispeed_logic = true;
		skip_min_sample_time = true;
		if (!jump_to_max)
			jump_to_max_no_ts = true;
	}

	new_freq = chosen_freq;
	if (jump_to_max_no_ts || jump_to_max) {
		new_freq = ppol->policy->cpuinfo.max_freq;
	} else if (!skip_hispeed_logic) {
		if (pol_load >= tunables->go_hispeed_load ||
		    tunables->boosted) {
			if (ppol->target_freq < tunables->hispeed_freq)
				new_freq = tunables->hispeed_freq;
			else
				new_freq = max(new_freq,
					       tunables->hispeed_freq);
		}
	}

	if (now - ppol->max_freq_hyst_start_time <
	    tunables->max_freq_hysteresis)
		new_freq = max(tunables->hispeed_freq, new_freq);

	if (!skip_hispeed_logic &&
	    ppol->target_freq >= tunables->hispeed_freq &&
	    new_freq > ppol->target_freq &&
	    now - ppol->hispeed_validate_time <
	    freq_to_above_hispeed_delay(tunables, ppol->target_freq)) {
		trace_cpufreq_interactive_notyet(
			max_cpu, pol_load, ppol->target_freq,
			ppol->policy->cur, new_freq);
		spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
		goto rearm;
	}

	ppol->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(&ppol->p_nolim, ppol->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
		goto rearm;
	}

	new_freq = ppol->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (!skip_min_sample_time && new_freq < ppol->floor_freq) {
		if (now - ppol->floor_validate_time <
		    tunables->min_sample_time) {
			trace_cpufreq_interactive_notyet(
				max_cpu, pol_load, ppol->target_freq,
				ppol->policy->cur, new_freq);
			spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq. If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off). If policy->max is restored
	 * for max_freq_hysteresis, don't extend the timestamp. Otherwise, it
	 * could incorrectly extend the duration of max_freq_hysteresis by
	 * min_sample_time.
	 */

	if ((!tunables->boosted || new_freq > tunables->hispeed_freq)
	    && !jump_to_max_no_ts) {
		ppol->floor_freq = new_freq;
		ppol->floor_validate_time = now;
	}

	if (new_freq >= ppol->policy->max && !jump_to_max_no_ts)
		ppol->max_freq_hyst_start_time = now;

	if (ppol->target_freq == new_freq &&
	    ppol->target_freq <= ppol->policy->cur) {
		trace_cpufreq_interactive_already(
			max_cpu, pol_load, ppol->target_freq,
			ppol->policy->cur, new_freq);
		spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
		goto rearm;
	}

	trace_cpufreq_interactive_target(max_cpu, pol_load, ppol->target_freq,
					 ppol->policy->cur, new_freq);

	ppol->target_freq = new_freq;
	spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(max_cpu, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process_no_notif(speedchange_task);

rearm:
	if (!timer_pending(&ppol->policy_timer))
		cpufreq_interactive_timer_resched(data, false);

	/*
	 * Send govinfo notification.
	 * Govinfo notification could potentially wake up another thread
	 * managed by its clients. Thread wakeups might trigger a load
	 * change callback that executes this function again. Therefore
	 * no spinlock could be held when sending the notification.
	 */
	for_each_cpu(i, ppol->policy->cpus) {
		pcpu = &per_cpu(cpuinfo, i);
		govinfo.cpu = i;
		govinfo.load = pcpu->loadadjfreq / ppol->policy->max;
		govinfo.sampling_rate_us = tunables->timer_rate;
		atomic_notifier_call_chain(&cpufreq_govinfo_notifier_list,
					   CPUFREQ_LOAD_CHANGE, &govinfo);
	}

exit:
	up_read(&ppol->enable_sem);
	return;
}
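
/*
 * Summary of the evaluation above (illustrative): on each timer expiry the
 * governor 1) samples per-CPU load, from the scheduler or from idle-time
 * deltas, 2) tracks the highest load-adjusted frequency in the policy,
 * 3) runs choose_freq() on both previous and predicted load, 4) applies
 * hispeed/boost floors and above_hispeed_delay throttling, 5) enforces the
 * min_sample_time floor before ramping down, and 6) hands the chosen
 * target to the speedchange task, which performs the actual
 * __cpufreq_driver_target() call.
 */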

static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_policyinfo *ppol;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			ppol = per_cpu(polinfo, cpu);
			if (!down_read_trylock(&ppol->enable_sem))
				continue;
			if (!ppol->governor_enabled) {
				up_read(&ppol->enable_sem);
				continue;
			}

			if (ppol->target_freq != ppol->policy->cur)
				__cpufreq_driver_target(ppol->policy,
							ppol->target_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
						ppol->target_freq,
						ppol->policy->cur);
			up_read(&ppol->enable_sem);
		}
	}

	return 0;
}

static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunables)
{
	int i;
	int anyboost = 0;
	unsigned long flags[2];
	struct cpufreq_interactive_policyinfo *ppol;

	tunables->boosted = true;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);

	for_each_online_cpu(i) {
		ppol = per_cpu(polinfo, i);
		if (!ppol || tunables != ppol->policy->governor_data)
			continue;

		spin_lock_irqsave(&ppol->target_freq_lock, flags[1]);
		if (ppol->target_freq < tunables->hispeed_freq) {
			ppol->target_freq = tunables->hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			ppol->hispeed_validate_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */

		ppol->floor_freq = tunables->hispeed_freq;
		ppol->floor_validate_time = ktime_to_us(ktime_get());
		spin_unlock_irqrestore(&ppol->target_freq_lock, flags[1]);
		break;
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);

	if (anyboost)
		wake_up_process_no_notif(speedchange_task);
}

static int load_change_callback(struct notifier_block *nb, unsigned long val,
				void *data)
{
	unsigned long cpu = (unsigned long) data;
	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
	struct cpufreq_interactive_tunables *tunables;
	unsigned long flags;

	if (!ppol || ppol->reject_notification)
		return 0;

	if (!down_read_trylock(&ppol->enable_sem))
		return 0;
	if (!ppol->governor_enabled)
		goto exit;

	tunables = ppol->policy->governor_data;
	if (!tunables->use_sched_load || !tunables->use_migration_notif)
		goto exit;

	spin_lock_irqsave(&ppol->target_freq_lock, flags);
	ppol->notif_pending = true;
	ppol->notif_cpu = cpu;
	spin_unlock_irqrestore(&ppol->target_freq_lock, flags);

	if (!hrtimer_is_queued(&ppol->notif_timer))
		hrtimer_start(&ppol->notif_timer, ms_to_ktime(1),
			      HRTIMER_MODE_REL);
exit:
	up_read(&ppol->enable_sem);
	return 0;
}

static enum hrtimer_restart cpufreq_interactive_hrtimer(struct hrtimer *timer)
{
	struct cpufreq_interactive_policyinfo *ppol = container_of(timer,
			struct cpufreq_interactive_policyinfo, notif_timer);
	int cpu;

	if (!down_read_trylock(&ppol->enable_sem))
		return HRTIMER_NORESTART;
	if (!ppol->governor_enabled) {
		up_read(&ppol->enable_sem);
		return HRTIMER_NORESTART;
	}
	cpu = ppol->notif_cpu;
	trace_cpufreq_interactive_load_change(cpu);
	del_timer(&ppol->policy_timer);
	del_timer(&ppol->policy_slack_timer);
	cpufreq_interactive_timer(cpu);

	up_read(&ppol->enable_sem);
	return HRTIMER_NORESTART;
}

static struct notifier_block load_notifier_block = {
	.notifier_call = load_change_callback,
};

static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_policyinfo *ppol;
	int cpu;
	unsigned long flags;

	if (val == CPUFREQ_POSTCHANGE) {
		ppol = per_cpu(polinfo, freq->cpu);
		if (!ppol)
			return 0;
		if (!down_read_trylock(&ppol->enable_sem))
			return 0;
		if (!ppol->governor_enabled) {
			up_read(&ppol->enable_sem);
			return 0;
		}

		if (cpumask_first(ppol->policy->cpus) != freq->cpu) {
			up_read(&ppol->enable_sem);
			return 0;
		}
		spin_lock_irqsave(&ppol->load_lock, flags);
		for_each_cpu(cpu, ppol->policy->cpus)
			update_load(cpu);
		spin_unlock_irqrestore(&ppol->load_lock, flags);

		up_read(&ppol->enable_sem);
	}
	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
	const char *cp;
	int i;
	int ntokens = 1;
	unsigned int *tokenized_data;
	int err = -EINVAL;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err;

	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!tokenized_data) {
		err = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
			goto err_kfree;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_kfree;

	*num_tokens = ntokens;
	return tokenized_data;

err_kfree:
	kfree(tokenized_data);
err:
	return ERR_PTR(err);
}
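
/*
 * Example (illustrative): get_tokenized_data("85 1500000:90", &n) returns
 * {85, 1500000, 90} with n = 3. An even token count (e.g. "85 1500000")
 * is rejected with -EINVAL, since the value/frequency pairs must always
 * end on a value.
 */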

static ssize_t show_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

static ssize_t store_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_target_loads = NULL;
	unsigned long flags;

	new_target_loads = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_target_loads))
		return PTR_RET(new_target_loads);

	spin_lock_irqsave(&tunables->target_loads_lock, flags);
	if (tunables->target_loads != default_target_loads)
		kfree(tunables->target_loads);
	tunables->target_loads = new_target_loads;
	tunables->ntarget_loads = ntokens;
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);

	sched_update_freq_max_load(&controlled_cpus);

	return count;
}

static ssize_t show_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay; i++)
		ret += sprintf(buf + ret, "%u%s",
			       tunables->above_hispeed_delay[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static ssize_t store_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_above_hispeed_delay = NULL;
	unsigned long flags;

	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_above_hispeed_delay))
		return PTR_RET(new_above_hispeed_delay);

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
	if (tunables->above_hispeed_delay != default_above_hispeed_delay)
		kfree(tunables->above_hispeed_delay);
	tunables->above_hispeed_delay = new_above_hispeed_delay;
	tunables->nabove_hispeed_delay = ntokens;
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return count;
}

static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->hispeed_freq);
}

static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->hispeed_freq = val;
	return count;
}

#define show_store_one(file_name)					\
static ssize_t show_##file_name(					\
	struct cpufreq_interactive_tunables *tunables, char *buf)	\
{									\
	return snprintf(buf, PAGE_SIZE, "%u\n", tunables->file_name);	\
}									\
static ssize_t store_##file_name(					\
		struct cpufreq_interactive_tunables *tunables,		\
		const char *buf, size_t count)				\
{									\
	int ret;							\
	unsigned long val;						\
									\
	ret = kstrtoul(buf, 0, &val);					\
	if (ret < 0)							\
		return ret;						\
	tunables->file_name = val;					\
	return count;							\
}
show_store_one(max_freq_hysteresis);
show_store_one(align_windows);
show_store_one(ignore_hispeed_on_notif);
show_store_one(fast_ramp_down);
show_store_one(enable_prediction);
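
/*
 * For example, show_store_one(max_freq_hysteresis) expands to
 * show_max_freq_hysteresis()/store_max_freq_hysteresis(), the sysfs
 * accessors for the tunable of the same name.
 */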

static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->go_hispeed_load = val;
	return count;
}

static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->min_sample_time);
}

static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->min_sample_time = val;
	return count;
}

static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%lu\n", tunables->timer_rate);
}

static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val, val_round;
	struct cpufreq_interactive_tunables *t;
	int cpu;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	val_round = jiffies_to_usecs(usecs_to_jiffies(val));
	if (val != val_round)
		pr_warn("timer_rate not aligned to jiffy. Rounded up to %lu\n",
			val_round);
	tunables->timer_rate = val_round;

	if (!tunables->use_sched_load)
		return count;

	for_each_possible_cpu(cpu) {
		if (!per_cpu(polinfo, cpu))
			continue;
		t = per_cpu(polinfo, cpu)->cached_tunables;
		if (t && t->use_sched_load)
			t->timer_rate = val_round;
	}
	set_window_helper(tunables);

	return count;
}
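
/*
 * Example (illustrative, assuming HZ=100): writing 25000 rounds through
 * usecs_to_jiffies(25000) = 3 jiffies and back via jiffies_to_usecs(3) =
 * 30000, so the stored timer_rate becomes 30000us and the warning above
 * is printed.
 */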
1155
Viresh Kumar17d15c42013-05-16 14:58:54 +05301156static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
1157 char *buf)
Todd Poynor4add2592012-12-18 17:50:10 -08001158{
Viresh Kumar17d15c42013-05-16 14:58:54 +05301159 return sprintf(buf, "%d\n", tunables->timer_slack_val);
Todd Poynor4add2592012-12-18 17:50:10 -08001160}
1161
Viresh Kumar17d15c42013-05-16 14:58:54 +05301162static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
1163 const char *buf, size_t count)
Todd Poynor4add2592012-12-18 17:50:10 -08001164{
1165 int ret;
1166 unsigned long val;
1167
1168 ret = kstrtol(buf, 10, &val);
1169 if (ret < 0)
1170 return ret;
1171
Viresh Kumar17d15c42013-05-16 14:58:54 +05301172 tunables->timer_slack_val = val;
Todd Poynor4add2592012-12-18 17:50:10 -08001173 return count;
1174}
1175
static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
			  char *buf)
{
	return sprintf(buf, "%d\n", tunables->boost_val);
}

static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boost_val = val;

	if (tunables->boost_val) {
		trace_cpufreq_interactive_boost("on");
		if (!tunables->boosted)
			cpufreq_interactive_boost(tunables);
	} else {
		tunables->boostpulse_endtime = ktime_to_us(ktime_get());
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
		tunables->boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	if (!tunables->boosted)
		cpufreq_interactive_boost(tunables);
	return count;
}

static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_duration_val = val;
	return count;
}
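
/*
 * Usage sketch (illustrative; the path below assumes a system-wide
 * governor instance, i.e. !have_governor_per_policy()):
 *
 *   echo 80000 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse_duration
 *   echo 1 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse
 *
 * Each write to boostpulse re-arms the boost so it expires
 * boostpulse_duration_val microseconds from the time of the write, while
 * writing 1/0 to the boost file turns the boost on/off indefinitely.
 */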

static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->io_is_busy);
}

static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;
	struct cpufreq_interactive_tunables *t;
	int cpu;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->io_is_busy = val;

	if (!tunables->use_sched_load)
		return count;

	for_each_possible_cpu(cpu) {
		if (!per_cpu(polinfo, cpu))
			continue;
		t = per_cpu(polinfo, cpu)->cached_tunables;
		if (t && t->use_sched_load)
			t->io_is_busy = val;
	}
	sched_set_io_is_busy(val);

	return count;
}

static int cpufreq_interactive_enable_sched_input(
		struct cpufreq_interactive_tunables *tunables)
{
	int rc = 0, j;
	struct cpufreq_interactive_tunables *t;

	mutex_lock(&sched_lock);

	set_window_count++;
	if (set_window_count > 1) {
		/* Another instance already set the window; inherit its view. */
		for_each_possible_cpu(j) {
			if (!per_cpu(polinfo, j))
				continue;
			t = per_cpu(polinfo, j)->cached_tunables;
			if (t && t->use_sched_load) {
				tunables->timer_rate = t->timer_rate;
				tunables->io_is_busy = t->io_is_busy;
				break;
			}
		}
	} else {
		rc = set_window_helper(tunables);
		if (rc) {
			pr_err("%s: Failed to set sched window\n", __func__);
			set_window_count--;
			goto out;
		}
		sched_set_io_is_busy(tunables->io_is_busy);
	}

	if (!tunables->use_migration_notif)
		goto out;

	migration_register_count++;
	if (migration_register_count > 1)
		goto out;
	atomic_notifier_chain_register(&load_alert_notifier_head,
				       &load_notifier_block);
out:
	mutex_unlock(&sched_lock);
	return rc;
}

static int cpufreq_interactive_disable_sched_input(
		struct cpufreq_interactive_tunables *tunables)
{
	mutex_lock(&sched_lock);

	if (tunables->use_migration_notif) {
		migration_register_count--;
		if (migration_register_count < 1)
			atomic_notifier_chain_unregister(
					&load_alert_notifier_head,
					&load_notifier_block);
	}
	set_window_count--;

	mutex_unlock(&sched_lock);
	return 0;
}
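
/*
 * Editorial note: set_window_count and migration_register_count are global
 * reference counts shared by every governor instance that enables
 * use_sched_load. Only the first enabler configures the scheduler window
 * (and, if requested, registers the load-alert notifier); only the last
 * disabler unregisters it, so per-cluster instances can come and go
 * independently under sched_lock.
 */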

static ssize_t show_use_sched_load(
		struct cpufreq_interactive_tunables *tunables, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", tunables->use_sched_load);
}

static ssize_t store_use_sched_load(
		struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	if (tunables->use_sched_load == (bool) val)
		return count;

	tunables->use_sched_load = val;

	if (val)
		ret = cpufreq_interactive_enable_sched_input(tunables);
	else
		ret = cpufreq_interactive_disable_sched_input(tunables);

	if (ret) {
		tunables->use_sched_load = !val;
		return ret;
	}

	return count;
}

static ssize_t show_use_migration_notif(
		struct cpufreq_interactive_tunables *tunables, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
			tunables->use_migration_notif);
}

static ssize_t store_use_migration_notif(
		struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	if (tunables->use_migration_notif == (bool) val)
		return count;
	tunables->use_migration_notif = val;

	if (!tunables->use_sched_load)
		return count;

	mutex_lock(&sched_lock);
	if (val) {
		migration_register_count++;
		if (migration_register_count == 1)
			atomic_notifier_chain_register(
					&load_alert_notifier_head,
					&load_notifier_block);
	} else {
		migration_register_count--;
		if (!migration_register_count)
			atomic_notifier_chain_unregister(
					&load_alert_notifier_head,
					&load_notifier_block);
	}
	mutex_unlock(&sched_lock);

	return count;
}

/*
 * Create show/store routines
 * - sys: One governor instance for complete SYSTEM
 * - pol: One governor instance per struct cpufreq_policy
 */
#define show_gov_pol_sys(file_name)					\
static ssize_t show_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return show_##file_name(common_tunables, buf);			\
}									\
									\
static ssize_t show_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, char *buf)				\
{									\
	return show_##file_name(policy->governor_data, buf);		\
}

#define store_gov_pol_sys(file_name)					\
static ssize_t store_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, const char *buf,		\
	size_t count)							\
{									\
	return store_##file_name(common_tunables, buf, count);		\
}									\
									\
static ssize_t store_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	return store_##file_name(policy->governor_data, buf, count);	\
}

#define show_store_gov_pol_sys(file_name)				\
show_gov_pol_sys(file_name);						\
store_gov_pol_sys(file_name)
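
/*
 * Expansion sketch (for reference only): show_store_gov_pol_sys(timer_rate)
 * emits four thin wrappers,
 *
 *   show_timer_rate_gov_sys()  -> show_timer_rate(common_tunables, buf)
 *   show_timer_rate_gov_pol()  -> show_timer_rate(policy->governor_data, buf)
 *   store_timer_rate_gov_sys() -> store_timer_rate(common_tunables, buf, count)
 *   store_timer_rate_gov_pol() -> store_timer_rate(policy->governor_data, buf, count)
 *
 * so each tunable gets both a system-wide and a per-policy sysfs entry
 * backed by the same show/store implementation.
 */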

show_store_gov_pol_sys(target_loads);
show_store_gov_pol_sys(above_hispeed_delay);
show_store_gov_pol_sys(hispeed_freq);
show_store_gov_pol_sys(go_hispeed_load);
show_store_gov_pol_sys(min_sample_time);
show_store_gov_pol_sys(timer_rate);
show_store_gov_pol_sys(timer_slack);
show_store_gov_pol_sys(boost);
store_gov_pol_sys(boostpulse);
show_store_gov_pol_sys(boostpulse_duration);
show_store_gov_pol_sys(io_is_busy);
show_store_gov_pol_sys(use_sched_load);
show_store_gov_pol_sys(use_migration_notif);
show_store_gov_pol_sys(max_freq_hysteresis);
show_store_gov_pol_sys(align_windows);
show_store_gov_pol_sys(ignore_hispeed_on_notif);
show_store_gov_pol_sys(fast_ramp_down);
show_store_gov_pol_sys(enable_prediction);

#define gov_sys_attr_rw(_name)						\
static struct global_attr _name##_gov_sys =				\
__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)

#define gov_pol_attr_rw(_name)						\
static struct freq_attr _name##_gov_pol =				\
__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)

#define gov_sys_pol_attr_rw(_name)					\
	gov_sys_attr_rw(_name);						\
	gov_pol_attr_rw(_name)

gov_sys_pol_attr_rw(target_loads);
gov_sys_pol_attr_rw(above_hispeed_delay);
gov_sys_pol_attr_rw(hispeed_freq);
gov_sys_pol_attr_rw(go_hispeed_load);
gov_sys_pol_attr_rw(min_sample_time);
gov_sys_pol_attr_rw(timer_rate);
gov_sys_pol_attr_rw(timer_slack);
gov_sys_pol_attr_rw(boost);
gov_sys_pol_attr_rw(boostpulse_duration);
gov_sys_pol_attr_rw(io_is_busy);
gov_sys_pol_attr_rw(use_sched_load);
gov_sys_pol_attr_rw(use_migration_notif);
gov_sys_pol_attr_rw(max_freq_hysteresis);
gov_sys_pol_attr_rw(align_windows);
gov_sys_pol_attr_rw(ignore_hispeed_on_notif);
gov_sys_pol_attr_rw(fast_ramp_down);
gov_sys_pol_attr_rw(enable_prediction);

static struct global_attr boostpulse_gov_sys =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);

static struct freq_attr boostpulse_gov_pol =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);

/* One governor instance for entire system */
static struct attribute *interactive_attributes_gov_sys[] = {
	&target_loads_gov_sys.attr,
	&above_hispeed_delay_gov_sys.attr,
	&hispeed_freq_gov_sys.attr,
	&go_hispeed_load_gov_sys.attr,
	&min_sample_time_gov_sys.attr,
	&timer_rate_gov_sys.attr,
	&timer_slack_gov_sys.attr,
	&boost_gov_sys.attr,
	&boostpulse_gov_sys.attr,
	&boostpulse_duration_gov_sys.attr,
	&io_is_busy_gov_sys.attr,
	&use_sched_load_gov_sys.attr,
	&use_migration_notif_gov_sys.attr,
	&max_freq_hysteresis_gov_sys.attr,
	&align_windows_gov_sys.attr,
	&ignore_hispeed_on_notif_gov_sys.attr,
	&fast_ramp_down_gov_sys.attr,
	&enable_prediction_gov_sys.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_sys = {
	.attrs = interactive_attributes_gov_sys,
	.name = "interactive",
};

/* Per policy governor instance */
static struct attribute *interactive_attributes_gov_pol[] = {
	&target_loads_gov_pol.attr,
	&above_hispeed_delay_gov_pol.attr,
	&hispeed_freq_gov_pol.attr,
	&go_hispeed_load_gov_pol.attr,
	&min_sample_time_gov_pol.attr,
	&timer_rate_gov_pol.attr,
	&timer_slack_gov_pol.attr,
	&boost_gov_pol.attr,
	&boostpulse_gov_pol.attr,
	&boostpulse_duration_gov_pol.attr,
	&io_is_busy_gov_pol.attr,
	&use_sched_load_gov_pol.attr,
	&use_migration_notif_gov_pol.attr,
	&max_freq_hysteresis_gov_pol.attr,
	&align_windows_gov_pol.attr,
	&ignore_hispeed_on_notif_gov_pol.attr,
	&fast_ramp_down_gov_pol.attr,
	&enable_prediction_gov_pol.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_pol = {
	.attrs = interactive_attributes_gov_pol,
	.name = "interactive",
};

static struct attribute_group *get_sysfs_attr(void)
{
	if (have_governor_per_policy())
		return &interactive_attr_group_gov_pol;
	else
		return &interactive_attr_group_gov_sys;
}
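
/*
 * Editorial note: with a per-policy governor the "interactive" attribute
 * group is created under each policy's kobject (typically
 * /sys/devices/system/cpu/cpuX/cpufreq/interactive), while the system-wide
 * variant lives under /sys/devices/system/cpu/cpufreq/interactive. The
 * exact paths depend on the kernel's cpufreq sysfs layout.
 */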

static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

static struct cpufreq_interactive_tunables *alloc_tunable(
		struct cpufreq_policy *policy)
{
	struct cpufreq_interactive_tunables *tunables;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (!tunables)
		return ERR_PTR(-ENOMEM);

	tunables->above_hispeed_delay = default_above_hispeed_delay;
	tunables->nabove_hispeed_delay =
		ARRAY_SIZE(default_above_hispeed_delay);
	tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
	tunables->target_loads = default_target_loads;
	tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
	tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	tunables->timer_rate = DEFAULT_TIMER_RATE;
	tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
	tunables->timer_slack_val = DEFAULT_TIMER_SLACK;

	spin_lock_init(&tunables->target_loads_lock);
	spin_lock_init(&tunables->above_hispeed_delay_lock);

	return tunables;
}
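
/*
 * One policyinfo struct is shared by every CPU in a policy (cluster): it is
 * allocated on first sight of the policy and every CPU in related_cpus is
 * pointed at it, so per-CPU lookups through 'polinfo' resolve to the same
 * per-cluster state.
 */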
static struct cpufreq_interactive_policyinfo *get_policyinfo(
		struct cpufreq_policy *policy)
{
	struct cpufreq_interactive_policyinfo *ppol =
		per_cpu(polinfo, policy->cpu);
	int i;
	struct sched_load *sl;

	/* polinfo already allocated for policy, return */
	if (ppol)
		return ppol;

	ppol = kzalloc(sizeof(*ppol), GFP_KERNEL);
	if (!ppol)
		return ERR_PTR(-ENOMEM);

	sl = kcalloc(cpumask_weight(policy->related_cpus), sizeof(*sl),
		     GFP_KERNEL);
	if (!sl) {
		kfree(ppol);
		return ERR_PTR(-ENOMEM);
	}
	ppol->sl = sl;

	init_timer_deferrable(&ppol->policy_timer);
	ppol->policy_timer.function = cpufreq_interactive_timer;
	init_timer(&ppol->policy_slack_timer);
	ppol->policy_slack_timer.function = cpufreq_interactive_nop_timer;
	hrtimer_init(&ppol->notif_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	ppol->notif_timer.function = cpufreq_interactive_hrtimer;
	spin_lock_init(&ppol->load_lock);
	spin_lock_init(&ppol->target_freq_lock);
	init_rwsem(&ppol->enable_sem);

	for_each_cpu(i, policy->related_cpus)
		per_cpu(polinfo, i) = ppol;
	return ppol;
}

/* This function is not multithread-safe. */
static void free_policyinfo(int cpu)
{
	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
	int j;

	if (!ppol)
		return;

	/* Clear every per-CPU reference to this policyinfo, not just 'cpu'. */
	for_each_possible_cpu(j)
		if (per_cpu(polinfo, j) == ppol)
			per_cpu(polinfo, j) = NULL;
	kfree(ppol->cached_tunables);
	kfree(ppol->sl);
	kfree(ppol);
}

static struct cpufreq_interactive_tunables *get_tunables(
		struct cpufreq_interactive_policyinfo *ppol)
{
	if (have_governor_per_policy())
		return ppol->cached_tunables;
	else
		return cached_common_tunables;
}

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	struct cpufreq_interactive_policyinfo *ppol;
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_interactive_tunables *tunables;

	if (have_governor_per_policy())
		tunables = policy->governor_data;
	else
		tunables = common_tunables;

	BUG_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));

	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
		ppol = get_policyinfo(policy);
		if (IS_ERR(ppol))
			return PTR_ERR(ppol);

		if (have_governor_per_policy()) {
			WARN_ON(tunables);
		} else if (tunables) {
			tunables->usage_count++;
			cpumask_or(&controlled_cpus, &controlled_cpus,
				   policy->related_cpus);
			sched_update_freq_max_load(policy->related_cpus);
			policy->governor_data = tunables;
			return 0;
		}

		tunables = get_tunables(ppol);
		if (!tunables) {
			tunables = alloc_tunable(policy);
			if (IS_ERR(tunables))
				return PTR_ERR(tunables);
		}

		tunables->usage_count = 1;
		policy->governor_data = tunables;
		if (!have_governor_per_policy())
			common_tunables = tunables;

		rc = sysfs_create_group(get_governor_parent_kobj(policy),
				get_sysfs_attr());
		if (rc) {
			kfree(tunables);
			policy->governor_data = NULL;
			if (!have_governor_per_policy())
				common_tunables = NULL;
			return rc;
		}

		if (!policy->governor->initialized)
			cpufreq_register_notifier(&cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);

		if (tunables->use_sched_load)
			cpufreq_interactive_enable_sched_input(tunables);

		cpumask_or(&controlled_cpus, &controlled_cpus,
			   policy->related_cpus);
		sched_update_freq_max_load(policy->related_cpus);

		if (have_governor_per_policy())
			ppol->cached_tunables = tunables;
		else
			cached_common_tunables = tunables;

		break;

	case CPUFREQ_GOV_POLICY_EXIT:
		cpumask_andnot(&controlled_cpus, &controlled_cpus,
			       policy->related_cpus);
		sched_update_freq_max_load(cpu_possible_mask);
		if (!--tunables->usage_count) {
			if (policy->governor->initialized == 1)
				cpufreq_unregister_notifier(&cpufreq_notifier_block,
						CPUFREQ_TRANSITION_NOTIFIER);

			sysfs_remove_group(get_governor_parent_kobj(policy),
					get_sysfs_attr());

			common_tunables = NULL;
		}

		policy->governor_data = NULL;

		if (tunables->use_sched_load)
			cpufreq_interactive_disable_sched_input(tunables);

		break;

	case CPUFREQ_GOV_START:
		mutex_lock(&gov_lock);

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (!tunables->hispeed_freq)
			tunables->hispeed_freq = policy->max;

		ppol = per_cpu(polinfo, policy->cpu);
		ppol->policy = policy;
		ppol->target_freq = policy->cur;
		ppol->freq_table = freq_table;
		ppol->p_nolim = *policy;
		ppol->p_nolim.min = policy->cpuinfo.min_freq;
		ppol->p_nolim.max = policy->cpuinfo.max_freq;
		ppol->floor_freq = ppol->target_freq;
		ppol->floor_validate_time = ktime_to_us(ktime_get());
		ppol->hispeed_validate_time = ppol->floor_validate_time;
		ppol->min_freq = policy->min;
		ppol->reject_notification = true;
		ppol->notif_pending = false;
		down_write(&ppol->enable_sem);
		del_timer_sync(&ppol->policy_timer);
		del_timer_sync(&ppol->policy_slack_timer);
		ppol->policy_timer.data = policy->cpu;
		ppol->last_evaluated_jiffy = get_jiffies_64();
		cpufreq_interactive_timer_start(tunables, policy->cpu);
		ppol->governor_enabled = 1;
		up_write(&ppol->enable_sem);
		ppol->reject_notification = false;

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&gov_lock);

		ppol = per_cpu(polinfo, policy->cpu);
		ppol->reject_notification = true;
		down_write(&ppol->enable_sem);
		ppol->governor_enabled = 0;
		ppol->target_freq = 0;
		del_timer_sync(&ppol->policy_timer);
		del_timer_sync(&ppol->policy_slack_timer);
		up_write(&ppol->enable_sem);
		ppol->reject_notification = false;

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_LIMITS:
		ppol = per_cpu(polinfo, policy->cpu);

		__cpufreq_driver_target(policy,
				ppol->target_freq, CPUFREQ_RELATION_L);

		down_read(&ppol->enable_sem);
		if (ppol->governor_enabled) {
			if (policy->min < ppol->min_freq)
				cpufreq_interactive_timer_resched(policy->cpu,
								  true);
			ppol->min_freq = policy->min;
		}
		up_read(&ppol->enable_sem);

		break;
	}
	return 0;
}
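
/*
 * Lifecycle summary (editorial): the cpufreq core drives a governor through
 *
 *   CPUFREQ_GOV_POLICY_INIT -> CPUFREQ_GOV_START ->
 *   (CPUFREQ_GOV_LIMITS whenever policy->min/max change) ->
 *   CPUFREQ_GOV_STOP -> CPUFREQ_GOV_POLICY_EXIT
 *
 * INIT/EXIT manage the tunables and sysfs group, START/STOP arm and tear
 * down the per-policy timers under enable_sem, and LIMITS re-targets the
 * current frequency into the new bounds.
 */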

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

static int __init cpufreq_interactive_init(void)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	spin_lock_init(&speedchange_cpumask_lock);
	mutex_init(&gov_lock);
	mutex_init(&sched_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process_no_notif(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	int cpu;

	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);

	for_each_possible_cpu(cpu)
		free_policyinfo(cpu);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency-sensitive workloads");
MODULE_LICENSE("GPL");
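
/*
 * Selection example (illustrative, standard cpufreq interface): once the
 * governor is built in or loaded, it is enabled per policy with
 *
 *   echo interactive > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 *
 * after which the tunables shown above appear under the corresponding
 * "interactive" sysfs directory.
 */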