/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	struct timer_list cpu_slack_timer;
	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	spinlock_t target_freq_lock; /* protects target_freq */
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 pol_floor_val_time; /* policy floor_validate_time */
	u64 loc_floor_val_time; /* per-cpu floor_validate_time */
	u64 pol_hispeed_val_time; /* policy hispeed_validate_time */
	u64 loc_hispeed_val_time; /* per-cpu hispeed_validate_time */
	struct rw_semaphore enable_sem;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

/* Target load. Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
	DEFAULT_ABOVE_HISPEED_DELAY };

struct cpufreq_interactive_tunables {
	int usage_count;
	/* Hi speed to bump to from lo speed when load bursts (default max) */
	unsigned int hispeed_freq;
	/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
	unsigned long go_hispeed_load;
	/* Target load. Lower values result in higher CPU speeds. */
	spinlock_t target_loads_lock;
	unsigned int *target_loads;
	int ntarget_loads;
	/*
	 * The minimum amount of time to spend at a frequency before we can ramp
	 * down.
	 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
	unsigned long min_sample_time;
	/*
	 * The sample rate of the timer used to increase frequency
	 */
	unsigned long timer_rate;
	/*
	 * Wait this long before raising speed above hispeed, by default a
	 * single timer interval.
	 */
	spinlock_t above_hispeed_delay_lock;
	unsigned int *above_hispeed_delay;
	int nabove_hispeed_delay;
	/* Non-zero means indefinite speed boost active */
	int boost_val;
	/* Duration of a boost pulse in usecs */
	int boostpulse_duration_val;
	/* End time of boost pulse in ktime converted to usecs */
	u64 boostpulse_endtime;
	bool boosted;
	/*
	 * Max additional time to wait in idle, beyond timer_rate, at speeds
	 * above minimum before wakeup to reduce speed, or -1 if unnecessary.
	 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
	int timer_slack_val;
	bool io_is_busy;
};

/*
 * HACK: FIXME: Bring back cpufreq_{get,put}_global_kobject()
 * definition removed by upstream commit 8eec1020f0c0 "cpufreq:
 * create cpu/cpufreq at boot time" to fix build failures.
 */
static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
	if (!cpufreq_global_kobject_usage++)
		return kobject_add(cpufreq_global_kobject,
				   &cpu_subsys.dev_root->kobj, "%s", "cpufreq");

	return 0;
}

void cpufreq_put_global_kobject(void)
{
	if (!--cpufreq_global_kobject_usage)
		kobject_del(cpufreq_global_kobject);
}

/* For cases where we have single governor instance for system */
static struct cpufreq_interactive_tunables *common_tunables;

static struct attribute_group *get_sysfs_attr(void);

static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned long expires;
	unsigned long flags;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(smp_processor_id(),
				  &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
	mod_timer_pinned(&pcpu->cpu_timer, expires);

	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
	}

	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

/* The caller shall take enable_sem write semaphore to avoid any timer race.
 * The cpu_timer and cpu_slack_timer must be deactivated when calling this
 * function.
 */
static void cpufreq_interactive_timer_start(
	struct cpufreq_interactive_tunables *tunables, int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	unsigned long expires = jiffies +
		usecs_to_jiffies(tunables->timer_rate);
	unsigned long flags;

	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);
	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

static unsigned int freq_to_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
			freq >= tunables->above_hispeed_delay[i+1]; i += 2)
		;

	ret = tunables->above_hispeed_delay[i];
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static unsigned int freq_to_targetload(
	struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads - 1 &&
			freq >= tunables->target_loads[i+1]; i += 2)
		;

	ret = tunables->target_loads[i];
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}
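
/*
 * Illustration (values are hypothetical, not defaults): both lookup
 * helpers above walk a flat array of the form
 *
 *   { val0, freq1, val1, freq2, val2, ... }
 *
 * with values at even indices and ascending frequency thresholds at odd
 * indices, which is why get_tokenized_data() below insists on an odd
 * token count.  A target_loads table of { 85, 1000000, 90 } would mean:
 * target 85% load below 1 GHz and 90% at or above 1 GHz.
 */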

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
		unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(pcpu->policy->governor_data, freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		if (cpufreq_frequency_table_target(
			    pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			    CPUFREQ_RELATION_L, &index))
			break;
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmax - 1, CPUFREQ_RELATION_H,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low.  freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmin + 1, CPUFREQ_RELATION_L,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}
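
/*
 * Worked example for choose_freq() (hypothetical table and load, for
 * illustration only): with a frequency table of { 300000, 600000,
 * 900000 } kHz, a single target load of 90 and loadadjfreq == 60000000
 * (CPU 100% busy at 600000 kHz, times 100), the first pass computes
 * 60000000 / 90 == 666666 and CPUFREQ_RELATION_L selects 900000.  The
 * target load at 900000 is still 90, so the second pass selects 900000
 * again and the loop exits at that fixed point.
 */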

static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

	if (delta_time <= delta_idle)
		active_time = 0;
	else
		active_time = delta_time - delta_idle;

	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}
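
/*
 * Note on the math: cputime_speedadj accumulates (active time x current
 * frequency), so the sampling timer below can compute a load that is
 * invariant to frequency changes within the sample:
 *
 *   loadadjfreq = (cputime_speedadj / delta_time) * 100
 *   cpu_load    = loadadjfreq / target_freq
 *
 * For example (hypothetical numbers), 10 ms busy out of a 20 ms sample
 * at 1000000 kHz gives loadadjfreq == 50000000, i.e. cpu_load == 50
 * against a 1000000 kHz target.
 */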

static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	u64 max_fvtime;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

	spin_lock_irqsave(&pcpu->target_freq_lock, flags);
	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;
	tunables->boosted = tunables->boost_val || now < tunables->boostpulse_endtime;

	if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) {
		if (pcpu->policy->cur < tunables->hispeed_freq) {
			new_freq = tunables->hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < tunables->hispeed_freq)
				new_freq = tunables->hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);
		if (new_freq > tunables->hispeed_freq &&
				pcpu->policy->cur < tunables->hispeed_freq)
			new_freq = tunables->hispeed_freq;
	}

	if (pcpu->policy->cur >= tunables->hispeed_freq &&
	    new_freq > pcpu->policy->cur &&
	    now - pcpu->pol_hispeed_val_time <
	    freq_to_above_hispeed_delay(tunables, pcpu->policy->cur)) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	pcpu->loc_hispeed_val_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	max_fvtime = max(pcpu->pol_floor_val_time, pcpu->loc_floor_val_time);
	if (new_freq < pcpu->floor_freq &&
	    pcpu->target_freq >= pcpu->policy->cur) {
		if (now - max_fvtime < tunables->min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */

	if (!tunables->boosted || new_freq > tunables->hispeed_freq) {
		pcpu->floor_freq = new_freq;
		if (pcpu->target_freq >= pcpu->policy->cur ||
		    new_freq >= pcpu->policy->cur)
			pcpu->loc_floor_val_time = now;
	}

	if (pcpu->target_freq == new_freq &&
			pcpu->target_freq <= pcpu->policy->cur) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm:
	if (!timer_pending(&pcpu->cpu_timer))
		cpufreq_interactive_timer_resched(pcpu);

exit:
	up_read(&pcpu->enable_sem);
	return;
}
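
/*
 * Decision flow of the sampling timer above: compute the
 * frequency-invariant load; on boost or go_hispeed_load jump at least
 * to hispeed_freq, otherwise let choose_freq() pick a speed; throttle
 * ramp-ups past hispeed_freq with above_hispeed_delay and ramp-downs
 * below floor_freq with min_sample_time; finally queue the CPU for the
 * speedchange task to apply the new target.
 */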

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		cpufreq_interactive_timer_resched(pcpu);
	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		del_timer(&pcpu->cpu_slack_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}

	up_read(&pcpu->enable_sem);
}

static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;
			struct cpufreq_interactive_cpuinfo *pjcpu;
			u64 hvt = ~0ULL, fvt = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			if (!down_read_trylock(&pcpu->enable_sem))
				continue;
			if (!pcpu->governor_enabled) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			for_each_cpu(j, pcpu->policy->cpus) {
				pjcpu = &per_cpu(cpuinfo, j);

				fvt = max(fvt, pjcpu->loc_floor_val_time);
				if (pjcpu->target_freq > max_freq) {
					max_freq = pjcpu->target_freq;
					hvt = pjcpu->loc_hispeed_val_time;
				} else if (pjcpu->target_freq == max_freq) {
					hvt = min(hvt, pjcpu->loc_hispeed_val_time);
				}
			}
			for_each_cpu(j, pcpu->policy->cpus) {
				pjcpu = &per_cpu(cpuinfo, j);
				pjcpu->pol_floor_val_time = fvt;
			}

			if (max_freq != pcpu->policy->cur) {
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
				for_each_cpu(j, pcpu->policy->cpus) {
					pjcpu = &per_cpu(cpuinfo, j);
					pjcpu->pol_hispeed_val_time = hvt;
				}
			}
			trace_cpufreq_interactive_setspeed(cpu,
						     pcpu->target_freq,
						     pcpu->policy->cur);

			up_read(&pcpu->enable_sem);
		}
	}

	return 0;
}

static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunables)
{
	int i;
	int anyboost = 0;
	unsigned long flags[2];
	struct cpufreq_interactive_cpuinfo *pcpu;

	tunables->boosted = true;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		if (tunables != pcpu->policy->governor_data)
			continue;

		spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
		if (pcpu->target_freq < tunables->hispeed_freq) {
			pcpu->target_freq = tunables->hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->pol_hispeed_val_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);

	if (anyboost)
		wake_up_process(speedchange_task);
}

static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;
	unsigned long flags;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);
		if (!down_read_trylock(&pcpu->enable_sem))
			return 0;
		if (!pcpu->governor_enabled) {
			up_read(&pcpu->enable_sem);
			return 0;
		}

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);
			if (cpu != freq->cpu) {
				if (!down_read_trylock(&pjcpu->enable_sem))
					continue;
				if (!pjcpu->governor_enabled) {
					up_read(&pjcpu->enable_sem);
					continue;
				}
			}
			spin_lock_irqsave(&pjcpu->load_lock, flags);
			update_load(cpu);
			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
			if (cpu != freq->cpu)
				up_read(&pjcpu->enable_sem);
		}

		up_read(&pcpu->enable_sem);
	}
	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
	const char *cp;
	int i;
	int ntokens = 1;
	unsigned int *tokenized_data;
	int err = -EINVAL;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err;

	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!tokenized_data) {
		err = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
			goto err_kfree;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_kfree;

	*num_tokens = ntokens;
	return tokenized_data;

err_kfree:
	kfree(tokenized_data);
err:
	return ERR_PTR(err);
}
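
/*
 * Parse example (hypothetical input): get_tokenized_data("85 1000000:90",
 * &n) returns { 85, 1000000, 90 } with n == 3.  An even token count such
 * as "85 1000000" is rejected with -EINVAL, since a valid table is
 * always "value [freq:value ...]".
 */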

static ssize_t show_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

static ssize_t store_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_target_loads = NULL;
	unsigned long flags;

	new_target_loads = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_target_loads))
		return PTR_RET(new_target_loads);

	spin_lock_irqsave(&tunables->target_loads_lock, flags);
	if (tunables->target_loads != default_target_loads)
		kfree(tunables->target_loads);
	tunables->target_loads = new_target_loads;
	tunables->ntarget_loads = ntokens;
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return count;
}
Minsung Kim9c1f83a2013-02-25 23:48:04 +0900753static ssize_t show_above_hispeed_delay(
Viresh Kumar17d15c42013-05-16 14:58:54 +0530754 struct cpufreq_interactive_tunables *tunables, char *buf)
Minsung Kim9c1f83a2013-02-25 23:48:04 +0900755{
756 int i;
757 ssize_t ret = 0;
758 unsigned long flags;
759
Viresh Kumar17d15c42013-05-16 14:58:54 +0530760 spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
Minsung Kim9c1f83a2013-02-25 23:48:04 +0900761
Viresh Kumar17d15c42013-05-16 14:58:54 +0530762 for (i = 0; i < tunables->nabove_hispeed_delay; i++)
763 ret += sprintf(buf + ret, "%u%s",
764 tunables->above_hispeed_delay[i],
Minsung Kim9c1f83a2013-02-25 23:48:04 +0900765 i & 0x1 ? ":" : " ");
766
Chih-Wei Huang8d9e5302013-12-24 17:51:55 +0800767 sprintf(buf + ret - 1, "\n");
Viresh Kumar17d15c42013-05-16 14:58:54 +0530768 spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
Minsung Kim9c1f83a2013-02-25 23:48:04 +0900769 return ret;
770}
771
772static ssize_t store_above_hispeed_delay(
Viresh Kumar17d15c42013-05-16 14:58:54 +0530773 struct cpufreq_interactive_tunables *tunables,
774 const char *buf, size_t count)
Minsung Kim9c1f83a2013-02-25 23:48:04 +0900775{
776 int ntokens;
777 unsigned int *new_above_hispeed_delay = NULL;
778 unsigned long flags;
779
780 new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
781 if (IS_ERR(new_above_hispeed_delay))
782 return PTR_RET(new_above_hispeed_delay);
783
Viresh Kumar17d15c42013-05-16 14:58:54 +0530784 spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
785 if (tunables->above_hispeed_delay != default_above_hispeed_delay)
786 kfree(tunables->above_hispeed_delay);
787 tunables->above_hispeed_delay = new_above_hispeed_delay;
788 tunables->nabove_hispeed_delay = ntokens;
789 spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
Minsung Kim9c1f83a2013-02-25 23:48:04 +0900790 return count;
791
792}
793
Viresh Kumar17d15c42013-05-16 14:58:54 +0530794static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
795 char *buf)
Mike Chanef969692010-06-22 11:26:45 -0700796{
Viresh Kumar17d15c42013-05-16 14:58:54 +0530797 return sprintf(buf, "%u\n", tunables->hispeed_freq);
Mike Chanef969692010-06-22 11:26:45 -0700798}
799
Viresh Kumar17d15c42013-05-16 14:58:54 +0530800static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
801 const char *buf, size_t count)
Mike Chanef969692010-06-22 11:26:45 -0700802{
803 int ret;
Todd Poynor3b7b5f82012-10-03 00:39:56 -0700804 long unsigned int val;
Mike Chanef969692010-06-22 11:26:45 -0700805
Amit Pundircf076402015-11-03 20:53:29 +0530806 ret = kstrtoul(buf, 0, &val);
Mike Chanef969692010-06-22 11:26:45 -0700807 if (ret < 0)
808 return ret;
Viresh Kumar17d15c42013-05-16 14:58:54 +0530809 tunables->hispeed_freq = val;
Mike Chanef969692010-06-22 11:26:45 -0700810 return count;
811}
812
Viresh Kumar17d15c42013-05-16 14:58:54 +0530813static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
814 *tunables, char *buf)
Mike Chanef969692010-06-22 11:26:45 -0700815{
Viresh Kumar17d15c42013-05-16 14:58:54 +0530816 return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
Mike Chanef969692010-06-22 11:26:45 -0700817}
818
Viresh Kumar17d15c42013-05-16 14:58:54 +0530819static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
820 *tunables, const char *buf, size_t count)
Mike Chanef969692010-06-22 11:26:45 -0700821{
822 int ret;
823 unsigned long val;
824
Amit Pundircf076402015-11-03 20:53:29 +0530825 ret = kstrtoul(buf, 0, &val);
Mike Chanef969692010-06-22 11:26:45 -0700826 if (ret < 0)
827 return ret;
Viresh Kumar17d15c42013-05-16 14:58:54 +0530828 tunables->go_hispeed_load = val;
Mike Chanef969692010-06-22 11:26:45 -0700829 return count;
830}
831
Viresh Kumar17d15c42013-05-16 14:58:54 +0530832static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
833 *tunables, char *buf)
Mike Chanef969692010-06-22 11:26:45 -0700834{
Viresh Kumar17d15c42013-05-16 14:58:54 +0530835 return sprintf(buf, "%lu\n", tunables->min_sample_time);
Mike Chanef969692010-06-22 11:26:45 -0700836}
837
Viresh Kumar17d15c42013-05-16 14:58:54 +0530838static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
839 *tunables, const char *buf, size_t count)
Mike Chanef969692010-06-22 11:26:45 -0700840{
841 int ret;
842 unsigned long val;
843
Amit Pundircf076402015-11-03 20:53:29 +0530844 ret = kstrtoul(buf, 0, &val);
Mike Chanef969692010-06-22 11:26:45 -0700845 if (ret < 0)
846 return ret;
Viresh Kumar17d15c42013-05-16 14:58:54 +0530847 tunables->min_sample_time = val;
Mike Chanef969692010-06-22 11:26:45 -0700848 return count;
849}
850
Viresh Kumar17d15c42013-05-16 14:58:54 +0530851static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
852 char *buf)
Mike Chanef969692010-06-22 11:26:45 -0700853{
Viresh Kumar17d15c42013-05-16 14:58:54 +0530854 return sprintf(buf, "%lu\n", tunables->timer_rate);
Mike Chanef969692010-06-22 11:26:45 -0700855}
856
Viresh Kumar17d15c42013-05-16 14:58:54 +0530857static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
858 const char *buf, size_t count)
Mike Chanef969692010-06-22 11:26:45 -0700859{
860 int ret;
Junjie Wu847796e2014-08-15 16:34:37 -0700861 unsigned long val, val_round;
Mike Chanef969692010-06-22 11:26:45 -0700862
Amit Pundircf076402015-11-03 20:53:29 +0530863 ret = kstrtoul(buf, 0, &val);
Mike Chanef969692010-06-22 11:26:45 -0700864 if (ret < 0)
865 return ret;
Junjie Wu847796e2014-08-15 16:34:37 -0700866
867 val_round = jiffies_to_usecs(usecs_to_jiffies(val));
868 if (val != val_round)
869 pr_warn("timer_rate not aligned to jiffy. Rounded up to %lu\n",
870 val_round);
871
872 tunables->timer_rate = val_round;
Mike Chanef969692010-06-22 11:26:45 -0700873 return count;
874}
875
Viresh Kumar17d15c42013-05-16 14:58:54 +0530876static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
877 char *buf)
Todd Poynor4add2592012-12-18 17:50:10 -0800878{
Viresh Kumar17d15c42013-05-16 14:58:54 +0530879 return sprintf(buf, "%d\n", tunables->timer_slack_val);
Todd Poynor4add2592012-12-18 17:50:10 -0800880}
881
Viresh Kumar17d15c42013-05-16 14:58:54 +0530882static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
883 const char *buf, size_t count)
Todd Poynor4add2592012-12-18 17:50:10 -0800884{
	int ret;
	long val;

	ret = kstrtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	tunables->timer_slack_val = val;
	return count;
}

static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
			  char *buf)
{
	return sprintf(buf, "%d\n", tunables->boost_val);
}

static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boost_val = val;

	if (tunables->boost_val) {
		trace_cpufreq_interactive_boost("on");
		if (!tunables->boosted)
			cpufreq_interactive_boost(tunables);
	} else {
		tunables->boostpulse_endtime = ktime_to_us(ktime_get());
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
		tunables->boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	if (!tunables->boosted)
		cpufreq_interactive_boost(tunables);
	return count;
}
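
/*
 * Usage sketch: writing anything to the write-only boostpulse attribute,
 * e.g. "echo 1 > boostpulse", raises every CPU in the policy to at least
 * hispeed_freq and lets the speed fall again once boostpulse_duration_val
 * microseconds (default DEFAULT_MIN_SAMPLE_TIME) have elapsed.
 */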

static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_duration_val = val;
	return count;
}

static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->io_is_busy);
}

static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->io_is_busy = val;
	return count;
}

/*
 * Create show/store routines
 * - sys: One governor instance for complete SYSTEM
 * - pol: One governor instance per struct cpufreq_policy
 */
#define show_gov_pol_sys(file_name)					\
static ssize_t show_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return show_##file_name(common_tunables, buf);			\
}									\
									\
static ssize_t show_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, char *buf)				\
{									\
	return show_##file_name(policy->governor_data, buf);		\
}

#define store_gov_pol_sys(file_name)					\
static ssize_t store_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, const char *buf,		\
	size_t count)							\
{									\
	return store_##file_name(common_tunables, buf, count);		\
}									\
									\
static ssize_t store_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	return store_##file_name(policy->governor_data, buf, count);	\
}

#define show_store_gov_pol_sys(file_name)				\
show_gov_pol_sys(file_name);						\
store_gov_pol_sys(file_name)
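
/*
 * Expansion sketch: show_store_gov_pol_sys(timer_rate), for instance,
 * emits show/store_timer_rate_gov_sys(), which operate on the shared
 * common_tunables, and show/store_timer_rate_gov_pol(), which take
 * their tunables from policy->governor_data.
 */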

show_store_gov_pol_sys(target_loads);
show_store_gov_pol_sys(above_hispeed_delay);
show_store_gov_pol_sys(hispeed_freq);
show_store_gov_pol_sys(go_hispeed_load);
show_store_gov_pol_sys(min_sample_time);
show_store_gov_pol_sys(timer_rate);
show_store_gov_pol_sys(timer_slack);
show_store_gov_pol_sys(boost);
store_gov_pol_sys(boostpulse);
show_store_gov_pol_sys(boostpulse_duration);
show_store_gov_pol_sys(io_is_busy);

#define gov_sys_attr_rw(_name)						\
static struct global_attr _name##_gov_sys =				\
__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)

#define gov_pol_attr_rw(_name)						\
static struct freq_attr _name##_gov_pol =				\
__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)

#define gov_sys_pol_attr_rw(_name)					\
	gov_sys_attr_rw(_name);						\
	gov_pol_attr_rw(_name)

gov_sys_pol_attr_rw(target_loads);
gov_sys_pol_attr_rw(above_hispeed_delay);
gov_sys_pol_attr_rw(hispeed_freq);
gov_sys_pol_attr_rw(go_hispeed_load);
gov_sys_pol_attr_rw(min_sample_time);
gov_sys_pol_attr_rw(timer_rate);
gov_sys_pol_attr_rw(timer_slack);
gov_sys_pol_attr_rw(boost);
gov_sys_pol_attr_rw(boostpulse_duration);
gov_sys_pol_attr_rw(io_is_busy);

static struct global_attr boostpulse_gov_sys =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);

static struct freq_attr boostpulse_gov_pol =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);

/* One Governor instance for entire system */
static struct attribute *interactive_attributes_gov_sys[] = {
	&target_loads_gov_sys.attr,
	&above_hispeed_delay_gov_sys.attr,
	&hispeed_freq_gov_sys.attr,
	&go_hispeed_load_gov_sys.attr,
	&min_sample_time_gov_sys.attr,
	&timer_rate_gov_sys.attr,
	&timer_slack_gov_sys.attr,
	&boost_gov_sys.attr,
	&boostpulse_gov_sys.attr,
	&boostpulse_duration_gov_sys.attr,
	&io_is_busy_gov_sys.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_sys = {
	.attrs = interactive_attributes_gov_sys,
	.name = "interactive",
};

/* Per policy governor instance */
static struct attribute *interactive_attributes_gov_pol[] = {
	&target_loads_gov_pol.attr,
	&above_hispeed_delay_gov_pol.attr,
	&hispeed_freq_gov_pol.attr,
	&go_hispeed_load_gov_pol.attr,
	&min_sample_time_gov_pol.attr,
	&timer_rate_gov_pol.attr,
	&timer_slack_gov_pol.attr,
	&boost_gov_pol.attr,
	&boostpulse_gov_pol.attr,
	&boostpulse_duration_gov_pol.attr,
	&io_is_busy_gov_pol.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_pol = {
	.attrs = interactive_attributes_gov_pol,
	.name = "interactive",
};

static struct attribute_group *get_sysfs_attr(void)
{
	if (have_governor_per_policy())
		return &interactive_attr_group_gov_pol;
	else
		return &interactive_attr_group_gov_sys;
}
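
/*
 * Sysfs placement note (paths are illustrative and depend on the kernel
 * configuration): the system-wide group is created under the global
 * cpufreq kobject, e.g. /sys/devices/system/cpu/cpufreq/interactive/,
 * while with have_governor_per_policy() the same attributes appear once
 * per policy under that policy's kobject.
 */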

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	if (val == IDLE_END)
		cpufreq_interactive_idle_end();

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_interactive_tunables *tunables;
	unsigned long flags;

	if (have_governor_per_policy())
		tunables = policy->governor_data;
	else
		tunables = common_tunables;

	WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));

	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
		if (have_governor_per_policy()) {
			WARN_ON(tunables);
		} else if (tunables) {
			tunables->usage_count++;
			policy->governor_data = tunables;
			return 0;
		}

		tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
		if (!tunables) {
			pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
			return -ENOMEM;
		}

		tunables->usage_count = 1;
		tunables->above_hispeed_delay = default_above_hispeed_delay;
		tunables->nabove_hispeed_delay =
			ARRAY_SIZE(default_above_hispeed_delay);
		tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
		tunables->target_loads = default_target_loads;
		tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
		tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
		tunables->timer_rate = DEFAULT_TIMER_RATE;
		tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
		tunables->timer_slack_val = DEFAULT_TIMER_SLACK;

		spin_lock_init(&tunables->target_loads_lock);
		spin_lock_init(&tunables->above_hispeed_delay_lock);

		policy->governor_data = tunables;
		if (!have_governor_per_policy()) {
			common_tunables = tunables;
			WARN_ON(cpufreq_get_global_kobject());
		}

		rc = sysfs_create_group(get_governor_parent_kobj(policy),
				get_sysfs_attr());
		if (rc) {
			kfree(tunables);
			policy->governor_data = NULL;
			if (!have_governor_per_policy()) {
				common_tunables = NULL;
				cpufreq_put_global_kobject();
			}
			return rc;
		}

		if (!policy->governor->initialized) {
			idle_notifier_register(&cpufreq_interactive_idle_nb);
			cpufreq_register_notifier(&cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}

		break;

	case CPUFREQ_GOV_POLICY_EXIT:
		if (!--tunables->usage_count) {
			if (policy->governor->initialized == 1) {
				cpufreq_unregister_notifier(&cpufreq_notifier_block,
						CPUFREQ_TRANSITION_NOTIFIER);
				idle_notifier_unregister(&cpufreq_interactive_idle_nb);
			}

			sysfs_remove_group(get_governor_parent_kobj(policy),
					get_sysfs_attr());

			if (!have_governor_per_policy())
				cpufreq_put_global_kobject();

			kfree(tunables);
			common_tunables = NULL;
		}

		policy->governor_data = NULL;
		break;

	case CPUFREQ_GOV_START:
		mutex_lock(&gov_lock);

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (!tunables->hispeed_freq)
			tunables->hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->pol_floor_val_time =
				ktime_to_us(ktime_get());
			pcpu->loc_floor_val_time = pcpu->pol_floor_val_time;
			pcpu->pol_hispeed_val_time = pcpu->pol_floor_val_time;
			pcpu->loc_hispeed_val_time = pcpu->pol_floor_val_time;
			down_write(&pcpu->enable_sem);
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			cpufreq_interactive_timer_start(tunables, j);
			pcpu->governor_enabled = 1;
			up_write(&pcpu->enable_sem);
		}

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&gov_lock);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			down_write(&pcpu->enable_sem);
			pcpu->governor_enabled = 0;
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			up_write(&pcpu->enable_sem);
		}

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);

			down_read(&pcpu->enable_sem);
			if (pcpu->governor_enabled == 0) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			spin_lock_irqsave(&pcpu->target_freq_lock, flags);
			if (policy->max < pcpu->target_freq)
				pcpu->target_freq = policy->max;
			else if (policy->min > pcpu->target_freq)
				pcpu->target_freq = policy->min;

			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
			up_read(&pcpu->enable_sem);
		}
		break;
	}
	return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		init_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
		spin_lock_init(&pcpu->load_lock);
		spin_lock_init(&pcpu->target_freq_lock);
		init_rwsem(&pcpu->enable_sem);
	}

	spin_lock_init(&speedchange_cpumask_lock);
	mutex_init(&gov_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency sensitive workloads");
MODULE_LICENSE("GPL");