/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

struct cpufreq_interactive_cpuinfo {
        struct timer_list cpu_timer;
        struct timer_list cpu_slack_timer;
        spinlock_t load_lock; /* protects the next 4 fields */
        u64 time_in_idle;
        u64 time_in_idle_timestamp;
        u64 cputime_speedadj;
        u64 cputime_speedadj_timestamp;
        struct cpufreq_policy *policy;
        struct cpufreq_frequency_table *freq_table;
        unsigned int target_freq;
        unsigned int floor_freq;
        u64 floor_validate_time;
        u64 hispeed_validate_time;
        struct rw_semaphore enable_sem;
        int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
        DEFAULT_ABOVE_HISPEED_DELAY };

struct cpufreq_interactive_tunables {
        int usage_count;
        /* Hi speed to bump to from lo speed when load bursts (default max) */
        unsigned int hispeed_freq;
        /* Go to hi speed when CPU load is at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
        unsigned long go_hispeed_load;
        /* Target load.  Lower values result in higher CPU speeds. */
        spinlock_t target_loads_lock;
        unsigned int *target_loads;
        int ntarget_loads;
        /*
         * The minimum amount of time to spend at a frequency before we can
         * ramp down.
         */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
        unsigned long min_sample_time;
        /* The sample rate of the timer used to re-evaluate frequency. */
        unsigned long timer_rate;
        /*
         * Wait this long before raising speed above hispeed, by default a
         * single timer interval.
         */
        spinlock_t above_hispeed_delay_lock;
        unsigned int *above_hispeed_delay;
        int nabove_hispeed_delay;
        /* Non-zero means indefinite speed boost active */
        int boost_val;
        /* Duration of a boost pulse in usecs */
        int boostpulse_duration_val;
        /* End time of boost pulse in ktime converted to usecs */
        u64 boostpulse_endtime;
        /*
         * Max additional time to wait in idle, beyond timer_rate, at speeds
         * above minimum before wakeup to reduce speed, or -1 if unnecessary.
         */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
        int timer_slack_val;
        bool io_is_busy;
};

/* For cases where we have a single governor instance for the system */
static struct cpufreq_interactive_tunables *common_tunables;

static struct attribute_group *get_sysfs_attr(void);

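/*
 * Re-arm the pinned per-CPU sampling timer one timer_rate interval from
 * now and reset the load sampling baseline.  If the CPU is above the
 * policy minimum, also arm the slack timer so a long idle stretch still
 * gets a wakeup to re-evaluate (and possibly lower) speed.
 */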
static void cpufreq_interactive_timer_resched(
        struct cpufreq_interactive_cpuinfo *pcpu)
{
        struct cpufreq_interactive_tunables *tunables =
                pcpu->policy->governor_data;
        unsigned long expires;
        unsigned long flags;

        spin_lock_irqsave(&pcpu->load_lock, flags);
        pcpu->time_in_idle =
                get_cpu_idle_time(smp_processor_id(),
                                  &pcpu->time_in_idle_timestamp,
                                  tunables->io_is_busy);
        pcpu->cputime_speedadj = 0;
        pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
        expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
        mod_timer_pinned(&pcpu->cpu_timer, expires);

        if (tunables->timer_slack_val >= 0 &&
            pcpu->target_freq > pcpu->policy->min) {
                expires += usecs_to_jiffies(tunables->timer_slack_val);
                mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
        }

        spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

/*
 * The caller must hold the enable_sem write semaphore to avoid any timer
 * race.  The cpu_timer and cpu_slack_timer must be deactivated when
 * calling this function.
 */
static void cpufreq_interactive_timer_start(
        struct cpufreq_interactive_tunables *tunables, int cpu)
{
        struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
        unsigned long expires = jiffies +
                usecs_to_jiffies(tunables->timer_rate);
        unsigned long flags;

        pcpu->cpu_timer.expires = expires;
        add_timer_on(&pcpu->cpu_timer, cpu);
        if (tunables->timer_slack_val >= 0 &&
            pcpu->target_freq > pcpu->policy->min) {
                expires += usecs_to_jiffies(tunables->timer_slack_val);
                pcpu->cpu_slack_timer.expires = expires;
                add_timer_on(&pcpu->cpu_slack_timer, cpu);
        }

        spin_lock_irqsave(&pcpu->load_lock, flags);
        pcpu->time_in_idle =
                get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
                                  tunables->io_is_busy);
        pcpu->cputime_speedadj = 0;
        pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
        spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

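/*
 * above_hispeed_delay and target_loads are flat arrays of the form
 * { val, freq, val, freq, ..., val }: values sit at even indices, and
 * odd indices hold the frequency at or above which the next value takes
 * effect.  E.g. the tunable string "85 1100000:90" is stored as
 * {85, 1100000, 90}: use 85 below 1.1 GHz and 90 from 1.1 GHz up.
 */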
static unsigned int freq_to_above_hispeed_delay(
        struct cpufreq_interactive_tunables *tunables,
        unsigned int freq)
{
        int i;
        unsigned int ret;
        unsigned long flags;

        spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

        for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
                        freq >= tunables->above_hispeed_delay[i+1]; i += 2)
                ;

        ret = tunables->above_hispeed_delay[i];
        spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
        return ret;
}

static unsigned int freq_to_targetload(
        struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
        int i;
        unsigned int ret;
        unsigned long flags;

        spin_lock_irqsave(&tunables->target_loads_lock, flags);

        for (i = 0; i < tunables->ntarget_loads - 1 &&
                        freq >= tunables->target_loads[i+1]; i += 2)
                ;

        ret = tunables->target_loads[i];
        spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
        return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
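/*
 * Illustrative example: with the CPU at 1000 MHz, a measured load of
 * 45% and a flat target load of 90, loadadjfreq = 45 * 1000 MHz, so the
 * first candidate is the lowest table frequency >= 45000/90 = 500 MHz.
 * The lookup repeats because target_loads may specify a different
 * target load at the candidate frequency.
 */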
static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
                unsigned int loadadjfreq)
{
        unsigned int freq = pcpu->policy->cur;
        unsigned int prevfreq, freqmin, freqmax;
        unsigned int tl;
        int index;

        freqmin = 0;
        freqmax = UINT_MAX;

        do {
                prevfreq = freq;
                tl = freq_to_targetload(pcpu->policy->governor_data, freq);

                /*
                 * Find the lowest frequency where the computed load is less
                 * than or equal to the target load.
                 */

                if (cpufreq_frequency_table_target(
                            pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
                            CPUFREQ_RELATION_L, &index))
                        break;
                freq = pcpu->freq_table[index].frequency;

                if (freq > prevfreq) {
                        /* The previous frequency is too low. */
                        freqmin = prevfreq;

                        if (freq >= freqmax) {
                                /*
                                 * Find the highest frequency that is less
                                 * than freqmax.
                                 */
                                if (cpufreq_frequency_table_target(
                                            pcpu->policy, pcpu->freq_table,
                                            freqmax - 1, CPUFREQ_RELATION_H,
                                            &index))
                                        break;
                                freq = pcpu->freq_table[index].frequency;

                                if (freq == freqmin) {
                                        /*
                                         * The first frequency below freqmax
                                         * has already been found to be too
                                         * low.  freqmax is the lowest speed
                                         * we found that is fast enough.
                                         */
                                        freq = freqmax;
                                        break;
                                }
                        }
                } else if (freq < prevfreq) {
                        /* The previous frequency is high enough. */
                        freqmax = prevfreq;

                        if (freq <= freqmin) {
                                /*
                                 * Find the lowest frequency that is higher
                                 * than freqmin.
                                 */
                                if (cpufreq_frequency_table_target(
                                            pcpu->policy, pcpu->freq_table,
                                            freqmin + 1, CPUFREQ_RELATION_L,
                                            &index))
                                        break;
                                freq = pcpu->freq_table[index].frequency;

                                /*
                                 * If freqmax is the first frequency above
                                 * freqmin then we have already found that
                                 * this speed is fast enough.
                                 */
                                if (freq == freqmax)
                                        break;
                        }
                }

                /* If same frequency chosen as previous then done. */
        } while (freq != prevfreq);

        return freq;
}

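/*
 * Sample idle and wall time since the last call and accumulate
 * (active time * current frequency) into cputime_speedadj, from which
 * the timer later derives a frequency-weighted load.  Returns the
 * timestamp of this sample.  Callers hold the CPU's load_lock.
 */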
static u64 update_load(int cpu)
{
        struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
        struct cpufreq_interactive_tunables *tunables =
                pcpu->policy->governor_data;
        u64 now;
        u64 now_idle;
        unsigned int delta_idle;
        unsigned int delta_time;
        u64 active_time;

        now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
        delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
        delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

        if (delta_time <= delta_idle)
                active_time = 0;
        else
                active_time = delta_time - delta_idle;

        pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

        pcpu->time_in_idle = now_idle;
        pcpu->time_in_idle_timestamp = now;
        return now;
}

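/*
 * Per-CPU sampling timer.  Derives the frequency-weighted load since
 * the last sample, picks a new target speed (hispeed bursting plus
 * choose_freq()), enforces the above_hispeed_delay and min_sample_time
 * hold-offs, then hands the change off to speedchange_task.
 */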
static void cpufreq_interactive_timer(unsigned long data)
{
        u64 now;
        unsigned int delta_time;
        u64 cputime_speedadj;
        int cpu_load;
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, data);
        struct cpufreq_interactive_tunables *tunables =
                pcpu->policy->governor_data;
        unsigned int new_freq;
        unsigned int loadadjfreq;
        unsigned int index;
        unsigned long flags;
        bool boosted;

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled)
                goto exit;

        spin_lock_irqsave(&pcpu->load_lock, flags);
        now = update_load(data);
        delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
        cputime_speedadj = pcpu->cputime_speedadj;
        spin_unlock_irqrestore(&pcpu->load_lock, flags);

        if (WARN_ON_ONCE(!delta_time))
                goto rearm;

        do_div(cputime_speedadj, delta_time);
        loadadjfreq = (unsigned int)cputime_speedadj * 100;
        cpu_load = loadadjfreq / pcpu->target_freq;
        boosted = tunables->boost_val || now < tunables->boostpulse_endtime;

        if (cpu_load >= tunables->go_hispeed_load || boosted) {
                if (pcpu->target_freq < tunables->hispeed_freq) {
                        new_freq = tunables->hispeed_freq;
                } else {
                        new_freq = choose_freq(pcpu, loadadjfreq);

                        if (new_freq < tunables->hispeed_freq)
                                new_freq = tunables->hispeed_freq;
                }
        } else {
                new_freq = choose_freq(pcpu, loadadjfreq);
        }

        if (pcpu->target_freq >= tunables->hispeed_freq &&
            new_freq > pcpu->target_freq &&
            now - pcpu->hispeed_validate_time <
            freq_to_above_hispeed_delay(tunables, pcpu->target_freq)) {
                trace_cpufreq_interactive_notyet(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                goto rearm;
        }

        pcpu->hispeed_validate_time = now;

        if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
                                           new_freq, CPUFREQ_RELATION_L,
                                           &index))
                goto rearm;

        new_freq = pcpu->freq_table[index].frequency;

        /*
         * Do not scale below floor_freq unless we have been at or above the
         * floor frequency for the minimum sample time since last validated.
         */
        if (new_freq < pcpu->floor_freq) {
                if (now - pcpu->floor_validate_time <
                                tunables->min_sample_time) {
                        trace_cpufreq_interactive_notyet(
                                data, cpu_load, pcpu->target_freq,
                                pcpu->policy->cur, new_freq);
                        goto rearm;
                }
        }

        /*
         * Update the timestamp for checking whether speed has been held at
         * or above the selected frequency for a minimum of min_sample_time,
         * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
         * allow the speed to drop as soon as the boostpulse duration expires
         * (or the indefinite boost is turned off).
         */

        if (!boosted || new_freq > tunables->hispeed_freq) {
                pcpu->floor_freq = new_freq;
                pcpu->floor_validate_time = now;
        }

        if (pcpu->target_freq == new_freq) {
                trace_cpufreq_interactive_already(
                        data, cpu_load, pcpu->target_freq,
                        pcpu->policy->cur, new_freq);
                goto rearm_if_notmax;
        }

        trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
                                         pcpu->policy->cur, new_freq);

        pcpu->target_freq = new_freq;
        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
        cpumask_set_cpu(data, &speedchange_cpumask);
        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
        wake_up_process(speedchange_task);

rearm_if_notmax:
        /*
         * Already set max speed and don't see a need to change that,
         * wait until next idle to re-evaluate, don't need timer.
         */
        if (pcpu->target_freq == pcpu->policy->max)
                goto exit;

rearm:
        if (!timer_pending(&pcpu->cpu_timer))
                cpufreq_interactive_timer_resched(pcpu);

exit:
        up_read(&pcpu->enable_sem);
        return;
}

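/*
 * Idle hooks.  On idle entry, keep a sampling timer pending whenever
 * this CPU is above minimum speed, so an idle CPU cannot pin its
 * siblings at a high frequency indefinitely.  On idle exit, re-arm the
 * timer, or run it immediately if it is overdue.
 */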
static void cpufreq_interactive_idle_start(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());
        int pending;

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled) {
                up_read(&pcpu->enable_sem);
                return;
        }

        pending = timer_pending(&pcpu->cpu_timer);

        if (pcpu->target_freq != pcpu->policy->min) {
                /*
                 * Entering idle while not at lowest speed.  On some
                 * platforms this can hold the other CPU(s) at that speed
                 * even though the CPU is idle.  Set a timer to re-evaluate
                 * speed so this idle CPU doesn't hold the other CPUs above
                 * min indefinitely.  This should probably be a quirk of
                 * the CPUFreq driver.
                 */
                if (!pending)
                        cpufreq_interactive_timer_resched(pcpu);
        }

        up_read(&pcpu->enable_sem);
}

static void cpufreq_interactive_idle_end(void)
{
        struct cpufreq_interactive_cpuinfo *pcpu =
                &per_cpu(cpuinfo, smp_processor_id());

        if (!down_read_trylock(&pcpu->enable_sem))
                return;
        if (!pcpu->governor_enabled) {
                up_read(&pcpu->enable_sem);
                return;
        }

        /* Arm the timer for 1-2 ticks later if not already. */
        if (!timer_pending(&pcpu->cpu_timer)) {
                cpufreq_interactive_timer_resched(pcpu);
        } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
                del_timer(&pcpu->cpu_timer);
                del_timer(&pcpu->cpu_slack_timer);
                cpufreq_interactive_timer(smp_processor_id());
        }

        up_read(&pcpu->enable_sem);
}

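/*
 * Real-time kthread that applies speed changes.  The timer flags CPUs
 * in speedchange_cpumask and wakes this task; each flagged CPU's policy
 * is then driven to the highest target_freq among the CPUs sharing it.
 */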
static int cpufreq_interactive_speedchange_task(void *data)
{
        unsigned int cpu;
        cpumask_t tmp_mask;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock_irqsave(&speedchange_cpumask_lock, flags);

                if (cpumask_empty(&speedchange_cpumask)) {
                        spin_unlock_irqrestore(&speedchange_cpumask_lock,
                                               flags);
                        schedule();

                        if (kthread_should_stop())
                                break;

                        spin_lock_irqsave(&speedchange_cpumask_lock, flags);
                }

                set_current_state(TASK_RUNNING);
                tmp_mask = speedchange_cpumask;
                cpumask_clear(&speedchange_cpumask);
                spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

                for_each_cpu(cpu, &tmp_mask) {
                        unsigned int j;
                        unsigned int max_freq = 0;

                        pcpu = &per_cpu(cpuinfo, cpu);
                        if (!down_read_trylock(&pcpu->enable_sem))
                                continue;
                        if (!pcpu->governor_enabled) {
                                up_read(&pcpu->enable_sem);
                                continue;
                        }

                        for_each_cpu(j, pcpu->policy->cpus) {
                                struct cpufreq_interactive_cpuinfo *pjcpu =
                                        &per_cpu(cpuinfo, j);

                                if (pjcpu->target_freq > max_freq)
                                        max_freq = pjcpu->target_freq;
                        }

                        if (max_freq != pcpu->policy->cur)
                                __cpufreq_driver_target(pcpu->policy,
                                                        max_freq,
                                                        CPUFREQ_RELATION_H);
                        trace_cpufreq_interactive_setspeed(cpu,
                                                     pcpu->target_freq,
                                                     pcpu->policy->cur);

                        up_read(&pcpu->enable_sem);
                }
        }

        return 0;
}

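/*
 * Raise all online CPUs to at least hispeed_freq and reset their speed
 * floors, so the boosted speed is held for min_sample_time (or until
 * the boost pulse ends).  Called from the boost and boostpulse sysfs
 * handlers.
 */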
static void cpufreq_interactive_boost(void)
{
        int i;
        int anyboost = 0;
        unsigned long flags;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_interactive_tunables *tunables;

        spin_lock_irqsave(&speedchange_cpumask_lock, flags);

        for_each_online_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
                tunables = pcpu->policy->governor_data;

                if (pcpu->target_freq < tunables->hispeed_freq) {
                        pcpu->target_freq = tunables->hispeed_freq;
                        cpumask_set_cpu(i, &speedchange_cpumask);
                        pcpu->hispeed_validate_time =
                                ktime_to_us(ktime_get());
                        anyboost = 1;
                }

                /*
                 * Set floor freq and (re)start timer for when last
                 * validated.
                 */

                pcpu->floor_freq = tunables->hispeed_freq;
                pcpu->floor_validate_time = ktime_to_us(ktime_get());
        }

        spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

        if (anyboost)
                wake_up_process(speedchange_task);
}

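/*
 * cpufreq transition notifier.  On POSTCHANGE, fold the time spent at
 * the outgoing frequency into cputime_speedadj for every CPU sharing
 * the changed policy, so the next sample sees an accurate
 * frequency-weighted load.
 */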
static int cpufreq_interactive_notifier(
        struct notifier_block *nb, unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;
        struct cpufreq_interactive_cpuinfo *pcpu;
        int cpu;
        unsigned long flags;

        if (val == CPUFREQ_POSTCHANGE) {
                pcpu = &per_cpu(cpuinfo, freq->cpu);
                if (!down_read_trylock(&pcpu->enable_sem))
                        return 0;
                if (!pcpu->governor_enabled) {
                        up_read(&pcpu->enable_sem);
                        return 0;
                }

                for_each_cpu(cpu, pcpu->policy->cpus) {
                        struct cpufreq_interactive_cpuinfo *pjcpu =
                                &per_cpu(cpuinfo, cpu);
                        if (cpu != freq->cpu) {
                                if (!down_read_trylock(&pjcpu->enable_sem))
                                        continue;
                                if (!pjcpu->governor_enabled) {
                                        up_read(&pjcpu->enable_sem);
                                        continue;
                                }
                        }
                        spin_lock_irqsave(&pjcpu->load_lock, flags);
                        update_load(cpu);
                        spin_unlock_irqrestore(&pjcpu->load_lock, flags);
                        if (cpu != freq->cpu)
                                up_read(&pjcpu->enable_sem);
                }

                up_read(&pcpu->enable_sem);
        }
        return 0;
}

static struct notifier_block cpufreq_notifier_block = {
        .notifier_call = cpufreq_interactive_notifier,
};

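/*
 * Parse a tunable string of the form "val" or "val freq:val ... :val"
 * (space- or colon-separated, so the token count must be odd) into a
 * kmalloc'd array of unsigned ints, e.g. "85 1100000:90" becomes
 * {85, 1100000, 90}.  Returns an ERR_PTR() on malformed input.
 */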
static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
        const char *cp;
        int i;
        int ntokens = 1;
        unsigned int *tokenized_data;
        int err = -EINVAL;

        cp = buf;
        while ((cp = strpbrk(cp + 1, " :")))
                ntokens++;

        if (!(ntokens & 0x1))
                goto err;

        tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
        if (!tokenized_data) {
                err = -ENOMEM;
                goto err;
        }

        cp = buf;
        i = 0;
        while (i < ntokens) {
                if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
                        goto err_kfree;

                cp = strpbrk(cp, " :");
                if (!cp)
                        break;
                cp++;
        }

        if (i != ntokens)
                goto err_kfree;

        *num_tokens = ntokens;
        return tokenized_data;

err_kfree:
        kfree(tokenized_data);
err:
        return ERR_PTR(err);
}

static ssize_t show_target_loads(
        struct cpufreq_interactive_tunables *tunables,
        char *buf)
{
        int i;
        ssize_t ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&tunables->target_loads_lock, flags);

        for (i = 0; i < tunables->ntarget_loads; i++)
                ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
                               i & 0x1 ? ":" : " ");

        sprintf(buf + ret - 1, "\n");
        spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
        return ret;
}

static ssize_t store_target_loads(
        struct cpufreq_interactive_tunables *tunables,
        const char *buf, size_t count)
{
        int ntokens;
        unsigned int *new_target_loads = NULL;
        unsigned long flags;

        new_target_loads = get_tokenized_data(buf, &ntokens);
        if (IS_ERR(new_target_loads))
                return PTR_RET(new_target_loads);

        spin_lock_irqsave(&tunables->target_loads_lock, flags);
        if (tunables->target_loads != default_target_loads)
                kfree(tunables->target_loads);
        tunables->target_loads = new_target_loads;
        tunables->ntarget_loads = ntokens;
        spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
        return count;
}

static ssize_t show_above_hispeed_delay(
        struct cpufreq_interactive_tunables *tunables, char *buf)
{
        int i;
        ssize_t ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

        for (i = 0; i < tunables->nabove_hispeed_delay; i++)
                ret += sprintf(buf + ret, "%u%s",
                               tunables->above_hispeed_delay[i],
                               i & 0x1 ? ":" : " ");

        sprintf(buf + ret - 1, "\n");
        spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
        return ret;
}

static ssize_t store_above_hispeed_delay(
        struct cpufreq_interactive_tunables *tunables,
        const char *buf, size_t count)
{
        int ntokens;
        unsigned int *new_above_hispeed_delay = NULL;
        unsigned long flags;

        new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
        if (IS_ERR(new_above_hispeed_delay))
                return PTR_RET(new_above_hispeed_delay);

        spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
        if (tunables->above_hispeed_delay != default_above_hispeed_delay)
                kfree(tunables->above_hispeed_delay);
        tunables->above_hispeed_delay = new_above_hispeed_delay;
        tunables->nabove_hispeed_delay = ntokens;
        spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
        return count;
}


static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%u\n", tunables->hispeed_freq);
}

static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->hispeed_freq = val;
        return count;
}

static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
                *tunables, char *buf)
{
        return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
                *tunables, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->go_hispeed_load = val;
        return count;
}

static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
                *tunables, char *buf)
{
        return sprintf(buf, "%lu\n", tunables->min_sample_time);
}

static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
                *tunables, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->min_sample_time = val;
        return count;
}

static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%lu\n", tunables->timer_rate);
}

static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->timer_rate = val;
        return count;
}

static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%d\n", tunables->timer_slack_val);
}

static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        long val;       /* signed: -1 disables the slack timer */

        ret = kstrtol(buf, 10, &val);
        if (ret < 0)
                return ret;

        tunables->timer_slack_val = val;
        return count;
}

static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%d\n", tunables->boost_val);
}

static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        tunables->boost_val = val;

        if (tunables->boost_val) {
                trace_cpufreq_interactive_boost("on");
                cpufreq_interactive_boost();
        } else {
                trace_cpufreq_interactive_unboost("off");
        }

        return count;
}

static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
                tunables->boostpulse_duration_val;
        trace_cpufreq_interactive_boost("pulse");
        cpufreq_interactive_boost();
        return count;
}

static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
                *tunables, char *buf)
{
        return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
                *tunables, const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;

        tunables->boostpulse_duration_val = val;
        return count;
}

static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
                char *buf)
{
        return sprintf(buf, "%u\n", tunables->io_is_busy);
}

static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
                const char *buf, size_t count)
{
        int ret;
        unsigned long val;

        ret = kstrtoul(buf, 0, &val);
        if (ret < 0)
                return ret;
        tunables->io_is_busy = val;
        return count;
}

/*
 * Create show/store routines
 * - sys: One governor instance for complete SYSTEM
 * - pol: One governor instance per struct cpufreq_policy
 */
#define show_gov_pol_sys(file_name) \
static ssize_t show_##file_name##_gov_sys \
(struct kobject *kobj, struct attribute *attr, char *buf) \
{ \
        return show_##file_name(common_tunables, buf); \
} \
 \
static ssize_t show_##file_name##_gov_pol \
(struct cpufreq_policy *policy, char *buf) \
{ \
        return show_##file_name(policy->governor_data, buf); \
}

#define store_gov_pol_sys(file_name) \
static ssize_t store_##file_name##_gov_sys \
(struct kobject *kobj, struct attribute *attr, const char *buf, \
        size_t count) \
{ \
        return store_##file_name(common_tunables, buf, count); \
} \
 \
static ssize_t store_##file_name##_gov_pol \
(struct cpufreq_policy *policy, const char *buf, size_t count) \
{ \
        return store_##file_name(policy->governor_data, buf, count); \
}

#define show_store_gov_pol_sys(file_name) \
show_gov_pol_sys(file_name); \
store_gov_pol_sys(file_name)

show_store_gov_pol_sys(target_loads);
show_store_gov_pol_sys(above_hispeed_delay);
show_store_gov_pol_sys(hispeed_freq);
show_store_gov_pol_sys(go_hispeed_load);
show_store_gov_pol_sys(min_sample_time);
show_store_gov_pol_sys(timer_rate);
show_store_gov_pol_sys(timer_slack);
show_store_gov_pol_sys(boost);
store_gov_pol_sys(boostpulse);
show_store_gov_pol_sys(boostpulse_duration);
show_store_gov_pol_sys(io_is_busy);

#define gov_sys_attr_rw(_name) \
static struct global_attr _name##_gov_sys = \
__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)

#define gov_pol_attr_rw(_name) \
static struct freq_attr _name##_gov_pol = \
__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)

#define gov_sys_pol_attr_rw(_name) \
        gov_sys_attr_rw(_name); \
        gov_pol_attr_rw(_name)

gov_sys_pol_attr_rw(target_loads);
gov_sys_pol_attr_rw(above_hispeed_delay);
gov_sys_pol_attr_rw(hispeed_freq);
gov_sys_pol_attr_rw(go_hispeed_load);
gov_sys_pol_attr_rw(min_sample_time);
gov_sys_pol_attr_rw(timer_rate);
gov_sys_pol_attr_rw(timer_slack);
gov_sys_pol_attr_rw(boost);
gov_sys_pol_attr_rw(boostpulse_duration);
gov_sys_pol_attr_rw(io_is_busy);

static struct global_attr boostpulse_gov_sys =
        __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);

static struct freq_attr boostpulse_gov_pol =
        __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);

/* One governor instance for entire system */
static struct attribute *interactive_attributes_gov_sys[] = {
        &target_loads_gov_sys.attr,
        &above_hispeed_delay_gov_sys.attr,
        &hispeed_freq_gov_sys.attr,
        &go_hispeed_load_gov_sys.attr,
        &min_sample_time_gov_sys.attr,
        &timer_rate_gov_sys.attr,
        &timer_slack_gov_sys.attr,
        &boost_gov_sys.attr,
        &boostpulse_gov_sys.attr,
        &boostpulse_duration_gov_sys.attr,
        &io_is_busy_gov_sys.attr,
        NULL,
};

static struct attribute_group interactive_attr_group_gov_sys = {
        .attrs = interactive_attributes_gov_sys,
        .name = "interactive",
};

/* Per-policy governor instance */
static struct attribute *interactive_attributes_gov_pol[] = {
        &target_loads_gov_pol.attr,
        &above_hispeed_delay_gov_pol.attr,
        &hispeed_freq_gov_pol.attr,
        &go_hispeed_load_gov_pol.attr,
        &min_sample_time_gov_pol.attr,
        &timer_rate_gov_pol.attr,
        &timer_slack_gov_pol.attr,
        &boost_gov_pol.attr,
        &boostpulse_gov_pol.attr,
        &boostpulse_duration_gov_pol.attr,
        &io_is_busy_gov_pol.attr,
        NULL,
};

static struct attribute_group interactive_attr_group_gov_pol = {
        .attrs = interactive_attributes_gov_pol,
        .name = "interactive",
};

static struct attribute_group *get_sysfs_attr(void)
{
        if (have_governor_per_policy())
                return &interactive_attr_group_gov_pol;
        else
                return &interactive_attr_group_gov_sys;
}
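
/*
 * Example tuning from userspace (illustrative paths: a single
 * system-wide instance appears under
 * /sys/devices/system/cpu/cpufreq/interactive, while per-policy
 * instances live under each policy's own cpufreq directory):
 *
 *   echo 1200000 > .../interactive/hispeed_freq
 *   echo "85 1100000:90" > .../interactive/target_loads
 *   echo 1 > .../interactive/boostpulse
 */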

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
                                             unsigned long val,
                                             void *data)
{
        switch (val) {
        case IDLE_START:
                cpufreq_interactive_idle_start();
                break;
        case IDLE_END:
                cpufreq_interactive_idle_end();
                break;
        }

        return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
        .notifier_call = cpufreq_interactive_idle_notifier,
};

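/*
 * Governor entry point.  POLICY_INIT/POLICY_EXIT allocate and free the
 * tunables (shared or per-policy) along with their sysfs group;
 * START/STOP arm and tear down the per-CPU timers; LIMITS clamps
 * target frequencies to the new policy bounds and restarts the timers.
 */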
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
                unsigned int event)
{
        int rc;
        unsigned int j;
        struct cpufreq_interactive_cpuinfo *pcpu;
        struct cpufreq_frequency_table *freq_table;
        struct cpufreq_interactive_tunables *tunables;

        if (have_governor_per_policy())
                tunables = policy->governor_data;
        else
                tunables = common_tunables;

        WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));

        switch (event) {
        case CPUFREQ_GOV_POLICY_INIT:
                if (have_governor_per_policy()) {
                        WARN_ON(tunables);
                } else if (tunables) {
                        tunables->usage_count++;
                        policy->governor_data = tunables;
                        return 0;
                }

                tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
                if (!tunables) {
                        pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
                        return -ENOMEM;
                }

                tunables->usage_count = 1;
                tunables->above_hispeed_delay = default_above_hispeed_delay;
                tunables->nabove_hispeed_delay =
                        ARRAY_SIZE(default_above_hispeed_delay);
                tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
                tunables->target_loads = default_target_loads;
                tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
                tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
                tunables->timer_rate = DEFAULT_TIMER_RATE;
                tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
                tunables->timer_slack_val = DEFAULT_TIMER_SLACK;

                spin_lock_init(&tunables->target_loads_lock);
                spin_lock_init(&tunables->above_hispeed_delay_lock);

                policy->governor_data = tunables;
                if (!have_governor_per_policy())
                        common_tunables = tunables;

                rc = sysfs_create_group(get_governor_parent_kobj(policy),
                                get_sysfs_attr());
                if (rc) {
                        kfree(tunables);
                        policy->governor_data = NULL;
                        if (!have_governor_per_policy())
                                common_tunables = NULL;
                        return rc;
                }

                if (!policy->governor->initialized) {
                        idle_notifier_register(&cpufreq_interactive_idle_nb);
                        cpufreq_register_notifier(&cpufreq_notifier_block,
                                        CPUFREQ_TRANSITION_NOTIFIER);
                }

                break;

        case CPUFREQ_GOV_POLICY_EXIT:
                if (!--tunables->usage_count) {
                        if (policy->governor->initialized == 1) {
                                cpufreq_unregister_notifier(&cpufreq_notifier_block,
                                                CPUFREQ_TRANSITION_NOTIFIER);
                                idle_notifier_unregister(&cpufreq_interactive_idle_nb);
                        }

                        sysfs_remove_group(get_governor_parent_kobj(policy),
                                        get_sysfs_attr());
                        kfree(tunables);
                        common_tunables = NULL;
                }

                policy->governor_data = NULL;
                break;

        case CPUFREQ_GOV_START:
                mutex_lock(&gov_lock);

                freq_table = cpufreq_frequency_get_table(policy->cpu);
                if (!tunables->hispeed_freq)
                        tunables->hispeed_freq = policy->max;

                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        pcpu->policy = policy;
                        pcpu->target_freq = policy->cur;
                        pcpu->freq_table = freq_table;
                        pcpu->floor_freq = pcpu->target_freq;
                        pcpu->floor_validate_time =
                                ktime_to_us(ktime_get());
                        pcpu->hispeed_validate_time =
                                pcpu->floor_validate_time;
                        down_write(&pcpu->enable_sem);
                        del_timer_sync(&pcpu->cpu_timer);
                        del_timer_sync(&pcpu->cpu_slack_timer);
                        cpufreq_interactive_timer_start(tunables, j);
                        pcpu->governor_enabled = 1;
                        up_write(&pcpu->enable_sem);
                }

                mutex_unlock(&gov_lock);
                break;

        case CPUFREQ_GOV_STOP:
                mutex_lock(&gov_lock);
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);
                        down_write(&pcpu->enable_sem);
                        pcpu->governor_enabled = 0;
                        del_timer_sync(&pcpu->cpu_timer);
                        del_timer_sync(&pcpu->cpu_slack_timer);
                        up_write(&pcpu->enable_sem);
                }

                mutex_unlock(&gov_lock);
                break;

        case CPUFREQ_GOV_LIMITS:
                if (policy->max < policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > policy->cur)
                        __cpufreq_driver_target(policy,
                                        policy->min, CPUFREQ_RELATION_L);
                for_each_cpu(j, policy->cpus) {
                        pcpu = &per_cpu(cpuinfo, j);

                        /* hold write semaphore to avoid race */
                        down_write(&pcpu->enable_sem);
                        if (pcpu->governor_enabled == 0) {
                                up_write(&pcpu->enable_sem);
                                continue;
                        }

                        /* update target_freq first */
                        if (policy->max < pcpu->target_freq)
                                pcpu->target_freq = policy->max;
                        else if (policy->min > pcpu->target_freq)
                                pcpu->target_freq = policy->min;

                        /*
                         * Reschedule the timer: delete the timers first,
                         * else the timer callback may return without
                         * re-arming the timer when it fails to acquire the
                         * semaphore, leaving the timer stopped unexpectedly.
                         */
                        del_timer_sync(&pcpu->cpu_timer);
                        del_timer_sync(&pcpu->cpu_slack_timer);
                        cpufreq_interactive_timer_start(tunables, j);
                        up_write(&pcpu->enable_sem);
                }
                break;
        }
        return 0;
}

1275
Viresh Kumarc7f826b2013-05-16 14:58:53 +05301276#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
1277static
1278#endif
1279struct cpufreq_governor cpufreq_gov_interactive = {
1280 .name = "interactive",
1281 .governor = cpufreq_governor_interactive,
1282 .max_transition_latency = 10000000,
1283 .owner = THIS_MODULE,
1284};
1285
Todd Poynor4add2592012-12-18 17:50:10 -08001286static void cpufreq_interactive_nop_timer(unsigned long data)
1287{
1288}
1289
Mike Chanef969692010-06-22 11:26:45 -07001290static int __init cpufreq_interactive_init(void)
1291{
1292 unsigned int i;
1293 struct cpufreq_interactive_cpuinfo *pcpu;
1294 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
1295
Mike Chanef969692010-06-22 11:26:45 -07001296 /* Initalize per-cpu timers */
1297 for_each_possible_cpu(i) {
1298 pcpu = &per_cpu(cpuinfo, i);
Todd Poynor4add2592012-12-18 17:50:10 -08001299 init_timer_deferrable(&pcpu->cpu_timer);
Mike Chanef969692010-06-22 11:26:45 -07001300 pcpu->cpu_timer.function = cpufreq_interactive_timer;
1301 pcpu->cpu_timer.data = i;
Todd Poynor4add2592012-12-18 17:50:10 -08001302 init_timer(&pcpu->cpu_slack_timer);
1303 pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
Todd Poynor0e58da22012-12-11 16:05:03 -08001304 spin_lock_init(&pcpu->load_lock);
Todd Poynor5cad6092012-12-18 17:50:44 -08001305 init_rwsem(&pcpu->enable_sem);
Mike Chanef969692010-06-22 11:26:45 -07001306 }
1307
Todd Poynor0f1920b2012-07-16 17:07:15 -07001308 spin_lock_init(&speedchange_cpumask_lock);
Lianwei Wang1d4f9a72013-01-07 14:15:51 +08001309 mutex_init(&gov_lock);
Todd Poynor0f1920b2012-07-16 17:07:15 -07001310 speedchange_task =
1311 kthread_create(cpufreq_interactive_speedchange_task, NULL,
1312 "cfinteractive");
1313 if (IS_ERR(speedchange_task))
1314 return PTR_ERR(speedchange_task);
Sam Leffler5c9b8272012-06-27 12:55:56 -07001315
Todd Poynor0f1920b2012-07-16 17:07:15 -07001316 sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
1317 get_task_struct(speedchange_task);
Mike Chanef969692010-06-22 11:26:45 -07001318
Sam Leffler5c9b8272012-06-27 12:55:56 -07001319 /* NB: wake up so the thread does not look hung to the freezer */
Todd Poynor0f1920b2012-07-16 17:07:15 -07001320 wake_up_process(speedchange_task);
Sam Leffler5c9b8272012-06-27 12:55:56 -07001321
Mike Chanef969692010-06-22 11:26:45 -07001322 return cpufreq_register_governor(&cpufreq_gov_interactive);
Mike Chanef969692010-06-22 11:26:45 -07001323}
1324
1325#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
1326fs_initcall(cpufreq_interactive_init);
1327#else
1328module_init(cpufreq_interactive_init);
1329#endif
1330
1331static void __exit cpufreq_interactive_exit(void)
1332{
1333 cpufreq_unregister_governor(&cpufreq_gov_interactive);
Todd Poynor0f1920b2012-07-16 17:07:15 -07001334 kthread_stop(speedchange_task);
1335 put_task_struct(speedchange_task);
Mike Chanef969692010-06-22 11:26:45 -07001336}
1337
1338module_exit(cpufreq_interactive_exit);
1339
1340MODULE_AUTHOR("Mike Chan <mike@android.com>");
1341MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
1342 "Latency sensitive workloads");
1343MODULE_LICENSE("GPL");